aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2013-07-12 06:34:42 -0400
committerThomas Gleixner <tglx@linutronix.de>2013-07-12 06:34:42 -0400
commitf2006e27396f55276f24434f56e208d86e7f9908 (patch)
tree71896db916d33888b4286f80117d3cac0da40e6d /drivers/net
parente399eb56a6110e13f97e644658648602e2b08de7 (diff)
parent9903883f1dd6e86f286b7bfa6e4b423f98c1cd9e (diff)
Merge branch 'linus' into timers/urgent
Get upstream changes so we can apply fixes against them Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Kconfig20
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bonding/bond_alb.c99
-rw-r--r--drivers/net/bonding/bond_main.c290
-rw-r--r--drivers/net/bonding/bond_sysfs.c133
-rw-r--r--drivers/net/bonding/bonding.h49
-rw-r--r--drivers/net/caif/caif_serial.c61
-rw-r--r--drivers/net/can/Kconfig5
-rw-r--r--drivers/net/can/at91_can.c8
-rw-r--r--drivers/net/can/bfin_can.c10
-rw-r--r--drivers/net/can/c_can/c_can_platform.c6
-rw-r--r--drivers/net/can/cc770/cc770_isa.c5
-rw-r--r--drivers/net/can/cc770/cc770_platform.c4
-rw-r--r--drivers/net/can/flexcan.c52
-rw-r--r--drivers/net/can/grcan.c12
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/led.c4
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c10
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c5
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c6
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c5
-rw-r--r--drivers/net/can/slcan.c2
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/ethernet/3com/3c509.c19
-rw-r--r--drivers/net/ethernet/3com/3c59x.c42
-rw-r--r--drivers/net/ethernet/3com/Kconfig1
-rw-r--r--drivers/net/ethernet/8390/ne.c1
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c2
-rw-r--r--drivers/net/ethernet/Kconfig6
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/adaptec/Kconfig1
-rw-r--r--drivers/net/ethernet/adi/Kconfig1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c4
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c4
-rw-r--r--drivers/net/ethernet/allwinner/Kconfig35
-rw-r--r--drivers/net/ethernet/allwinner/Makefile5
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c954
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.h108
-rw-r--r--drivers/net/ethernet/alteon/acenic.c15
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c19
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/sunlance.c6
-rw-r--r--drivers/net/ethernet/apple/bmac.c5
-rw-r--r--drivers/net/ethernet/arc/Kconfig31
-rw-r--r--drivers/net/ethernet/arc/Makefile6
-rw-r--r--drivers/net/ethernet/arc/emac.h214
-rw-r--r--drivers/net/ethernet/arc/emac_main.c819
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c152
-rw-r--r--drivers/net/ethernet/atheros/Kconfig5
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h8
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c132
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.c212
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.h25
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c173
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c25
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c25
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c27
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c1152
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.h86
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h185
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c211
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h75
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c52
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c115
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c687
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c284
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h55
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c89
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h5
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c2
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c9
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c425
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c3
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c22
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h4
-rw-r--r--drivers/net/ethernet/cadence/Kconfig1
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c1
-rw-r--r--drivers/net/ethernet/cadence/macb.c327
-rw-r--r--drivers/net/ethernet/cadence/macb.h14
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c116
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c10
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig1
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/davicom/Kconfig1
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c53
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig1
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c14
-rw-r--r--drivers/net/ethernet/dlink/Kconfig1
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c66
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c37
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c150
-rw-r--r--drivers/net/ethernet/ethoc.c2
-rw-r--r--drivers/net/ethernet/faraday/Kconfig1
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c2
-rw-r--r--drivers/net/ethernet/freescale/fec.h61
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c254
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c9
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c5
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c6
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c6
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c57
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c4
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c4
-rw-r--r--drivers/net/ethernet/ibm/Kconfig3
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c12
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c18
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c14
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c18
-rw-r--r--drivers/net/ethernet/icplus/Kconfig1
-rw-r--r--drivers/net/ethernet/icplus/ipg.c13
-rw-r--r--drivers/net/ethernet/intel/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c24
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c30
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c34
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h34
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c62
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c55
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c120
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h36
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c45
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c126
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h20
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h14
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c74
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h134
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c40
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c69
-rw-r--r--drivers/net/ethernet/jme.c1
-rw-r--r--drivers/net/ethernet/korina.c7
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c209
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c46
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c3
-rw-r--r--drivers/net/ethernet/marvell/skge.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c184
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h145
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c163
-rw-r--r--drivers/net/ethernet/micrel/Kconfig4
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c34
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c2
-rw-r--r--drivers/net/ethernet/netx-eth.c5
-rw-r--r--drivers/net/ethernet/nuvoton/Kconfig1
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c17
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c70
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c327
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c63
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c26
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c133
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h59
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c346
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c59
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c62
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c172
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c103
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c226
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c225
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c62
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c36
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c126
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c13
-rw-r--r--drivers/net/ethernet/rdc/Kconfig1
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c2
-rw-r--r--drivers/net/ethernet/realtek/Kconfig3
-rw-r--r--drivers/net/ethernet/renesas/Kconfig7
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c521
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h29
-rw-r--r--drivers/net/ethernet/s6gmac.c1
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c1
-rw-r--r--drivers/net/ethernet/sfc/efx.c42
-rw-r--r--drivers/net/ethernet/sfc/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c16
-rw-r--r--drivers/net/ethernet/sfc/filter.c15
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h5
-rw-r--r--drivers/net/ethernet/sfc/nic.c74
-rw-r--r--drivers/net/ethernet/sfc/nic.h4
-rw-r--r--drivers/net/ethernet/sfc/ptp.c13
-rw-r--r--drivers/net/ethernet/sfc/rx.c35
-rw-r--r--drivers/net/ethernet/sfc/siena.c2
-rw-r--r--drivers/net/ethernet/sgi/Kconfig1
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c14
-rw-r--r--drivers/net/ethernet/sgi/meth.c1
-rw-r--r--drivers/net/ethernet/silan/sc92031.c14
-rw-r--r--drivers/net/ethernet/sis/Kconfig2
-rw-r--r--drivers/net/ethernet/sis/sis190.c13
-rw-r--r--drivers/net/ethernet/smsc/Kconfig7
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c72
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c95
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c160
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c34
-rw-r--r--drivers/net/ethernet/sun/cassini.c18
-rw-r--r--drivers/net/ethernet/sun/niu.c5
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c6
-rw-r--r--drivers/net/ethernet/sun/sungem.c13
-rw-r--r--drivers/net/ethernet/sun/sunhme.c4
-rw-r--r--drivers/net/ethernet/sun/sunqe.c10
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c18
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c5
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c119
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c15
-rw-r--r--drivers/net/ethernet/ti/tlan.c3
-rw-r--r--drivers/net/ethernet/ti/tlan.h1
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c14
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c1
-rw-r--r--drivers/net/ethernet/via/Kconfig5
-rw-r--r--drivers/net/ethernet/via/via-velocity.c507
-rw-r--r--drivers/net/ethernet/via/via-velocity.h8
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c5
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c236
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/fddi/skfp/skfddi.c13
-rw-r--r--drivers/net/hamradio/bpqether.c7
-rw-r--r--drivers/net/hippi/rrunner.c13
-rw-r--r--drivers/net/irda/bfin_sir.c1
-rw-r--r--drivers/net/irda/sh_irda.c1
-rw-r--r--drivers/net/irda/sh_sir.c1
-rw-r--r--drivers/net/macvlan.c12
-rw-r--r--drivers/net/macvtap.c341
-rw-r--r--drivers/net/netconsole.c5
-rw-r--r--drivers/net/nlmon.c181
-rw-r--r--drivers/net/phy/Kconfig10
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/at803x.c128
-rw-r--r--drivers/net/phy/bcm63xx.c4
-rw-r--r--drivers/net/phy/marvell.c108
-rw-r--r--drivers/net/phy/mdio-sun4i.c194
-rw-r--r--drivers/net/phy/phy.c35
-rw-r--r--drivers/net/phy/phy_device.c11
-rw-r--r--drivers/net/phy/spi_ks8995.c14
-rw-r--r--drivers/net/phy/vitesse.c38
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/rionet.c103
-rw-r--r--drivers/net/team/team.c86
-rw-r--r--drivers/net/team/team_mode_loadbalance.c3
-rw-r--r--drivers/net/team/team_mode_roundrobin.c3
-rw-r--r--drivers/net/tun.c12
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/usb/ax88179_178a.c5
-rw-r--r--drivers/net/usb/cdc_ether.c22
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/usb/kalmia.c45
-rw-r--r--drivers/net/usb/qmi_wwan.c4
-rw-r--r--drivers/net/usb/r8152.c14
-rw-r--r--drivers/net/veth.c7
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/vxlan.c788
-rw-r--r--drivers/net/wan/dlci.c2
-rw-r--r--drivers/net/wan/hdlc.c2
-rw-r--r--drivers/net/wan/ixp4xx_hss.c1
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/airo.c3
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig39
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile20
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c295
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h224
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c1189
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h516
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c665
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h369
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c503
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h90
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h137
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c1000
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h368
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c152
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h1338
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c1167
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c510
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h304
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c3069
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h61
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2507
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h355
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h990
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h449
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h170
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c417
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h39
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2081
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h3052
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c79
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h14
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c51
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c14
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c12
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig10
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c89
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h23
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c80
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c107
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h345
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h1774
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c430
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h59
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c99
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c28
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h20
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c66
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c55
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c101
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c168
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c343
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c182
-rw-r--r--drivers/net/wireless/ath/regd.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig12
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile21
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c38
-rw-r--r--drivers/net/wireless/ath/wil6210/debug.c69
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c29
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c64
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c54
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.c20
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h235
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c205
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h36
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h28
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c60
-rw-r--r--drivers/net/wireless/b43/Kconfig12
-rw-r--r--drivers/net/wireless/b43/main.c12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c302
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c117
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c18
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c52
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c176
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c943
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h20
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c18
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c2
-rw-r--r--drivers/net/wireless/cw1200/Kconfig30
-rw-r--r--drivers/net/wireless/cw1200/Makefile21
-rw-r--r--drivers/net/wireless/cw1200/bh.c619
-rw-r--r--drivers/net/wireless/cw1200/bh.h28
-rw-r--r--drivers/net/wireless/cw1200/cw1200.h323
-rw-r--r--drivers/net/wireless/cw1200/cw1200_sdio.c425
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c471
-rw-r--r--drivers/net/wireless/cw1200/debug.c428
-rw-r--r--drivers/net/wireless/cw1200/debug.h93
-rw-r--r--drivers/net/wireless/cw1200/fwio.c520
-rw-r--r--drivers/net/wireless/cw1200/fwio.h39
-rw-r--r--drivers/net/wireless/cw1200/hwbus.h33
-rw-r--r--drivers/net/wireless/cw1200/hwio.c312
-rw-r--r--drivers/net/wireless/cw1200/hwio.h247
-rw-r--r--drivers/net/wireless/cw1200/main.c605
-rw-r--r--drivers/net/wireless/cw1200/pm.c367
-rw-r--r--drivers/net/wireless/cw1200/pm.h43
-rw-r--r--drivers/net/wireless/cw1200/queue.c583
-rw-r--r--drivers/net/wireless/cw1200/queue.h116
-rw-r--r--drivers/net/wireless/cw1200/scan.c461
-rw-r--r--drivers/net/wireless/cw1200/scan.h56
-rw-r--r--drivers/net/wireless/cw1200/sta.c2403
-rw-r--r--drivers/net/wireless/cw1200/sta.h123
-rw-r--r--drivers/net/wireless/cw1200/txrx.c1473
-rw-r--r--drivers/net/wireless/cw1200/txrx.h106
-rw-r--r--drivers/net/wireless/cw1200/wsm.c1822
-rw-r--r--drivers/net/wireless/cw1200/wsm.h1870
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c3
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c5
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c18
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c25
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h8
-rw-r--r--drivers/net/wireless/iwlegacy/common.c11
-rw-r--r--drivers/net/wireless/iwlegacy/common.h41
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig10
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/Makefile1
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h58
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c8
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h12
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h73
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c107
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c26
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c37
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c67
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c51
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c42
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c12
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c471
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c39
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c65
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h54
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c39
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c852
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h161
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h309
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c29
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c197
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c453
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h98
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h233
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c37
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c97
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c299
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h204
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c212
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c37
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c76
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c218
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c25
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c171
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c22
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c24
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c530
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c41
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c54
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c115
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c68
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c40
-rw-r--r--drivers/net/wireless/libertas/mesh.c2
-rw-r--r--drivers/net/wireless/mwifiex/11h.c101
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c122
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c1
-rw-r--r--drivers/net/wireless/mwifiex/fw.h33
-rw-r--r--drivers/net/wireless/mwifiex/init.c115
-rw-r--r--drivers/net/wireless/mwifiex/join.c7
-rw-r--r--drivers/net/wireless/mwifiex/main.c101
-rw-r--r--drivers/net/wireless/mwifiex/main.h32
-rw-r--r--drivers/net/wireless/mwifiex/scan.c60
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c463
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h340
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c62
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c17
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c11
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c52
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c21
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c25
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c5
-rw-r--r--drivers/net/wireless/mwl8k.c11
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.h2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c3
-rw-r--r--drivers/net/wireless/p54/p54spi.c37
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c66
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c66
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c66
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c835
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c68
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c127
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c56
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c32
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h21
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c60
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c58
-rw-r--r--drivers/net/wireless/rtlwifi/base.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c6
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c30
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c47
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h15
-rw-r--r--drivers/net/wireless/ti/wlcore/Makefile2
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c284
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c14
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c216
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.h28
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c2
-rw-r--r--drivers/net/xen-netback/common.h14
-rw-r--r--drivers/net/xen-netback/interface.c102
-rw-r--r--drivers/net/xen-netback/netback.c42
-rw-r--r--drivers/net/xen-netback/xenbus.c53
-rw-r--r--drivers/net/xen-netfront.c253
631 files changed, 59414 insertions, 11654 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3835321b8cf3..b45b240889f5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -25,6 +25,9 @@ menuconfig NETDEVICES
25# that for each of the symbols. 25# that for each of the symbols.
26if NETDEVICES 26if NETDEVICES
27 27
28config MII
29 tristate
30
28config NET_CORE 31config NET_CORE
29 default y 32 default y
30 bool "Network core driver support" 33 bool "Network core driver support"
@@ -100,13 +103,6 @@ config NET_FC
100 adaptor below. You also should have said Y to "SCSI support" and 103 adaptor below. You also should have said Y to "SCSI support" and
101 "SCSI generic support". 104 "SCSI generic support".
102 105
103config MII
104 tristate "Generic Media Independent Interface device support"
105 help
106 Most ethernet controllers have MII transceiver either as an external
107 or internal device. It is safe to say Y or M here even if your
108 ethernet card lacks MII.
109
110config IFB 106config IFB
111 tristate "Intermediate Functional Block support" 107 tristate "Intermediate Functional Block support"
112 depends on NET_CLS_ACT 108 depends on NET_CLS_ACT
@@ -244,6 +240,16 @@ config VIRTIO_NET
244 This is the virtual network driver for virtio. It can be used with 240 This is the virtual network driver for virtio. It can be used with
245 lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. 241 lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
246 242
243config NLMON
244 tristate "Virtual netlink monitoring device"
245 ---help---
246 This option enables a monitoring net device for netlink skbs. The
247 purpose of this is to analyze netlink messages with packet sockets.
248 Thus applications like tcpdump will be able to see local netlink
249 messages if they tap into the netlink device, record pcaps for further
250 diagnostics, etc. This is mostly intended for developers or support
251 to debug netlink issues. If unsure, say N.
252
247endif # NET_CORE 253endif # NET_CORE
248 254
249config SUNGEM_PHY 255config SUNGEM_PHY
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ef3d090efedf..3fef8a81c0f6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_TUN) += tun.o
22obj-$(CONFIG_VETH) += veth.o 22obj-$(CONFIG_VETH) += veth.o
23obj-$(CONFIG_VIRTIO_NET) += virtio_net.o 23obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
24obj-$(CONFIG_VXLAN) += vxlan.o 24obj-$(CONFIG_VXLAN) += vxlan.o
25obj-$(CONFIG_NLMON) += nlmon.o
25 26
26# 27#
27# Networking Drivers 28# Networking Drivers
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index e02cc265723a..4ea8ed150d46 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1056,7 +1056,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
1056 * 1056 *
1057 */ 1057 */
1058 1058
1059static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct slave *slave2) 1059static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
1060{ 1060{
1061 u8 tmp_mac_addr[ETH_ALEN]; 1061 u8 tmp_mac_addr[ETH_ALEN];
1062 1062
@@ -1129,6 +1129,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
1129{ 1129{
1130 int perm_curr_diff; 1130 int perm_curr_diff;
1131 int perm_bond_diff; 1131 int perm_bond_diff;
1132 struct slave *found_slave;
1132 1133
1133 perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr, 1134 perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
1134 slave->dev->dev_addr); 1135 slave->dev->dev_addr);
@@ -1136,21 +1137,12 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
1136 bond->dev->dev_addr); 1137 bond->dev->dev_addr);
1137 1138
1138 if (perm_curr_diff && perm_bond_diff) { 1139 if (perm_curr_diff && perm_bond_diff) {
1139 struct slave *tmp_slave; 1140 found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr);
1140 int i, found = 0;
1141
1142 bond_for_each_slave(bond, tmp_slave, i) {
1143 if (ether_addr_equal_64bits(slave->perm_hwaddr,
1144 tmp_slave->dev->dev_addr)) {
1145 found = 1;
1146 break;
1147 }
1148 }
1149 1141
1150 if (found) { 1142 if (found_slave) {
1151 /* locking: needs RTNL and nothing else */ 1143 /* locking: needs RTNL and nothing else */
1152 alb_swap_mac_addr(bond, slave, tmp_slave); 1144 alb_swap_mac_addr(slave, found_slave);
1153 alb_fasten_mac_swap(bond, slave, tmp_slave); 1145 alb_fasten_mac_swap(bond, slave, found_slave);
1154 } 1146 }
1155 } 1147 }
1156} 1148}
@@ -1175,16 +1167,13 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
1175 * @slave. 1167 * @slave.
1176 * 1168 *
1177 * assumption: this function is called before @slave is attached to the 1169 * assumption: this function is called before @slave is attached to the
1178 * bond slave list. 1170 * bond slave list.
1179 *
1180 * caller must hold the bond lock for write since the mac addresses are compared
1181 * and may be swapped.
1182 */ 1171 */
1183static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave) 1172static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
1184{ 1173{
1185 struct slave *tmp_slave1, *tmp_slave2, *free_mac_slave; 1174 struct slave *tmp_slave1, *free_mac_slave = NULL;
1186 struct slave *has_bond_addr = bond->curr_active_slave; 1175 struct slave *has_bond_addr = bond->curr_active_slave;
1187 int i, j, found = 0; 1176 int i;
1188 1177
1189 if (bond->slave_cnt == 0) { 1178 if (bond->slave_cnt == 0) {
1190 /* this is the first slave */ 1179 /* this is the first slave */
@@ -1196,15 +1185,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
1196 * slaves in the bond. 1185 * slaves in the bond.
1197 */ 1186 */
1198 if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) { 1187 if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
1199 bond_for_each_slave(bond, tmp_slave1, i) { 1188 if (!bond_slave_has_mac(bond, slave->dev->dev_addr))
1200 if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
1201 slave->dev->dev_addr)) {
1202 found = 1;
1203 break;
1204 }
1205 }
1206
1207 if (!found)
1208 return 0; 1189 return 0;
1209 1190
1210 /* Try setting slave mac to bond address and fall-through 1191 /* Try setting slave mac to bond address and fall-through
@@ -1215,19 +1196,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
1215 /* The slave's address is equal to the address of the bond. 1196 /* The slave's address is equal to the address of the bond.
1216 * Search for a spare address in the bond for this slave. 1197 * Search for a spare address in the bond for this slave.
1217 */ 1198 */
1218 free_mac_slave = NULL;
1219
1220 bond_for_each_slave(bond, tmp_slave1, i) { 1199 bond_for_each_slave(bond, tmp_slave1, i) {
1221 found = 0; 1200 if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
1222 bond_for_each_slave(bond, tmp_slave2, j) {
1223 if (ether_addr_equal_64bits(tmp_slave1->perm_hwaddr,
1224 tmp_slave2->dev->dev_addr)) {
1225 found = 1;
1226 break;
1227 }
1228 }
1229
1230 if (!found) {
1231 /* no slave has tmp_slave1's perm addr 1201 /* no slave has tmp_slave1's perm addr
1232 * as its curr addr 1202 * as its curr addr
1233 */ 1203 */
@@ -1607,15 +1577,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
1607 return res; 1577 return res;
1608 } 1578 }
1609 1579
1610 /* caller must hold the bond lock for write since the mac addresses
1611 * are compared and may be swapped.
1612 */
1613 read_lock(&bond->lock);
1614
1615 res = alb_handle_addr_collision_on_attach(bond, slave); 1580 res = alb_handle_addr_collision_on_attach(bond, slave);
1616
1617 read_unlock(&bond->lock);
1618
1619 if (res) { 1581 if (res) {
1620 return res; 1582 return res;
1621 } 1583 }
@@ -1698,7 +1660,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
1698 __acquires(&bond->curr_slave_lock) 1660 __acquires(&bond->curr_slave_lock)
1699{ 1661{
1700 struct slave *swap_slave; 1662 struct slave *swap_slave;
1701 int i;
1702 1663
1703 if (bond->curr_active_slave == new_slave) { 1664 if (bond->curr_active_slave == new_slave) {
1704 return; 1665 return;
@@ -1720,17 +1681,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
1720 /* set the new curr_active_slave to the bonds mac address 1681 /* set the new curr_active_slave to the bonds mac address
1721 * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave 1682 * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
1722 */ 1683 */
1723 if (!swap_slave) { 1684 if (!swap_slave)
1724 struct slave *tmp_slave; 1685 swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr);
1725 /* find slave that is holding the bond's mac address */
1726 bond_for_each_slave(bond, tmp_slave, i) {
1727 if (ether_addr_equal_64bits(tmp_slave->dev->dev_addr,
1728 bond->dev->dev_addr)) {
1729 swap_slave = tmp_slave;
1730 break;
1731 }
1732 }
1733 }
1734 1686
1735 /* 1687 /*
1736 * Arrange for swap_slave and new_slave to temporarily be 1688 * Arrange for swap_slave and new_slave to temporarily be
@@ -1750,16 +1702,12 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
1750 /* curr_active_slave must be set before calling alb_swap_mac_addr */ 1702 /* curr_active_slave must be set before calling alb_swap_mac_addr */
1751 if (swap_slave) { 1703 if (swap_slave) {
1752 /* swap mac address */ 1704 /* swap mac address */
1753 alb_swap_mac_addr(bond, swap_slave, new_slave); 1705 alb_swap_mac_addr(swap_slave, new_slave);
1754 } else {
1755 /* set the new_slave to the bond mac address */
1756 alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
1757 }
1758
1759 if (swap_slave) {
1760 alb_fasten_mac_swap(bond, swap_slave, new_slave); 1706 alb_fasten_mac_swap(bond, swap_slave, new_slave);
1761 read_lock(&bond->lock); 1707 read_lock(&bond->lock);
1762 } else { 1708 } else {
1709 /* set the new_slave to the bond mac address */
1710 alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
1763 read_lock(&bond->lock); 1711 read_lock(&bond->lock);
1764 alb_send_learning_packets(new_slave, bond->dev->dev_addr); 1712 alb_send_learning_packets(new_slave, bond->dev->dev_addr);
1765 } 1713 }
@@ -1776,9 +1724,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1776{ 1724{
1777 struct bonding *bond = netdev_priv(bond_dev); 1725 struct bonding *bond = netdev_priv(bond_dev);
1778 struct sockaddr *sa = addr; 1726 struct sockaddr *sa = addr;
1779 struct slave *slave, *swap_slave; 1727 struct slave *swap_slave;
1780 int res; 1728 int res;
1781 int i;
1782 1729
1783 if (!is_valid_ether_addr(sa->sa_data)) { 1730 if (!is_valid_ether_addr(sa->sa_data)) {
1784 return -EADDRNOTAVAIL; 1731 return -EADDRNOTAVAIL;
@@ -1799,18 +1746,10 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1799 return 0; 1746 return 0;
1800 } 1747 }
1801 1748
1802 swap_slave = NULL; 1749 swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
1803
1804 bond_for_each_slave(bond, slave, i) {
1805 if (ether_addr_equal_64bits(slave->dev->dev_addr,
1806 bond_dev->dev_addr)) {
1807 swap_slave = slave;
1808 break;
1809 }
1810 }
1811 1750
1812 if (swap_slave) { 1751 if (swap_slave) {
1813 alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave); 1752 alb_swap_mac_addr(swap_slave, bond->curr_active_slave);
1814 alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave); 1753 alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
1815 } else { 1754 } else {
1816 alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); 1755 alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f97569613526..07f257d44a1e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -104,6 +104,7 @@ static char *xmit_hash_policy;
104static int arp_interval = BOND_LINK_ARP_INTERV; 104static int arp_interval = BOND_LINK_ARP_INTERV;
105static char *arp_ip_target[BOND_MAX_ARP_TARGETS]; 105static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
106static char *arp_validate; 106static char *arp_validate;
107static char *arp_all_targets;
107static char *fail_over_mac; 108static char *fail_over_mac;
108static int all_slaves_active = 0; 109static int all_slaves_active = 0;
109static struct bond_params bonding_defaults; 110static struct bond_params bonding_defaults;
@@ -166,6 +167,8 @@ module_param(arp_validate, charp, 0);
166MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; " 167MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
167 "0 for none (default), 1 for active, " 168 "0 for none (default), 1 for active, "
168 "2 for backup, 3 for all"); 169 "2 for backup, 3 for all");
170module_param(arp_all_targets, charp, 0);
171MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
169module_param(fail_over_mac, charp, 0); 172module_param(fail_over_mac, charp, 0);
170MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to " 173MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
171 "the same MAC; 0 for none (default), " 174 "the same MAC; 0 for none (default), "
@@ -216,6 +219,12 @@ const struct bond_parm_tbl xmit_hashtype_tbl[] = {
216{ NULL, -1}, 219{ NULL, -1},
217}; 220};
218 221
222const struct bond_parm_tbl arp_all_targets_tbl[] = {
223{ "any", BOND_ARP_TARGETS_ANY},
224{ "all", BOND_ARP_TARGETS_ALL},
225{ NULL, -1},
226};
227
219const struct bond_parm_tbl arp_validate_tbl[] = { 228const struct bond_parm_tbl arp_validate_tbl[] = {
220{ "none", BOND_ARP_VALIDATE_NONE}, 229{ "none", BOND_ARP_VALIDATE_NONE},
221{ "active", BOND_ARP_VALIDATE_ACTIVE}, 230{ "active", BOND_ARP_VALIDATE_ACTIVE},
@@ -706,45 +715,6 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
706 return err; 715 return err;
707} 716}
708 717
709/*
710 * Add a Multicast address to slaves
711 * according to mode
712 */
713static void bond_mc_add(struct bonding *bond, void *addr)
714{
715 if (USES_PRIMARY(bond->params.mode)) {
716 /* write lock already acquired */
717 if (bond->curr_active_slave)
718 dev_mc_add(bond->curr_active_slave->dev, addr);
719 } else {
720 struct slave *slave;
721 int i;
722
723 bond_for_each_slave(bond, slave, i)
724 dev_mc_add(slave->dev, addr);
725 }
726}
727
728/*
729 * Remove a multicast address from slave
730 * according to mode
731 */
732static void bond_mc_del(struct bonding *bond, void *addr)
733{
734 if (USES_PRIMARY(bond->params.mode)) {
735 /* write lock already acquired */
736 if (bond->curr_active_slave)
737 dev_mc_del(bond->curr_active_slave->dev, addr);
738 } else {
739 struct slave *slave;
740 int i;
741 bond_for_each_slave(bond, slave, i) {
742 dev_mc_del(slave->dev, addr);
743 }
744 }
745}
746
747
748static void __bond_resend_igmp_join_requests(struct net_device *dev) 718static void __bond_resend_igmp_join_requests(struct net_device *dev)
749{ 719{
750 struct in_device *in_dev; 720 struct in_device *in_dev;
@@ -810,17 +780,15 @@ static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
810 bond_resend_igmp_join_requests(bond); 780 bond_resend_igmp_join_requests(bond);
811} 781}
812 782
813/* 783/* Flush bond's hardware addresses from slave
814 * flush all members of flush->mc_list from device dev->mc_list
815 */ 784 */
816static void bond_mc_list_flush(struct net_device *bond_dev, 785static void bond_hw_addr_flush(struct net_device *bond_dev,
817 struct net_device *slave_dev) 786 struct net_device *slave_dev)
818{ 787{
819 struct bonding *bond = netdev_priv(bond_dev); 788 struct bonding *bond = netdev_priv(bond_dev);
820 struct netdev_hw_addr *ha;
821 789
822 netdev_for_each_mc_addr(ha, bond_dev) 790 dev_uc_unsync(slave_dev, bond_dev);
823 dev_mc_del(slave_dev, ha->addr); 791 dev_mc_unsync(slave_dev, bond_dev);
824 792
825 if (bond->params.mode == BOND_MODE_8023AD) { 793 if (bond->params.mode == BOND_MODE_8023AD) {
826 /* del lacpdu mc addr from mc list */ 794 /* del lacpdu mc addr from mc list */
@@ -832,22 +800,14 @@ static void bond_mc_list_flush(struct net_device *bond_dev,
832 800
833/*--------------------------- Active slave change ---------------------------*/ 801/*--------------------------- Active slave change ---------------------------*/
834 802
835/* 803/* Update the hardware address list and promisc/allmulti for the new and
836 * Update the mc list and multicast-related flags for the new and 804 * old active slaves (if any). Modes that are !USES_PRIMARY keep all
837 * old active slaves (if any) according to the multicast mode, and 805 * slaves up date at all times; only the USES_PRIMARY modes need to call
838 * promiscuous flags unconditionally. 806 * this function to swap these settings during a failover.
839 */ 807 */
840static void bond_mc_swap(struct bonding *bond, struct slave *new_active, 808static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
841 struct slave *old_active) 809 struct slave *old_active)
842{ 810{
843 struct netdev_hw_addr *ha;
844
845 if (!USES_PRIMARY(bond->params.mode))
846 /* nothing to do - mc list is already up-to-date on
847 * all slaves
848 */
849 return;
850
851 if (old_active) { 811 if (old_active) {
852 if (bond->dev->flags & IFF_PROMISC) 812 if (bond->dev->flags & IFF_PROMISC)
853 dev_set_promiscuity(old_active->dev, -1); 813 dev_set_promiscuity(old_active->dev, -1);
@@ -855,10 +815,7 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
855 if (bond->dev->flags & IFF_ALLMULTI) 815 if (bond->dev->flags & IFF_ALLMULTI)
856 dev_set_allmulti(old_active->dev, -1); 816 dev_set_allmulti(old_active->dev, -1);
857 817
858 netif_addr_lock_bh(bond->dev); 818 bond_hw_addr_flush(bond->dev, old_active->dev);
859 netdev_for_each_mc_addr(ha, bond->dev)
860 dev_mc_del(old_active->dev, ha->addr);
861 netif_addr_unlock_bh(bond->dev);
862 } 819 }
863 820
864 if (new_active) { 821 if (new_active) {
@@ -870,12 +827,29 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
870 dev_set_allmulti(new_active->dev, 1); 827 dev_set_allmulti(new_active->dev, 1);
871 828
872 netif_addr_lock_bh(bond->dev); 829 netif_addr_lock_bh(bond->dev);
873 netdev_for_each_mc_addr(ha, bond->dev) 830 dev_uc_sync(new_active->dev, bond->dev);
874 dev_mc_add(new_active->dev, ha->addr); 831 dev_mc_sync(new_active->dev, bond->dev);
875 netif_addr_unlock_bh(bond->dev); 832 netif_addr_unlock_bh(bond->dev);
876 } 833 }
877} 834}
878 835
836/**
837 * bond_set_dev_addr - clone slave's address to bond
838 * @bond_dev: bond net device
839 * @slave_dev: slave net device
840 *
841 * Should be called with RTNL held.
842 */
843static void bond_set_dev_addr(struct net_device *bond_dev,
844 struct net_device *slave_dev)
845{
846 pr_debug("bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
847 bond_dev, slave_dev, slave_dev->addr_len);
848 memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
849 bond_dev->addr_assign_type = NET_ADDR_STOLEN;
850 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
851}
852
879/* 853/*
880 * bond_do_fail_over_mac 854 * bond_do_fail_over_mac
881 * 855 *
@@ -898,11 +872,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
898 switch (bond->params.fail_over_mac) { 872 switch (bond->params.fail_over_mac) {
899 case BOND_FOM_ACTIVE: 873 case BOND_FOM_ACTIVE:
900 if (new_active) { 874 if (new_active) {
901 memcpy(bond->dev->dev_addr, new_active->dev->dev_addr,
902 new_active->dev->addr_len);
903 write_unlock_bh(&bond->curr_slave_lock); 875 write_unlock_bh(&bond->curr_slave_lock);
904 read_unlock(&bond->lock); 876 read_unlock(&bond->lock);
905 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); 877 bond_set_dev_addr(bond->dev, new_active->dev);
906 read_lock(&bond->lock); 878 read_lock(&bond->lock);
907 write_lock_bh(&bond->curr_slave_lock); 879 write_lock_bh(&bond->curr_slave_lock);
908 } 880 }
@@ -1090,7 +1062,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1090 } 1062 }
1091 1063
1092 if (USES_PRIMARY(bond->params.mode)) 1064 if (USES_PRIMARY(bond->params.mode))
1093 bond_mc_swap(bond, new_active, old_active); 1065 bond_hw_addr_swap(bond, new_active, old_active);
1094 1066
1095 if (bond_is_lb(bond)) { 1067 if (bond_is_lb(bond)) {
1096 bond_alb_handle_active_change(bond, new_active); 1068 bond_alb_handle_active_change(bond, new_active);
@@ -1333,17 +1305,6 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
1333 1305
1334/*---------------------------------- IOCTL ----------------------------------*/ 1306/*---------------------------------- IOCTL ----------------------------------*/
1335 1307
1336static void bond_set_dev_addr(struct net_device *bond_dev,
1337 struct net_device *slave_dev)
1338{
1339 pr_debug("bond_dev=%p\n", bond_dev);
1340 pr_debug("slave_dev=%p\n", slave_dev);
1341 pr_debug("slave_dev->addr_len=%d\n", slave_dev->addr_len);
1342 memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
1343 bond_dev->addr_assign_type = NET_ADDR_SET;
1344 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
1345}
1346
1347static netdev_features_t bond_fix_features(struct net_device *dev, 1308static netdev_features_t bond_fix_features(struct net_device *dev,
1348 netdev_features_t features) 1309 netdev_features_t features)
1349{ 1310{
@@ -1425,8 +1386,6 @@ done:
1425static void bond_setup_by_slave(struct net_device *bond_dev, 1386static void bond_setup_by_slave(struct net_device *bond_dev,
1426 struct net_device *slave_dev) 1387 struct net_device *slave_dev)
1427{ 1388{
1428 struct bonding *bond = netdev_priv(bond_dev);
1429
1430 bond_dev->header_ops = slave_dev->header_ops; 1389 bond_dev->header_ops = slave_dev->header_ops;
1431 1390
1432 bond_dev->type = slave_dev->type; 1391 bond_dev->type = slave_dev->type;
@@ -1435,7 +1394,6 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
1435 1394
1436 memcpy(bond_dev->broadcast, slave_dev->broadcast, 1395 memcpy(bond_dev->broadcast, slave_dev->broadcast,
1437 slave_dev->addr_len); 1396 slave_dev->addr_len);
1438 bond->setup_by_slave = 1;
1439} 1397}
1440 1398
1441/* On bonding slaves other than the currently active slave, suppress 1399/* On bonding slaves other than the currently active slave, suppress
@@ -1533,10 +1491,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1533 struct bonding *bond = netdev_priv(bond_dev); 1491 struct bonding *bond = netdev_priv(bond_dev);
1534 const struct net_device_ops *slave_ops = slave_dev->netdev_ops; 1492 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1535 struct slave *new_slave = NULL; 1493 struct slave *new_slave = NULL;
1536 struct netdev_hw_addr *ha;
1537 struct sockaddr addr; 1494 struct sockaddr addr;
1538 int link_reporting; 1495 int link_reporting;
1539 int res = 0; 1496 int res = 0, i;
1540 1497
1541 if (!bond->params.use_carrier && 1498 if (!bond->params.use_carrier &&
1542 slave_dev->ethtool_ops->get_link == NULL && 1499 slave_dev->ethtool_ops->get_link == NULL &&
@@ -1643,7 +1600,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1643 1600
1644 /* If this is the first slave, then we need to set the master's hardware 1601 /* If this is the first slave, then we need to set the master's hardware
1645 * address to be the same as the slave's. */ 1602 * address to be the same as the slave's. */
1646 if (bond->slave_cnt == 0 && bond->dev_addr_from_first) 1603 if (!bond->slave_cnt && bond->dev->addr_assign_type == NET_ADDR_RANDOM)
1647 bond_set_dev_addr(bond->dev, slave_dev); 1604 bond_set_dev_addr(bond->dev, slave_dev);
1648 1605
1649 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); 1606 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
@@ -1713,10 +1670,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1713 goto err_close; 1670 goto err_close;
1714 } 1671 }
1715 1672
1716 /* If the mode USES_PRIMARY, then the new slave gets the 1673 /* If the mode USES_PRIMARY, then the following is handled by
1717 * master's promisc (and mc) settings only if it becomes the 1674 * bond_change_active_slave().
1718 * curr_active_slave, and that is taken care of later when calling
1719 * bond_change_active()
1720 */ 1675 */
1721 if (!USES_PRIMARY(bond->params.mode)) { 1676 if (!USES_PRIMARY(bond->params.mode)) {
1722 /* set promiscuity level to new slave */ 1677 /* set promiscuity level to new slave */
@@ -1734,9 +1689,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1734 } 1689 }
1735 1690
1736 netif_addr_lock_bh(bond_dev); 1691 netif_addr_lock_bh(bond_dev);
1737 /* upload master's mc_list to new slave */ 1692
1738 netdev_for_each_mc_addr(ha, bond_dev) 1693 dev_mc_sync_multiple(slave_dev, bond_dev);
1739 dev_mc_add(slave_dev, ha->addr); 1694 dev_uc_sync_multiple(slave_dev, bond_dev);
1695
1740 netif_addr_unlock_bh(bond_dev); 1696 netif_addr_unlock_bh(bond_dev);
1741 } 1697 }
1742 1698
@@ -1766,6 +1722,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1766 1722
1767 new_slave->last_arp_rx = jiffies - 1723 new_slave->last_arp_rx = jiffies -
1768 (msecs_to_jiffies(bond->params.arp_interval) + 1); 1724 (msecs_to_jiffies(bond->params.arp_interval) + 1);
1725 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
1726 new_slave->target_last_arp_rx[i] = new_slave->last_arp_rx;
1769 1727
1770 if (bond->params.miimon && !bond->params.use_carrier) { 1728 if (bond->params.miimon && !bond->params.use_carrier) {
1771 link_reporting = bond_check_dev_link(bond, slave_dev, 1); 1729 link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1915,11 +1873,9 @@ err_dest_symlinks:
1915 bond_destroy_slave_symlinks(bond_dev, slave_dev); 1873 bond_destroy_slave_symlinks(bond_dev, slave_dev);
1916 1874
1917err_detach: 1875err_detach:
1918 if (!USES_PRIMARY(bond->params.mode)) { 1876 if (!USES_PRIMARY(bond->params.mode))
1919 netif_addr_lock_bh(bond_dev); 1877 bond_hw_addr_flush(bond_dev, slave_dev);
1920 bond_mc_list_flush(bond_dev, slave_dev); 1878
1921 netif_addr_unlock_bh(bond_dev);
1922 }
1923 bond_del_vlans_from_slave(bond, slave_dev); 1879 bond_del_vlans_from_slave(bond, slave_dev);
1924 write_lock_bh(&bond->lock); 1880 write_lock_bh(&bond->lock);
1925 bond_detach_slave(bond, new_slave); 1881 bond_detach_slave(bond, new_slave);
@@ -2089,7 +2045,6 @@ static int __bond_release_one(struct net_device *bond_dev,
2089 if (bond->slave_cnt == 0) { 2045 if (bond->slave_cnt == 0) {
2090 bond_set_carrier(bond); 2046 bond_set_carrier(bond);
2091 eth_hw_addr_random(bond_dev); 2047 eth_hw_addr_random(bond_dev);
2092 bond->dev_addr_from_first = true;
2093 2048
2094 if (bond_vlan_used(bond)) { 2049 if (bond_vlan_used(bond)) {
2095 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", 2050 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
@@ -2118,9 +2073,8 @@ static int __bond_release_one(struct net_device *bond_dev,
2118 2073
2119 bond_del_vlans_from_slave(bond, slave_dev); 2074 bond_del_vlans_from_slave(bond, slave_dev);
2120 2075
2121 /* If the mode USES_PRIMARY, then we should only remove its 2076 /* If the mode USES_PRIMARY, then this cases was handled above by
2122 * promisc and mc settings if it was the curr_active_slave, but that was 2077 * bond_change_active_slave(..., NULL)
2123 * already taken care of above when we detached the slave
2124 */ 2078 */
2125 if (!USES_PRIMARY(bond->params.mode)) { 2079 if (!USES_PRIMARY(bond->params.mode)) {
2126 /* unset promiscuity level from slave */ 2080 /* unset promiscuity level from slave */
@@ -2131,10 +2085,7 @@ static int __bond_release_one(struct net_device *bond_dev,
2131 if (bond_dev->flags & IFF_ALLMULTI) 2085 if (bond_dev->flags & IFF_ALLMULTI)
2132 dev_set_allmulti(slave_dev, -1); 2086 dev_set_allmulti(slave_dev, -1);
2133 2087
2134 /* flush master's mc_list from slave */ 2088 bond_hw_addr_flush(bond_dev, slave_dev);
2135 netif_addr_lock_bh(bond_dev);
2136 bond_mc_list_flush(bond_dev, slave_dev);
2137 netif_addr_unlock_bh(bond_dev);
2138 } 2089 }
2139 2090
2140 bond_upper_dev_unlink(bond_dev, slave_dev); 2091 bond_upper_dev_unlink(bond_dev, slave_dev);
@@ -2672,18 +2623,19 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2672static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) 2623static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
2673{ 2624{
2674 int i; 2625 int i;
2675 __be32 *targets = bond->params.arp_targets;
2676 2626
2677 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { 2627 if (!sip || !bond_has_this_ip(bond, tip)) {
2678 pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n", 2628 pr_debug("bva: sip %pI4 tip %pI4 not found\n", &sip, &tip);
2679 &sip, &tip, i, &targets[i], 2629 return;
2680 bond_has_this_ip(bond, tip)); 2630 }
2681 if (sip == targets[i]) { 2631
2682 if (bond_has_this_ip(bond, tip)) 2632 i = bond_get_targets_ip(bond->params.arp_targets, sip);
2683 slave->last_arp_rx = jiffies; 2633 if (i == -1) {
2684 return; 2634 pr_debug("bva: sip %pI4 not found in targets\n", &sip);
2685 } 2635 return;
2686 } 2636 }
2637 slave->last_arp_rx = jiffies;
2638 slave->target_last_arp_rx[i] = jiffies;
2687} 2639}
2688 2640
2689static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, 2641static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
@@ -2698,6 +2650,10 @@ static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2698 return RX_HANDLER_ANOTHER; 2650 return RX_HANDLER_ANOTHER;
2699 2651
2700 read_lock(&bond->lock); 2652 read_lock(&bond->lock);
2653
2654 if (!slave_do_arp_validate(bond, slave))
2655 goto out_unlock;
2656
2701 alen = arp_hdr_len(bond->dev); 2657 alen = arp_hdr_len(bond->dev);
2702 2658
2703 pr_debug("bond_arp_rcv: bond %s skb->dev %s\n", 2659 pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
@@ -2737,10 +2693,17 @@ static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2737 * configuration, the ARP probe will (hopefully) travel from 2693 * configuration, the ARP probe will (hopefully) travel from
2738 * the active, through one switch, the router, then the other 2694 * the active, through one switch, the router, then the other
2739 * switch before reaching the backup. 2695 * switch before reaching the backup.
2696 *
2697 * We 'trust' the arp requests if there is an active slave and
2698 * it received valid arp reply(s) after it became active. This
2699 * is done to avoid endless looping when we can't reach the
2700 * arp_ip_target and fool ourselves with our own arp requests.
2740 */ 2701 */
2741 if (bond_is_active_slave(slave)) 2702 if (bond_is_active_slave(slave))
2742 bond_validate_arp(bond, slave, sip, tip); 2703 bond_validate_arp(bond, slave, sip, tip);
2743 else 2704 else if (bond->curr_active_slave &&
2705 time_after(slave_last_rx(bond, bond->curr_active_slave),
2706 bond->curr_active_slave->jiffies))
2744 bond_validate_arp(bond, slave, tip, sip); 2707 bond_validate_arp(bond, slave, tip, sip);
2745 2708
2746out_unlock: 2709out_unlock:
@@ -3225,7 +3188,7 @@ static int bond_slave_netdev_event(unsigned long event,
3225 3188
3226 switch (event) { 3189 switch (event) {
3227 case NETDEV_UNREGISTER: 3190 case NETDEV_UNREGISTER:
3228 if (bond->setup_by_slave) 3191 if (bond_dev->type != ARPHRD_ETHER)
3229 bond_release_and_destroy(bond_dev, slave_dev); 3192 bond_release_and_destroy(bond_dev, slave_dev);
3230 else 3193 else
3231 bond_release(bond_dev, slave_dev); 3194 bond_release(bond_dev, slave_dev);
@@ -3289,7 +3252,7 @@ static int bond_slave_netdev_event(unsigned long event,
3289static int bond_netdev_event(struct notifier_block *this, 3252static int bond_netdev_event(struct notifier_block *this,
3290 unsigned long event, void *ptr) 3253 unsigned long event, void *ptr)
3291{ 3254{
3292 struct net_device *event_dev = (struct net_device *)ptr; 3255 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
3293 3256
3294 pr_debug("event_dev: %s, event: %lx\n", 3257 pr_debug("event_dev: %s, event: %lx\n",
3295 event_dev ? event_dev->name : "None", 3258 event_dev ? event_dev->name : "None",
@@ -3672,19 +3635,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3672 return res; 3635 return res;
3673} 3636}
3674 3637
3675static bool bond_addr_in_mc_list(unsigned char *addr,
3676 struct netdev_hw_addr_list *list,
3677 int addrlen)
3678{
3679 struct netdev_hw_addr *ha;
3680
3681 netdev_hw_addr_list_for_each(ha, list)
3682 if (!memcmp(ha->addr, addr, addrlen))
3683 return true;
3684
3685 return false;
3686}
3687
3688static void bond_change_rx_flags(struct net_device *bond_dev, int change) 3638static void bond_change_rx_flags(struct net_device *bond_dev, int change)
3689{ 3639{
3690 struct bonding *bond = netdev_priv(bond_dev); 3640 struct bonding *bond = netdev_priv(bond_dev);
@@ -3698,35 +3648,29 @@ static void bond_change_rx_flags(struct net_device *bond_dev, int change)
3698 bond_dev->flags & IFF_ALLMULTI ? 1 : -1); 3648 bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
3699} 3649}
3700 3650
3701static void bond_set_multicast_list(struct net_device *bond_dev) 3651static void bond_set_rx_mode(struct net_device *bond_dev)
3702{ 3652{
3703 struct bonding *bond = netdev_priv(bond_dev); 3653 struct bonding *bond = netdev_priv(bond_dev);
3704 struct netdev_hw_addr *ha; 3654 struct slave *slave;
3705 bool found; 3655 int i;
3706 3656
3707 read_lock(&bond->lock); 3657 read_lock(&bond->lock);
3708 3658
3709 /* looking for addresses to add to slaves' mc list */ 3659 if (USES_PRIMARY(bond->params.mode)) {
3710 netdev_for_each_mc_addr(ha, bond_dev) { 3660 read_lock(&bond->curr_slave_lock);
3711 found = bond_addr_in_mc_list(ha->addr, &bond->mc_list, 3661 slave = bond->curr_active_slave;
3712 bond_dev->addr_len); 3662 if (slave) {
3713 if (!found) 3663 dev_uc_sync(slave->dev, bond_dev);
3714 bond_mc_add(bond, ha->addr); 3664 dev_mc_sync(slave->dev, bond_dev);
3715 } 3665 }
3716 3666 read_unlock(&bond->curr_slave_lock);
3717 /* looking for addresses to delete from slaves' list */ 3667 } else {
3718 netdev_hw_addr_list_for_each(ha, &bond->mc_list) { 3668 bond_for_each_slave(bond, slave, i) {
3719 found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc, 3669 dev_uc_sync_multiple(slave->dev, bond_dev);
3720 bond_dev->addr_len); 3670 dev_mc_sync_multiple(slave->dev, bond_dev);
3721 if (!found) 3671 }
3722 bond_mc_del(bond, ha->addr);
3723 } 3672 }
3724 3673
3725 /* save master's multicast list */
3726 __hw_addr_flush(&bond->mc_list);
3727 __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc,
3728 bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST);
3729
3730 read_unlock(&bond->lock); 3674 read_unlock(&bond->lock);
3731} 3675}
3732 3676
@@ -3871,11 +3815,10 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3871 pr_debug("bond=%p, name=%s\n", 3815 pr_debug("bond=%p, name=%s\n",
3872 bond, bond_dev ? bond_dev->name : "None"); 3816 bond, bond_dev ? bond_dev->name : "None");
3873 3817
3874 /* 3818 /* If fail_over_mac is enabled, do nothing and return success.
3875 * If fail_over_mac is set to active, do nothing and return 3819 * Returning an error causes ifenslave to fail.
3876 * success. Returning an error causes ifenslave to fail.
3877 */ 3820 */
3878 if (bond->params.fail_over_mac == BOND_FOM_ACTIVE) 3821 if (bond->params.fail_over_mac)
3879 return 0; 3822 return 0;
3880 3823
3881 if (!is_valid_ether_addr(sa->sa_data)) 3824 if (!is_valid_ether_addr(sa->sa_data))
@@ -4333,7 +4276,7 @@ static const struct net_device_ops bond_netdev_ops = {
4333 .ndo_get_stats64 = bond_get_stats, 4276 .ndo_get_stats64 = bond_get_stats,
4334 .ndo_do_ioctl = bond_do_ioctl, 4277 .ndo_do_ioctl = bond_do_ioctl,
4335 .ndo_change_rx_flags = bond_change_rx_flags, 4278 .ndo_change_rx_flags = bond_change_rx_flags,
4336 .ndo_set_rx_mode = bond_set_multicast_list, 4279 .ndo_set_rx_mode = bond_set_rx_mode,
4337 .ndo_change_mtu = bond_change_mtu, 4280 .ndo_change_mtu = bond_change_mtu,
4338 .ndo_set_mac_address = bond_set_mac_address, 4281 .ndo_set_mac_address = bond_set_mac_address,
4339 .ndo_neigh_setup = bond_neigh_setup, 4282 .ndo_neigh_setup = bond_neigh_setup,
@@ -4438,8 +4381,6 @@ static void bond_uninit(struct net_device *bond_dev)
4438 4381
4439 bond_debug_unregister(bond); 4382 bond_debug_unregister(bond);
4440 4383
4441 __hw_addr_flush(&bond->mc_list);
4442
4443 list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { 4384 list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
4444 list_del(&vlan->vlan_list); 4385 list_del(&vlan->vlan_list);
4445 kfree(vlan); 4386 kfree(vlan);
@@ -4484,6 +4425,7 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
4484static int bond_check_params(struct bond_params *params) 4425static int bond_check_params(struct bond_params *params)
4485{ 4426{
4486 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; 4427 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4428 int arp_all_targets_value;
4487 4429
4488 /* 4430 /*
4489 * Convert string parameters. 4431 * Convert string parameters.
@@ -4674,7 +4616,11 @@ static int bond_check_params(struct bond_params *params)
4674 arp_ip_target[i]); 4616 arp_ip_target[i]);
4675 arp_interval = 0; 4617 arp_interval = 0;
4676 } else { 4618 } else {
4677 arp_target[arp_ip_count++] = ip; 4619 if (bond_get_targets_ip(arp_target, ip) == -1)
4620 arp_target[arp_ip_count++] = ip;
4621 else
4622 pr_warning("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
4623 &ip);
4678 } 4624 }
4679 } 4625 }
4680 4626
@@ -4705,6 +4651,18 @@ static int bond_check_params(struct bond_params *params)
4705 } else 4651 } else
4706 arp_validate_value = 0; 4652 arp_validate_value = 0;
4707 4653
4654 arp_all_targets_value = 0;
4655 if (arp_all_targets) {
4656 arp_all_targets_value = bond_parse_parm(arp_all_targets,
4657 arp_all_targets_tbl);
4658
4659 if (arp_all_targets_value == -1) {
4660 pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
4661 arp_all_targets);
4662 arp_all_targets_value = 0;
4663 }
4664 }
4665
4708 if (miimon) { 4666 if (miimon) {
4709 pr_info("MII link monitoring set to %d ms\n", miimon); 4667 pr_info("MII link monitoring set to %d ms\n", miimon);
4710 } else if (arp_interval) { 4668 } else if (arp_interval) {
@@ -4769,6 +4727,7 @@ static int bond_check_params(struct bond_params *params)
4769 params->num_peer_notif = num_peer_notif; 4727 params->num_peer_notif = num_peer_notif;
4770 params->arp_interval = arp_interval; 4728 params->arp_interval = arp_interval;
4771 params->arp_validate = arp_validate_value; 4729 params->arp_validate = arp_validate_value;
4730 params->arp_all_targets = arp_all_targets_value;
4772 params->updelay = updelay; 4731 params->updelay = updelay;
4773 params->downdelay = downdelay; 4732 params->downdelay = downdelay;
4774 params->use_carrier = use_carrier; 4733 params->use_carrier = use_carrier;
@@ -4845,12 +4804,9 @@ static int bond_init(struct net_device *bond_dev)
4845 4804
4846 /* Ensure valid dev_addr */ 4805 /* Ensure valid dev_addr */
4847 if (is_zero_ether_addr(bond_dev->dev_addr) && 4806 if (is_zero_ether_addr(bond_dev->dev_addr) &&
4848 bond_dev->addr_assign_type == NET_ADDR_PERM) { 4807 bond_dev->addr_assign_type == NET_ADDR_PERM)
4849 eth_hw_addr_random(bond_dev); 4808 eth_hw_addr_random(bond_dev);
4850 bond->dev_addr_from_first = true;
4851 }
4852 4809
4853 __hw_addr_init(&bond->mc_list);
4854 return 0; 4810 return 0;
4855} 4811}
4856 4812
@@ -4923,7 +4879,7 @@ static int __net_init bond_net_init(struct net *net)
4923 4879
4924 bond_create_proc_dir(bn); 4880 bond_create_proc_dir(bn);
4925 bond_create_sysfs(bn); 4881 bond_create_sysfs(bn);
4926 4882
4927 return 0; 4883 return 0;
4928} 4884}
4929 4885
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index d7434e0a610e..dc36a3d7d9e9 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -231,8 +231,7 @@ static ssize_t bonding_show_slaves(struct device *d,
231} 231}
232 232
233/* 233/*
234 * Set the slaves in the current bond. The bond interface must be 234 * Set the slaves in the current bond.
235 * up for this to succeed.
236 * This is supposed to be only thin wrapper for bond_enslave and bond_release. 235 * This is supposed to be only thin wrapper for bond_enslave and bond_release.
237 * All hard work should be done there. 236 * All hard work should be done there.
238 */ 237 */
@@ -363,7 +362,6 @@ static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
363 362
364/* 363/*
365 * Show and set the bonding transmit hash method. 364 * Show and set the bonding transmit hash method.
366 * The bond interface must be down to change the xmit hash policy.
367 */ 365 */
368static ssize_t bonding_show_xmit_hash(struct device *d, 366static ssize_t bonding_show_xmit_hash(struct device *d,
369 struct device_attribute *attr, 367 struct device_attribute *attr,
@@ -383,20 +381,12 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
383 int new_value, ret = count; 381 int new_value, ret = count;
384 struct bonding *bond = to_bond(d); 382 struct bonding *bond = to_bond(d);
385 383
386 if (bond->dev->flags & IFF_UP) {
387 pr_err("%s: Interface is up. Unable to update xmit policy.\n",
388 bond->dev->name);
389 ret = -EPERM;
390 goto out;
391 }
392
393 new_value = bond_parse_parm(buf, xmit_hashtype_tbl); 384 new_value = bond_parse_parm(buf, xmit_hashtype_tbl);
394 if (new_value < 0) { 385 if (new_value < 0) {
395 pr_err("%s: Ignoring invalid xmit hash policy value %.*s.\n", 386 pr_err("%s: Ignoring invalid xmit hash policy value %.*s.\n",
396 bond->dev->name, 387 bond->dev->name,
397 (int)strlen(buf) - 1, buf); 388 (int)strlen(buf) - 1, buf);
398 ret = -EINVAL; 389 ret = -EINVAL;
399 goto out;
400 } else { 390 } else {
401 bond->params.xmit_policy = new_value; 391 bond->params.xmit_policy = new_value;
402 bond_set_mode_ops(bond, bond->params.mode); 392 bond_set_mode_ops(bond, bond->params.mode);
@@ -404,7 +394,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
404 bond->dev->name, 394 bond->dev->name,
405 xmit_hashtype_tbl[new_value].modename, new_value); 395 xmit_hashtype_tbl[new_value].modename, new_value);
406 } 396 }
407out: 397
408 return ret; 398 return ret;
409} 399}
410static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, 400static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
@@ -453,6 +443,44 @@ static ssize_t bonding_store_arp_validate(struct device *d,
453 443
454static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, 444static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
455 bonding_store_arp_validate); 445 bonding_store_arp_validate);
446/*
447 * Show and set arp_all_targets.
448 */
449static ssize_t bonding_show_arp_all_targets(struct device *d,
450 struct device_attribute *attr,
451 char *buf)
452{
453 struct bonding *bond = to_bond(d);
454 int value = bond->params.arp_all_targets;
455
456 return sprintf(buf, "%s %d\n", arp_all_targets_tbl[value].modename,
457 value);
458}
459
460static ssize_t bonding_store_arp_all_targets(struct device *d,
461 struct device_attribute *attr,
462 const char *buf, size_t count)
463{
464 struct bonding *bond = to_bond(d);
465 int new_value;
466
467 new_value = bond_parse_parm(buf, arp_all_targets_tbl);
468 if (new_value < 0) {
469 pr_err("%s: Ignoring invalid arp_all_targets value %s\n",
470 bond->dev->name, buf);
471 return -EINVAL;
472 }
473 pr_info("%s: setting arp_all_targets to %s (%d).\n",
474 bond->dev->name, arp_all_targets_tbl[new_value].modename,
475 new_value);
476
477 bond->params.arp_all_targets = new_value;
478
479 return count;
480}
481
482static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
483 bonding_show_arp_all_targets, bonding_store_arp_all_targets);
456 484
457/* 485/*
458 * Show and store fail_over_mac. User only allowed to change the 486 * Show and store fail_over_mac. User only allowed to change the
@@ -600,10 +628,11 @@ static ssize_t bonding_store_arp_targets(struct device *d,
600 struct device_attribute *attr, 628 struct device_attribute *attr,
601 const char *buf, size_t count) 629 const char *buf, size_t count)
602{ 630{
603 __be32 newtarget;
604 int i = 0, done = 0, ret = count;
605 struct bonding *bond = to_bond(d); 631 struct bonding *bond = to_bond(d);
606 __be32 *targets; 632 struct slave *slave;
633 __be32 newtarget, *targets;
634 unsigned long *targets_rx;
635 int ind, i, j, ret = -EINVAL;
607 636
608 targets = bond->params.arp_targets; 637 targets = bond->params.arp_targets;
609 newtarget = in_aton(buf + 1); 638 newtarget = in_aton(buf + 1);
@@ -612,57 +641,63 @@ static ssize_t bonding_store_arp_targets(struct device *d,
612 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { 641 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
613 pr_err("%s: invalid ARP target %pI4 specified for addition\n", 642 pr_err("%s: invalid ARP target %pI4 specified for addition\n",
614 bond->dev->name, &newtarget); 643 bond->dev->name, &newtarget);
615 ret = -EINVAL;
616 goto out; 644 goto out;
617 } 645 }
618 /* look for an empty slot to put the target in, and check for dupes */ 646
619 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { 647 if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */
620 if (targets[i] == newtarget) { /* duplicate */ 648 pr_err("%s: ARP target %pI4 is already present\n",
621 pr_err("%s: ARP target %pI4 is already present\n", 649 bond->dev->name, &newtarget);
622 bond->dev->name, &newtarget); 650 goto out;
623 ret = -EINVAL;
624 goto out;
625 }
626 if (targets[i] == 0) {
627 pr_info("%s: adding ARP target %pI4.\n",
628 bond->dev->name, &newtarget);
629 done = 1;
630 targets[i] = newtarget;
631 }
632 } 651 }
633 if (!done) { 652
653 ind = bond_get_targets_ip(targets, 0); /* first free slot */
654 if (ind == -1) {
634 pr_err("%s: ARP target table is full!\n", 655 pr_err("%s: ARP target table is full!\n",
635 bond->dev->name); 656 bond->dev->name);
636 ret = -EINVAL;
637 goto out; 657 goto out;
638 } 658 }
639 659
660 pr_info("%s: adding ARP target %pI4.\n", bond->dev->name,
661 &newtarget);
662 /* not to race with bond_arp_rcv */
663 write_lock_bh(&bond->lock);
664 bond_for_each_slave(bond, slave, i)
665 slave->target_last_arp_rx[ind] = jiffies;
666 targets[ind] = newtarget;
667 write_unlock_bh(&bond->lock);
640 } else if (buf[0] == '-') { 668 } else if (buf[0] == '-') {
641 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { 669 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
642 pr_err("%s: invalid ARP target %pI4 specified for removal\n", 670 pr_err("%s: invalid ARP target %pI4 specified for removal\n",
643 bond->dev->name, &newtarget); 671 bond->dev->name, &newtarget);
644 ret = -EINVAL;
645 goto out; 672 goto out;
646 } 673 }
647 674
648 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { 675 ind = bond_get_targets_ip(targets, newtarget);
649 if (targets[i] == newtarget) { 676 if (ind == -1) {
650 int j; 677 pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
651 pr_info("%s: removing ARP target %pI4.\n",
652 bond->dev->name, &newtarget);
653 for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++)
654 targets[j] = targets[j+1];
655
656 targets[j] = 0;
657 done = 1;
658 }
659 }
660 if (!done) {
661 pr_info("%s: unable to remove nonexistent ARP target %pI4.\n",
662 bond->dev->name, &newtarget); 678 bond->dev->name, &newtarget);
663 ret = -EINVAL;
664 goto out; 679 goto out;
665 } 680 }
681
682 if (ind == 0 && !targets[1] && bond->params.arp_interval)
683 pr_warn("%s: removing last arp target with arp_interval on\n",
684 bond->dev->name);
685
686 pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
687 &newtarget);
688
689 write_lock_bh(&bond->lock);
690 bond_for_each_slave(bond, slave, i) {
691 targets_rx = slave->target_last_arp_rx;
692 j = ind;
693 for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
694 targets_rx[j] = targets_rx[j+1];
695 targets_rx[j] = 0;
696 }
697 for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
698 targets[i] = targets[i+1];
699 targets[i] = 0;
700 write_unlock_bh(&bond->lock);
666 } else { 701 } else {
667 pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n", 702 pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
668 bond->dev->name); 703 bond->dev->name);
@@ -670,6 +705,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
670 goto out; 705 goto out;
671 } 706 }
672 707
708 ret = count;
673out: 709out:
674 return ret; 710 return ret;
675} 711}
@@ -1645,6 +1681,7 @@ static struct attribute *per_bond_attrs[] = {
1645 &dev_attr_mode.attr, 1681 &dev_attr_mode.attr,
1646 &dev_attr_fail_over_mac.attr, 1682 &dev_attr_fail_over_mac.attr,
1647 &dev_attr_arp_validate.attr, 1683 &dev_attr_arp_validate.attr,
1684 &dev_attr_arp_all_targets.attr,
1648 &dev_attr_arp_interval.attr, 1685 &dev_attr_arp_interval.attr,
1649 &dev_attr_arp_ip_target.attr, 1686 &dev_attr_arp_ip_target.attr,
1650 &dev_attr_downdelay.attr, 1687 &dev_attr_downdelay.attr,
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index f989e1529a29..42d1c6599cba 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -144,6 +144,7 @@ struct bond_params {
144 u8 num_peer_notif; 144 u8 num_peer_notif;
145 int arp_interval; 145 int arp_interval;
146 int arp_validate; 146 int arp_validate;
147 int arp_all_targets;
147 int use_carrier; 148 int use_carrier;
148 int fail_over_mac; 149 int fail_over_mac;
149 int updelay; 150 int updelay;
@@ -179,6 +180,7 @@ struct slave {
179 int delay; 180 int delay;
180 unsigned long jiffies; 181 unsigned long jiffies;
181 unsigned long last_arp_rx; 182 unsigned long last_arp_rx;
183 unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
182 s8 link; /* one of BOND_LINK_XXXX */ 184 s8 link; /* one of BOND_LINK_XXXX */
183 s8 new_link; 185 s8 new_link;
184 u8 backup:1, /* indicates backup slave. Value corresponds with 186 u8 backup:1, /* indicates backup slave. Value corresponds with
@@ -224,14 +226,12 @@ struct bonding {
224 rwlock_t lock; 226 rwlock_t lock;
225 rwlock_t curr_slave_lock; 227 rwlock_t curr_slave_lock;
226 u8 send_peer_notif; 228 u8 send_peer_notif;
227 s8 setup_by_slave;
228 u8 igmp_retrans; 229 u8 igmp_retrans;
229#ifdef CONFIG_PROC_FS 230#ifdef CONFIG_PROC_FS
230 struct proc_dir_entry *proc_entry; 231 struct proc_dir_entry *proc_entry;
231 char proc_file_name[IFNAMSIZ]; 232 char proc_file_name[IFNAMSIZ];
232#endif /* CONFIG_PROC_FS */ 233#endif /* CONFIG_PROC_FS */
233 struct list_head bond_list; 234 struct list_head bond_list;
234 struct netdev_hw_addr_list mc_list;
235 int (*xmit_hash_policy)(struct sk_buff *, int); 235 int (*xmit_hash_policy)(struct sk_buff *, int);
236 u16 rr_tx_counter; 236 u16 rr_tx_counter;
237 struct ad_bond_info ad_info; 237 struct ad_bond_info ad_info;
@@ -248,7 +248,6 @@ struct bonding {
248 /* debugging support via debugfs */ 248 /* debugging support via debugfs */
249 struct dentry *debug_dir; 249 struct dentry *debug_dir;
250#endif /* CONFIG_DEBUG_FS */ 250#endif /* CONFIG_DEBUG_FS */
251 bool dev_addr_from_first;
252}; 251};
253 252
254static inline bool bond_vlan_used(struct bonding *bond) 253static inline bool bond_vlan_used(struct bonding *bond)
@@ -323,6 +322,9 @@ static inline bool bond_is_active_slave(struct slave *slave)
323#define BOND_FOM_ACTIVE 1 322#define BOND_FOM_ACTIVE 1
324#define BOND_FOM_FOLLOW 2 323#define BOND_FOM_FOLLOW 2
325 324
325#define BOND_ARP_TARGETS_ANY 0
326#define BOND_ARP_TARGETS_ALL 1
327
326#define BOND_ARP_VALIDATE_NONE 0 328#define BOND_ARP_VALIDATE_NONE 0
327#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE) 329#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE)
328#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP) 330#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
@@ -335,11 +337,31 @@ static inline int slave_do_arp_validate(struct bonding *bond,
335 return bond->params.arp_validate & (1 << bond_slave_state(slave)); 337 return bond->params.arp_validate & (1 << bond_slave_state(slave));
336} 338}
337 339
340/* Get the oldest arp which we've received on this slave for bond's
341 * arp_targets.
342 */
343static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
344 struct slave *slave)
345{
346 int i = 1;
347 unsigned long ret = slave->target_last_arp_rx[0];
348
349 for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++)
350 if (time_before(slave->target_last_arp_rx[i], ret))
351 ret = slave->target_last_arp_rx[i];
352
353 return ret;
354}
355
338static inline unsigned long slave_last_rx(struct bonding *bond, 356static inline unsigned long slave_last_rx(struct bonding *bond,
339 struct slave *slave) 357 struct slave *slave)
340{ 358{
341 if (slave_do_arp_validate(bond, slave)) 359 if (slave_do_arp_validate(bond, slave)) {
342 return slave->last_arp_rx; 360 if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
361 return slave_oldest_target_arp_rx(bond, slave);
362 else
363 return slave->last_arp_rx;
364 }
343 365
344 return slave->dev->last_rx; 366 return slave->dev->last_rx;
345} 367}
@@ -465,12 +487,29 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
465 return NULL; 487 return NULL;
466} 488}
467 489
490/* Check if the ip is present in arp ip list, or first free slot if ip == 0
491 * Returns -1 if not found, index if found
492 */
493static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
494{
495 int i;
496
497 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
498 if (targets[i] == ip)
499 return i;
500 else if (targets[i] == 0)
501 break;
502
503 return -1;
504}
505
468/* exported from bond_main.c */ 506/* exported from bond_main.c */
469extern int bond_net_id; 507extern int bond_net_id;
470extern const struct bond_parm_tbl bond_lacp_tbl[]; 508extern const struct bond_parm_tbl bond_lacp_tbl[];
471extern const struct bond_parm_tbl bond_mode_tbl[]; 509extern const struct bond_parm_tbl bond_mode_tbl[];
472extern const struct bond_parm_tbl xmit_hashtype_tbl[]; 510extern const struct bond_parm_tbl xmit_hashtype_tbl[];
473extern const struct bond_parm_tbl arp_validate_tbl[]; 511extern const struct bond_parm_tbl arp_validate_tbl[];
512extern const struct bond_parm_tbl arp_all_targets_tbl[];
474extern const struct bond_parm_tbl fail_over_mac_tbl[]; 513extern const struct bond_parm_tbl fail_over_mac_tbl[];
475extern const struct bond_parm_tbl pri_reselect_tbl[]; 514extern const struct bond_parm_tbl pri_reselect_tbl[];
476extern struct bond_parm_tbl ad_select_tbl[]; 515extern struct bond_parm_tbl ad_select_tbl[];
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 77be3cb0b5fe..34dea95d58db 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -35,8 +35,9 @@ MODULE_ALIAS_LDISC(N_CAIF);
35#define OFF 0 35#define OFF 0
36#define CAIF_MAX_MTU 4096 36#define CAIF_MAX_MTU 4096
37 37
38/*This list is protected by the rtnl lock. */ 38static DEFINE_SPINLOCK(ser_lock);
39static LIST_HEAD(ser_list); 39static LIST_HEAD(ser_list);
40static LIST_HEAD(ser_release_list);
40 41
41static bool ser_loop; 42static bool ser_loop;
42module_param(ser_loop, bool, S_IRUGO); 43module_param(ser_loop, bool, S_IRUGO);
@@ -308,6 +309,28 @@ static void ldisc_tx_wakeup(struct tty_struct *tty)
308} 309}
309 310
310 311
312static void ser_release(struct work_struct *work)
313{
314 struct list_head list;
315 struct ser_device *ser, *tmp;
316
317 spin_lock(&ser_lock);
318 list_replace_init(&ser_release_list, &list);
319 spin_unlock(&ser_lock);
320
321 if (!list_empty(&list)) {
322 rtnl_lock();
323 list_for_each_entry_safe(ser, tmp, &list, node) {
324 dev_close(ser->dev);
325 unregister_netdevice(ser->dev);
326 debugfs_deinit(ser);
327 }
328 rtnl_unlock();
329 }
330}
331
332static DECLARE_WORK(ser_release_work, ser_release);
333
311static int ldisc_open(struct tty_struct *tty) 334static int ldisc_open(struct tty_struct *tty)
312{ 335{
313 struct ser_device *ser; 336 struct ser_device *ser;
@@ -321,6 +344,9 @@ static int ldisc_open(struct tty_struct *tty)
321 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG)) 344 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
322 return -EPERM; 345 return -EPERM;
323 346
347 /* release devices to avoid name collision */
348 ser_release(NULL);
349
324 sprintf(name, "cf%s", tty->name); 350 sprintf(name, "cf%s", tty->name);
325 dev = alloc_netdev(sizeof(*ser), name, caifdev_setup); 351 dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
326 if (!dev) 352 if (!dev)
@@ -341,7 +367,9 @@ static int ldisc_open(struct tty_struct *tty)
341 return -ENODEV; 367 return -ENODEV;
342 } 368 }
343 369
370 spin_lock(&ser_lock);
344 list_add(&ser->node, &ser_list); 371 list_add(&ser->node, &ser_list);
372 spin_unlock(&ser_lock);
345 rtnl_unlock(); 373 rtnl_unlock();
346 netif_stop_queue(dev); 374 netif_stop_queue(dev);
347 update_tty_status(ser); 375 update_tty_status(ser);
@@ -351,19 +379,13 @@ static int ldisc_open(struct tty_struct *tty)
351static void ldisc_close(struct tty_struct *tty) 379static void ldisc_close(struct tty_struct *tty)
352{ 380{
353 struct ser_device *ser = tty->disc_data; 381 struct ser_device *ser = tty->disc_data;
354 /* Remove may be called inside or outside of rtnl_lock */
355 int islocked = rtnl_is_locked();
356 382
357 if (!islocked)
358 rtnl_lock();
359 /* device is freed automagically by net-sysfs */
360 dev_close(ser->dev);
361 unregister_netdevice(ser->dev);
362 list_del(&ser->node);
363 debugfs_deinit(ser);
364 tty_kref_put(ser->tty); 383 tty_kref_put(ser->tty);
365 if (!islocked) 384
366 rtnl_unlock(); 385 spin_lock(&ser_lock);
386 list_move(&ser->node, &ser_release_list);
387 spin_unlock(&ser_lock);
388 schedule_work(&ser_release_work);
367} 389}
368 390
369/* The line discipline structure. */ 391/* The line discipline structure. */
@@ -438,16 +460,11 @@ static int __init caif_ser_init(void)
438 460
439static void __exit caif_ser_exit(void) 461static void __exit caif_ser_exit(void)
440{ 462{
441 struct ser_device *ser = NULL; 463 spin_lock(&ser_lock);
442 struct list_head *node; 464 list_splice(&ser_list, &ser_release_list);
443 struct list_head *_tmp; 465 spin_unlock(&ser_lock);
444 466 ser_release(NULL);
445 list_for_each_safe(node, _tmp, &ser_list) { 467 cancel_work_sync(&ser_release_work);
446 ser = list_entry(node, struct ser_device, node);
447 dev_close(ser->dev);
448 unregister_netdevice(ser->dev);
449 list_del(node);
450 }
451 tty_unregister_ldisc(N_CAIF); 468 tty_unregister_ldisc(N_CAIF);
452 debugfs_remove_recursive(debugfsdir); 469 debugfs_remove_recursive(debugfsdir);
453} 470}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index e456b70933c2..3c069472eb8b 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -102,12 +102,9 @@ config CAN_JANZ_ICAN3
102 This driver can also be built as a module. If so, the module will be 102 This driver can also be built as a module. If so, the module will be
103 called janz-ican3.ko. 103 called janz-ican3.ko.
104 104
105config HAVE_CAN_FLEXCAN
106 bool
107
108config CAN_FLEXCAN 105config CAN_FLEXCAN
109 tristate "Support for Freescale FLEXCAN based chips" 106 tristate "Support for Freescale FLEXCAN based chips"
110 depends on HAVE_CAN_FLEXCAN 107 depends on ARM || PPC
111 ---help--- 108 ---help---
112 Say Y here if you want to support for Freescale FlexCAN. 109 Say Y here if you want to support for Freescale FlexCAN.
113 110
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index db52f4414def..dbbe97ae121e 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1220,7 +1220,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
1220 goto out; 1220 goto out;
1221 } 1221 }
1222 1222
1223 err = strict_strtoul(buf, 0, &can_id); 1223 err = kstrtoul(buf, 0, &can_id);
1224 if (err) { 1224 if (err) {
1225 ret = err; 1225 ret = err;
1226 goto out; 1226 goto out;
@@ -1264,8 +1264,6 @@ static const struct of_device_id at91_can_dt_ids[] = {
1264 } 1264 }
1265}; 1265};
1266MODULE_DEVICE_TABLE(of, at91_can_dt_ids); 1266MODULE_DEVICE_TABLE(of, at91_can_dt_ids);
1267#else
1268#define at91_can_dt_ids NULL
1269#endif 1267#endif
1270 1268
1271static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev) 1269static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev)
@@ -1393,8 +1391,6 @@ static int at91_can_remove(struct platform_device *pdev)
1393 1391
1394 unregister_netdev(dev); 1392 unregister_netdev(dev);
1395 1393
1396 platform_set_drvdata(pdev, NULL);
1397
1398 iounmap(priv->reg_base); 1394 iounmap(priv->reg_base);
1399 1395
1400 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1396 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1426,7 +1422,7 @@ static struct platform_driver at91_can_driver = {
1426 .driver = { 1422 .driver = {
1427 .name = KBUILD_MODNAME, 1423 .name = KBUILD_MODNAME,
1428 .owner = THIS_MODULE, 1424 .owner = THIS_MODULE,
1429 .of_match_table = at91_can_dt_ids, 1425 .of_match_table = of_match_ptr(at91_can_dt_ids),
1430 }, 1426 },
1431 .id_table = at91_can_id_table, 1427 .id_table = at91_can_id_table,
1432}; 1428};
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index d4a15e82bfc0..a2700d25ff0e 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -580,7 +580,7 @@ static int bfin_can_probe(struct platform_device *pdev)
580 priv->pin_list = pdata; 580 priv->pin_list = pdata;
581 priv->can.clock.freq = get_sclk(); 581 priv->can.clock.freq = get_sclk();
582 582
583 dev_set_drvdata(&pdev->dev, dev); 583 platform_set_drvdata(pdev, dev);
584 SET_NETDEV_DEV(dev, &pdev->dev); 584 SET_NETDEV_DEV(dev, &pdev->dev);
585 585
586 dev->flags |= IFF_ECHO; /* we support local echo */ 586 dev->flags |= IFF_ECHO; /* we support local echo */
@@ -613,7 +613,7 @@ exit:
613 613
614static int bfin_can_remove(struct platform_device *pdev) 614static int bfin_can_remove(struct platform_device *pdev)
615{ 615{
616 struct net_device *dev = dev_get_drvdata(&pdev->dev); 616 struct net_device *dev = platform_get_drvdata(pdev);
617 struct bfin_can_priv *priv = netdev_priv(dev); 617 struct bfin_can_priv *priv = netdev_priv(dev);
618 struct resource *res; 618 struct resource *res;
619 619
@@ -621,8 +621,6 @@ static int bfin_can_remove(struct platform_device *pdev)
621 621
622 unregister_candev(dev); 622 unregister_candev(dev);
623 623
624 dev_set_drvdata(&pdev->dev, NULL);
625
626 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 624 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
627 release_mem_region(res->start, resource_size(res)); 625 release_mem_region(res->start, resource_size(res));
628 626
@@ -635,7 +633,7 @@ static int bfin_can_remove(struct platform_device *pdev)
635#ifdef CONFIG_PM 633#ifdef CONFIG_PM
636static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg) 634static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
637{ 635{
638 struct net_device *dev = dev_get_drvdata(&pdev->dev); 636 struct net_device *dev = platform_get_drvdata(pdev);
639 struct bfin_can_priv *priv = netdev_priv(dev); 637 struct bfin_can_priv *priv = netdev_priv(dev);
640 struct bfin_can_regs __iomem *reg = priv->membase; 638 struct bfin_can_regs __iomem *reg = priv->membase;
641 int timeout = BFIN_CAN_TIMEOUT; 639 int timeout = BFIN_CAN_TIMEOUT;
@@ -658,7 +656,7 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
658 656
659static int bfin_can_resume(struct platform_device *pdev) 657static int bfin_can_resume(struct platform_device *pdev)
660{ 658{
661 struct net_device *dev = dev_get_drvdata(&pdev->dev); 659 struct net_device *dev = platform_get_drvdata(pdev);
662 struct bfin_can_priv *priv = netdev_priv(dev); 660 struct bfin_can_priv *priv = netdev_priv(dev);
663 struct bfin_can_regs __iomem *reg = priv->membase; 661 struct bfin_can_regs __iomem *reg = priv->membase;
664 662
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index d63b91904f82..b918c7329426 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -201,8 +201,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
201 priv->instance = pdev->id; 201 priv->instance = pdev->id;
202 202
203 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 203 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
204 priv->raminit_ctrlreg = devm_request_and_ioremap(&pdev->dev, res); 204 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
205 if (!priv->raminit_ctrlreg || priv->instance < 0) 205 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
206 dev_info(&pdev->dev, "control memory is not used for raminit\n"); 206 dev_info(&pdev->dev, "control memory is not used for raminit\n");
207 else 207 else
208 priv->raminit = c_can_hw_raminit; 208 priv->raminit = c_can_hw_raminit;
@@ -234,7 +234,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
234 return 0; 234 return 0;
235 235
236exit_free_device: 236exit_free_device:
237 platform_set_drvdata(pdev, NULL);
238 free_c_can_dev(dev); 237 free_c_can_dev(dev);
239exit_iounmap: 238exit_iounmap:
240 iounmap(addr); 239 iounmap(addr);
@@ -255,7 +254,6 @@ static int c_can_plat_remove(struct platform_device *pdev)
255 struct resource *mem; 254 struct resource *mem;
256 255
257 unregister_c_can_dev(dev); 256 unregister_c_can_dev(dev);
258 platform_set_drvdata(pdev, NULL);
259 257
260 free_c_can_dev(dev); 258 free_c_can_dev(dev);
261 iounmap(priv->base); 259 iounmap(priv->base);
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 8eaaac81f320..87a47c0cfd49 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -265,7 +265,7 @@ static int cc770_isa_probe(struct platform_device *pdev)
265 else 265 else
266 priv->clkout = COR_DEFAULT; 266 priv->clkout = COR_DEFAULT;
267 267
268 dev_set_drvdata(&pdev->dev, dev); 268 platform_set_drvdata(pdev, dev);
269 SET_NETDEV_DEV(dev, &pdev->dev); 269 SET_NETDEV_DEV(dev, &pdev->dev);
270 270
271 err = register_cc770dev(dev); 271 err = register_cc770dev(dev);
@@ -293,12 +293,11 @@ static int cc770_isa_probe(struct platform_device *pdev)
293 293
294static int cc770_isa_remove(struct platform_device *pdev) 294static int cc770_isa_remove(struct platform_device *pdev)
295{ 295{
296 struct net_device *dev = dev_get_drvdata(&pdev->dev); 296 struct net_device *dev = platform_get_drvdata(pdev);
297 struct cc770_priv *priv = netdev_priv(dev); 297 struct cc770_priv *priv = netdev_priv(dev);
298 int idx = pdev->id; 298 int idx = pdev->id;
299 299
300 unregister_cc770dev(dev); 300 unregister_cc770dev(dev);
301 dev_set_drvdata(&pdev->dev, NULL);
302 301
303 if (mem[idx]) { 302 if (mem[idx]) {
304 iounmap(priv->reg_base); 303 iounmap(priv->reg_base);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index d0f6bfc45aea..034bdd816a60 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -216,7 +216,7 @@ static int cc770_platform_probe(struct platform_device *pdev)
216 priv->reg_base, dev->irq, priv->can.clock.freq, 216 priv->reg_base, dev->irq, priv->can.clock.freq,
217 priv->cpu_interface, priv->bus_config, priv->clkout); 217 priv->cpu_interface, priv->bus_config, priv->clkout);
218 218
219 dev_set_drvdata(&pdev->dev, dev); 219 platform_set_drvdata(pdev, dev);
220 SET_NETDEV_DEV(dev, &pdev->dev); 220 SET_NETDEV_DEV(dev, &pdev->dev);
221 221
222 err = register_cc770dev(dev); 222 err = register_cc770dev(dev);
@@ -240,7 +240,7 @@ exit_release_mem:
240 240
241static int cc770_platform_remove(struct platform_device *pdev) 241static int cc770_platform_remove(struct platform_device *pdev)
242{ 242{
243 struct net_device *dev = dev_get_drvdata(&pdev->dev); 243 struct net_device *dev = platform_get_drvdata(pdev);
244 struct cc770_priv *priv = netdev_priv(dev); 244 struct cc770_priv *priv = netdev_priv(dev);
245 struct resource *mem; 245 struct resource *mem;
246 246
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 769d29ed106d..7b0be0910f4b 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -24,7 +24,6 @@
24#include <linux/can/dev.h> 24#include <linux/can/dev.h>
25#include <linux/can/error.h> 25#include <linux/can/error.h>
26#include <linux/can/led.h> 26#include <linux/can/led.h>
27#include <linux/can/platform/flexcan.h>
28#include <linux/clk.h> 27#include <linux/clk.h>
29#include <linux/delay.h> 28#include <linux/delay.h>
30#include <linux/if_arp.h> 29#include <linux/if_arp.h>
@@ -37,7 +36,7 @@
37#include <linux/of.h> 36#include <linux/of.h>
38#include <linux/of_device.h> 37#include <linux/of_device.h>
39#include <linux/platform_device.h> 38#include <linux/platform_device.h>
40#include <linux/pinctrl/consumer.h> 39#include <linux/regulator/consumer.h>
41 40
42#define DRV_NAME "flexcan" 41#define DRV_NAME "flexcan"
43 42
@@ -212,6 +211,7 @@ struct flexcan_priv {
212 struct clk *clk_per; 211 struct clk *clk_per;
213 struct flexcan_platform_data *pdata; 212 struct flexcan_platform_data *pdata;
214 const struct flexcan_devtype_data *devtype_data; 213 const struct flexcan_devtype_data *devtype_data;
214 struct regulator *reg_xceiver;
215}; 215};
216 216
217static struct flexcan_devtype_data fsl_p1010_devtype_data = { 217static struct flexcan_devtype_data fsl_p1010_devtype_data = {
@@ -259,15 +259,6 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
259} 259}
260#endif 260#endif
261 261
262/*
263 * Swtich transceiver on or off
264 */
265static void flexcan_transceiver_switch(const struct flexcan_priv *priv, int on)
266{
267 if (priv->pdata && priv->pdata->transceiver_switch)
268 priv->pdata->transceiver_switch(on);
269}
270
271static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, 262static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
272 u32 reg_esr) 263 u32 reg_esr)
273{ 264{
@@ -800,7 +791,11 @@ static int flexcan_chip_start(struct net_device *dev)
800 if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES) 791 if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
801 flexcan_write(0x0, &regs->rxfgmask); 792 flexcan_write(0x0, &regs->rxfgmask);
802 793
803 flexcan_transceiver_switch(priv, 1); 794 if (priv->reg_xceiver) {
795 err = regulator_enable(priv->reg_xceiver);
796 if (err)
797 goto out;
798 }
804 799
805 /* synchronize with the can bus */ 800 /* synchronize with the can bus */
806 reg_mcr = flexcan_read(&regs->mcr); 801 reg_mcr = flexcan_read(&regs->mcr);
@@ -843,7 +838,8 @@ static void flexcan_chip_stop(struct net_device *dev)
843 reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT; 838 reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
844 flexcan_write(reg, &regs->mcr); 839 flexcan_write(reg, &regs->mcr);
845 840
846 flexcan_transceiver_switch(priv, 0); 841 if (priv->reg_xceiver)
842 regulator_disable(priv->reg_xceiver);
847 priv->can.state = CAN_STATE_STOPPED; 843 priv->can.state = CAN_STATE_STOPPED;
848 844
849 return; 845 return;
@@ -1004,16 +1000,11 @@ static int flexcan_probe(struct platform_device *pdev)
1004 struct flexcan_priv *priv; 1000 struct flexcan_priv *priv;
1005 struct resource *mem; 1001 struct resource *mem;
1006 struct clk *clk_ipg = NULL, *clk_per = NULL; 1002 struct clk *clk_ipg = NULL, *clk_per = NULL;
1007 struct pinctrl *pinctrl;
1008 void __iomem *base; 1003 void __iomem *base;
1009 resource_size_t mem_size; 1004 resource_size_t mem_size;
1010 int err, irq; 1005 int err, irq;
1011 u32 clock_freq = 0; 1006 u32 clock_freq = 0;
1012 1007
1013 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
1014 if (IS_ERR(pinctrl))
1015 return PTR_ERR(pinctrl);
1016
1017 if (pdev->dev.of_node) 1008 if (pdev->dev.of_node)
1018 of_property_read_u32(pdev->dev.of_node, 1009 of_property_read_u32(pdev->dev.of_node,
1019 "clock-frequency", &clock_freq); 1010 "clock-frequency", &clock_freq);
@@ -1090,6 +1081,10 @@ static int flexcan_probe(struct platform_device *pdev)
1090 priv->pdata = pdev->dev.platform_data; 1081 priv->pdata = pdev->dev.platform_data;
1091 priv->devtype_data = devtype_data; 1082 priv->devtype_data = devtype_data;
1092 1083
1084 priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
1085 if (IS_ERR(priv->reg_xceiver))
1086 priv->reg_xceiver = NULL;
1087
1093 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); 1088 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
1094 1089
1095 dev_set_drvdata(&pdev->dev, dev); 1090 dev_set_drvdata(&pdev->dev, dev);
@@ -1127,7 +1122,6 @@ static int flexcan_remove(struct platform_device *pdev)
1127 struct resource *mem; 1122 struct resource *mem;
1128 1123
1129 unregister_flexcandev(dev); 1124 unregister_flexcandev(dev);
1130 platform_set_drvdata(pdev, NULL);
1131 iounmap(priv->base); 1125 iounmap(priv->base);
1132 1126
1133 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1127 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1138,10 +1132,10 @@ static int flexcan_remove(struct platform_device *pdev)
1138 return 0; 1132 return 0;
1139} 1133}
1140 1134
1141#ifdef CONFIG_PM 1135#ifdef CONFIG_PM_SLEEP
1142static int flexcan_suspend(struct platform_device *pdev, pm_message_t state) 1136static int flexcan_suspend(struct device *device)
1143{ 1137{
1144 struct net_device *dev = platform_get_drvdata(pdev); 1138 struct net_device *dev = dev_get_drvdata(device);
1145 struct flexcan_priv *priv = netdev_priv(dev); 1139 struct flexcan_priv *priv = netdev_priv(dev);
1146 1140
1147 flexcan_chip_disable(priv); 1141 flexcan_chip_disable(priv);
@@ -1155,9 +1149,9 @@ static int flexcan_suspend(struct platform_device *pdev, pm_message_t state)
1155 return 0; 1149 return 0;
1156} 1150}
1157 1151
1158static int flexcan_resume(struct platform_device *pdev) 1152static int flexcan_resume(struct device *device)
1159{ 1153{
1160 struct net_device *dev = platform_get_drvdata(pdev); 1154 struct net_device *dev = dev_get_drvdata(device);
1161 struct flexcan_priv *priv = netdev_priv(dev); 1155 struct flexcan_priv *priv = netdev_priv(dev);
1162 1156
1163 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1157 priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -1169,21 +1163,19 @@ static int flexcan_resume(struct platform_device *pdev)
1169 1163
1170 return 0; 1164 return 0;
1171} 1165}
1172#else 1166#endif /* CONFIG_PM_SLEEP */
1173#define flexcan_suspend NULL 1167
1174#define flexcan_resume NULL 1168static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
1175#endif
1176 1169
1177static struct platform_driver flexcan_driver = { 1170static struct platform_driver flexcan_driver = {
1178 .driver = { 1171 .driver = {
1179 .name = DRV_NAME, 1172 .name = DRV_NAME,
1180 .owner = THIS_MODULE, 1173 .owner = THIS_MODULE,
1174 .pm = &flexcan_pm_ops,
1181 .of_match_table = flexcan_of_match, 1175 .of_match_table = flexcan_of_match,
1182 }, 1176 },
1183 .probe = flexcan_probe, 1177 .probe = flexcan_probe,
1184 .remove = flexcan_remove, 1178 .remove = flexcan_remove,
1185 .suspend = flexcan_suspend,
1186 .resume = flexcan_resume,
1187 .id_table = flexcan_id_table, 1179 .id_table = flexcan_id_table,
1188}; 1180};
1189 1181
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 17fbc7a09224..6aa737a24393 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1646,7 +1646,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
1646 if (err) 1646 if (err)
1647 goto exit_free_candev; 1647 goto exit_free_candev;
1648 1648
1649 dev_set_drvdata(&ofdev->dev, dev); 1649 platform_set_drvdata(ofdev, dev);
1650 1650
1651 /* Reset device to allow bit-timing to be set. No need to call 1651 /* Reset device to allow bit-timing to be set. No need to call
1652 * grcan_reset at this stage. That is done in grcan_open. 1652 * grcan_reset at this stage. That is done in grcan_open.
@@ -1683,10 +1683,9 @@ static int grcan_probe(struct platform_device *ofdev)
1683 } 1683 }
1684 1684
1685 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); 1685 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
1686 base = devm_request_and_ioremap(&ofdev->dev, res); 1686 base = devm_ioremap_resource(&ofdev->dev, res);
1687 if (!base) { 1687 if (IS_ERR(base)) {
1688 dev_err(&ofdev->dev, "couldn't map IO resource\n"); 1688 err = PTR_ERR(base);
1689 err = -EADDRNOTAVAIL;
1690 goto exit_error; 1689 goto exit_error;
1691 } 1690 }
1692 1691
@@ -1716,13 +1715,12 @@ exit_error:
1716 1715
1717static int grcan_remove(struct platform_device *ofdev) 1716static int grcan_remove(struct platform_device *ofdev)
1718{ 1717{
1719 struct net_device *dev = dev_get_drvdata(&ofdev->dev); 1718 struct net_device *dev = platform_get_drvdata(ofdev);
1720 struct grcan_priv *priv = netdev_priv(dev); 1719 struct grcan_priv *priv = netdev_priv(dev);
1721 1720
1722 unregister_candev(dev); /* Will in turn call grcan_close */ 1721 unregister_candev(dev); /* Will in turn call grcan_close */
1723 1722
1724 irq_dispose_mapping(dev->irq); 1723 irq_dispose_mapping(dev->irq);
1725 dev_set_drvdata(&ofdev->dev, NULL);
1726 netif_napi_del(&priv->napi); 1724 netif_napi_del(&priv->napi);
1727 free_candev(dev); 1725 free_candev(dev);
1728 1726
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index c4bc1d2e2033..36bd6fa1c7f3 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1734,7 +1734,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
1734 unsigned long enable; 1734 unsigned long enable;
1735 int ret; 1735 int ret;
1736 1736
1737 if (strict_strtoul(buf, 0, &enable)) 1737 if (kstrtoul(buf, 0, &enable))
1738 return -EINVAL; 1738 return -EINVAL;
1739 1739
1740 ret = ican3_set_termination(mod, enable); 1740 ret = ican3_set_termination(mod, enable);
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
index f27fca65dc4a..a3d99a8fd2d1 100644
--- a/drivers/net/can/led.c
+++ b/drivers/net/can/led.c
@@ -88,9 +88,9 @@ EXPORT_SYMBOL_GPL(devm_can_led_init);
88 88
89/* NETDEV rename notifier to rename the associated led triggers too */ 89/* NETDEV rename notifier to rename the associated led triggers too */
90static int can_led_notifier(struct notifier_block *nb, unsigned long msg, 90static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
91 void *data) 91 void *ptr)
92{ 92{
93 struct net_device *netdev = data; 93 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
94 struct can_priv *priv = safe_candev_priv(netdev); 94 struct can_priv *priv = safe_candev_priv(netdev);
95 char name[CAN_LED_NAME_SZ]; 95 char name[CAN_LED_NAME_SZ];
96 96
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 668850e441dc..5b0ee8ef5885 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -302,7 +302,7 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
302 goto exit_free_mscan; 302 goto exit_free_mscan;
303 } 303 }
304 304
305 dev_set_drvdata(&ofdev->dev, dev); 305 platform_set_drvdata(ofdev, dev);
306 306
307 dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n", 307 dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
308 priv->reg_base, dev->irq, priv->can.clock.freq); 308 priv->reg_base, dev->irq, priv->can.clock.freq);
@@ -321,11 +321,9 @@ exit_unmap_mem:
321 321
322static int mpc5xxx_can_remove(struct platform_device *ofdev) 322static int mpc5xxx_can_remove(struct platform_device *ofdev)
323{ 323{
324 struct net_device *dev = dev_get_drvdata(&ofdev->dev); 324 struct net_device *dev = platform_get_drvdata(ofdev);
325 struct mscan_priv *priv = netdev_priv(dev); 325 struct mscan_priv *priv = netdev_priv(dev);
326 326
327 dev_set_drvdata(&ofdev->dev, NULL);
328
329 unregister_mscandev(dev); 327 unregister_mscandev(dev);
330 iounmap(priv->reg_base); 328 iounmap(priv->reg_base);
331 irq_dispose_mapping(dev->irq); 329 irq_dispose_mapping(dev->irq);
@@ -338,7 +336,7 @@ static int mpc5xxx_can_remove(struct platform_device *ofdev)
338static struct mscan_regs saved_regs; 336static struct mscan_regs saved_regs;
339static int mpc5xxx_can_suspend(struct platform_device *ofdev, pm_message_t state) 337static int mpc5xxx_can_suspend(struct platform_device *ofdev, pm_message_t state)
340{ 338{
341 struct net_device *dev = dev_get_drvdata(&ofdev->dev); 339 struct net_device *dev = platform_get_drvdata(ofdev);
342 struct mscan_priv *priv = netdev_priv(dev); 340 struct mscan_priv *priv = netdev_priv(dev);
343 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; 341 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
344 342
@@ -349,7 +347,7 @@ static int mpc5xxx_can_suspend(struct platform_device *ofdev, pm_message_t state
349 347
350static int mpc5xxx_can_resume(struct platform_device *ofdev) 348static int mpc5xxx_can_resume(struct platform_device *ofdev)
351{ 349{
352 struct net_device *dev = dev_get_drvdata(&ofdev->dev); 350 struct net_device *dev = platform_get_drvdata(ofdev);
353 struct mscan_priv *priv = netdev_priv(dev); 351 struct mscan_priv *priv = netdev_priv(dev);
354 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; 352 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
355 353
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 5c8da4661489..06a282397fff 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -197,7 +197,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
197 else 197 else
198 priv->cdr = CDR_DEFAULT; 198 priv->cdr = CDR_DEFAULT;
199 199
200 dev_set_drvdata(&pdev->dev, dev); 200 platform_set_drvdata(pdev, dev);
201 SET_NETDEV_DEV(dev, &pdev->dev); 201 SET_NETDEV_DEV(dev, &pdev->dev);
202 202
203 err = register_sja1000dev(dev); 203 err = register_sja1000dev(dev);
@@ -225,12 +225,11 @@ static int sja1000_isa_probe(struct platform_device *pdev)
225 225
226static int sja1000_isa_remove(struct platform_device *pdev) 226static int sja1000_isa_remove(struct platform_device *pdev)
227{ 227{
228 struct net_device *dev = dev_get_drvdata(&pdev->dev); 228 struct net_device *dev = platform_get_drvdata(pdev);
229 struct sja1000_priv *priv = netdev_priv(dev); 229 struct sja1000_priv *priv = netdev_priv(dev);
230 int idx = pdev->id; 230 int idx = pdev->id;
231 231
232 unregister_sja1000dev(dev); 232 unregister_sja1000dev(dev);
233 dev_set_drvdata(&pdev->dev, NULL);
234 233
235 if (mem[idx]) { 234 if (mem[idx]) {
236 iounmap(priv->reg_base); 235 iounmap(priv->reg_base);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 8e0c4a001939..31ad33911167 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -72,13 +72,11 @@ static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
72 72
73static int sja1000_ofp_remove(struct platform_device *ofdev) 73static int sja1000_ofp_remove(struct platform_device *ofdev)
74{ 74{
75 struct net_device *dev = dev_get_drvdata(&ofdev->dev); 75 struct net_device *dev = platform_get_drvdata(ofdev);
76 struct sja1000_priv *priv = netdev_priv(dev); 76 struct sja1000_priv *priv = netdev_priv(dev);
77 struct device_node *np = ofdev->dev.of_node; 77 struct device_node *np = ofdev->dev.of_node;
78 struct resource res; 78 struct resource res;
79 79
80 dev_set_drvdata(&ofdev->dev, NULL);
81
82 unregister_sja1000dev(dev); 80 unregister_sja1000dev(dev);
83 free_sja1000dev(dev); 81 free_sja1000dev(dev);
84 iounmap(priv->reg_base); 82 iounmap(priv->reg_base);
@@ -181,7 +179,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
181 priv->reg_base, dev->irq, priv->can.clock.freq, 179 priv->reg_base, dev->irq, priv->can.clock.freq,
182 priv->ocr, priv->cdr); 180 priv->ocr, priv->cdr);
183 181
184 dev_set_drvdata(&ofdev->dev, dev); 182 platform_set_drvdata(ofdev, dev);
185 SET_NETDEV_DEV(dev, &ofdev->dev); 183 SET_NETDEV_DEV(dev, &ofdev->dev);
186 184
187 err = register_sja1000dev(dev); 185 err = register_sja1000dev(dev);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 21619bb5b869..8e259c541036 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -135,7 +135,7 @@ static int sp_probe(struct platform_device *pdev)
135 break; 135 break;
136 } 136 }
137 137
138 dev_set_drvdata(&pdev->dev, dev); 138 platform_set_drvdata(pdev, dev);
139 SET_NETDEV_DEV(dev, &pdev->dev); 139 SET_NETDEV_DEV(dev, &pdev->dev);
140 140
141 err = register_sja1000dev(dev); 141 err = register_sja1000dev(dev);
@@ -161,12 +161,11 @@ static int sp_probe(struct platform_device *pdev)
161 161
162static int sp_remove(struct platform_device *pdev) 162static int sp_remove(struct platform_device *pdev)
163{ 163{
164 struct net_device *dev = dev_get_drvdata(&pdev->dev); 164 struct net_device *dev = platform_get_drvdata(pdev);
165 struct sja1000_priv *priv = netdev_priv(dev); 165 struct sja1000_priv *priv = netdev_priv(dev);
166 struct resource *res; 166 struct resource *res;
167 167
168 unregister_sja1000dev(dev); 168 unregister_sja1000dev(dev);
169 dev_set_drvdata(&pdev->dev, NULL);
170 169
171 if (priv->reg_base) 170 if (priv->reg_base)
172 iounmap(priv->reg_base); 171 iounmap(priv->reg_base);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 06b7e097d36e..874188ba06f7 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -161,7 +161,7 @@ static void slc_bump(struct slcan *sl)
161 161
162 sl->rbuff[dlc_pos] = 0; /* terminate can_id string */ 162 sl->rbuff[dlc_pos] = 0; /* terminate can_id string */
163 163
164 if (strict_strtoul(sl->rbuff+1, 16, &ultmp)) 164 if (kstrtoul(sl->rbuff+1, 16, &ultmp))
165 return; 165 return;
166 166
167 cf.can_id = ultmp; 167 cf.can_id = ultmp;
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 3a2b45601ec2..65eef1eea2e2 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -594,7 +594,7 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
594 unsigned long val; 594 unsigned long val;
595 int ret; 595 int ret;
596 596
597 ret = strict_strtoul(buf, 0, &val); 597 ret = kstrtoul(buf, 0, &val);
598 if (ret < 0) 598 if (ret < 0)
599 return ret; 599 return ret;
600 val &= 0xFF; 600 val &= 0xFF;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f21fc37ec578..3a349a22d5bc 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1001,7 +1001,6 @@ static int ti_hecc_remove(struct platform_device *pdev)
1001 iounmap(priv->base); 1001 iounmap(priv->base);
1002 release_mem_region(res->start, resource_size(res)); 1002 release_mem_region(res->start, resource_size(res));
1003 free_candev(ndev); 1003 free_candev(ndev);
1004 platform_set_drvdata(pdev, NULL);
1005 1004
1006 return 0; 1005 return 0;
1007} 1006}
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index adb4bf5eb4b4..ede8daa68275 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -723,25 +723,6 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
723 pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n", 723 pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
724 dev->name, skb->len, inw(ioaddr + EL3_STATUS)); 724 dev->name, skb->len, inw(ioaddr + EL3_STATUS));
725 } 725 }
726#if 0
727#ifndef final_version
728 { /* Error-checking code, delete someday. */
729 ushort status = inw(ioaddr + EL3_STATUS);
730 if (status & 0x0001 && /* IRQ line active, missed one. */
731 inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */
732 pr_debug("%s: Missed interrupt, status then %04x now %04x"
733 " Tx %2.2x Rx %4.4x.\n", dev->name, status,
734 inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
735 inw(ioaddr + RX_STATUS));
736 /* Fake interrupt trigger by masking, acknowledge interrupts. */
737 outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
738 outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
739 ioaddr + EL3_CMD);
740 outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
741 }
742 }
743#endif
744#endif
745 /* 726 /*
746 * We lock the driver against other processors. Note 727 * We lock the driver against other processors. Note
747 * we don't need to lock versus the IRQ as we suspended 728 * we don't need to lock versus the IRQ as we suspended
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 072c6f14e8fc..ad5272b348f0 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1012,10 +1012,8 @@ static int vortex_init_one(struct pci_dev *pdev,
1012 goto out; 1012 goto out;
1013 1013
1014 rc = pci_request_regions(pdev, DRV_NAME); 1014 rc = pci_request_regions(pdev, DRV_NAME);
1015 if (rc < 0) { 1015 if (rc < 0)
1016 pci_disable_device(pdev); 1016 goto out_disable;
1017 goto out;
1018 }
1019 1017
1020 unit = vortex_cards_found; 1018 unit = vortex_cards_found;
1021 1019
@@ -1032,23 +1030,24 @@ static int vortex_init_one(struct pci_dev *pdev,
1032 if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ 1030 if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
1033 ioaddr = pci_iomap(pdev, 0, 0); 1031 ioaddr = pci_iomap(pdev, 0, 0);
1034 if (!ioaddr) { 1032 if (!ioaddr) {
1035 pci_release_regions(pdev);
1036 pci_disable_device(pdev);
1037 rc = -ENOMEM; 1033 rc = -ENOMEM;
1038 goto out; 1034 goto out_release;
1039 } 1035 }
1040 1036
1041 rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, 1037 rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
1042 ent->driver_data, unit); 1038 ent->driver_data, unit);
1043 if (rc < 0) { 1039 if (rc < 0)
1044 pci_iounmap(pdev, ioaddr); 1040 goto out_iounmap;
1045 pci_release_regions(pdev);
1046 pci_disable_device(pdev);
1047 goto out;
1048 }
1049 1041
1050 vortex_cards_found++; 1042 vortex_cards_found++;
1043 goto out;
1051 1044
1045out_iounmap:
1046 pci_iounmap(pdev, ioaddr);
1047out_release:
1048 pci_release_regions(pdev);
1049out_disable:
1050 pci_disable_device(pdev);
1052out: 1051out:
1053 return rc; 1052 return rc;
1054} 1053}
@@ -1473,7 +1472,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1473 1472
1474 if (pdev) { 1473 if (pdev) {
1475 vp->pm_state_valid = 1; 1474 vp->pm_state_valid = 1;
1476 pci_save_state(VORTEX_PCI(vp)); 1475 pci_save_state(pdev);
1477 acpi_set_WOL(dev); 1476 acpi_set_WOL(dev);
1478 } 1477 }
1479 retval = register_netdev(dev); 1478 retval = register_netdev(dev);
@@ -3233,21 +3232,20 @@ static void vortex_remove_one(struct pci_dev *pdev)
3233 vp = netdev_priv(dev); 3232 vp = netdev_priv(dev);
3234 3233
3235 if (vp->cb_fn_base) 3234 if (vp->cb_fn_base)
3236 pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base); 3235 pci_iounmap(pdev, vp->cb_fn_base);
3237 3236
3238 unregister_netdev(dev); 3237 unregister_netdev(dev);
3239 3238
3240 if (VORTEX_PCI(vp)) { 3239 pci_set_power_state(pdev, PCI_D0); /* Go active */
3241 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ 3240 if (vp->pm_state_valid)
3242 if (vp->pm_state_valid) 3241 pci_restore_state(pdev);
3243 pci_restore_state(VORTEX_PCI(vp)); 3242 pci_disable_device(pdev);
3244 pci_disable_device(VORTEX_PCI(vp)); 3243
3245 }
3246 /* Should really use issue_and_wait() here */ 3244 /* Should really use issue_and_wait() here */
3247 iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14), 3245 iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
3248 vp->ioaddr + EL3_CMD); 3246 vp->ioaddr + EL3_CMD);
3249 3247
3250 pci_iounmap(VORTEX_PCI(vp), vp->ioaddr); 3248 pci_iounmap(pdev, vp->ioaddr);
3251 3249
3252 pci_free_consistent(pdev, 3250 pci_free_consistent(pdev,
3253 sizeof(struct boom_rx_desc) * RX_RING_SIZE 3251 sizeof(struct boom_rx_desc) * RX_RING_SIZE
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 1c71c763f680..f00c76377b44 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -67,7 +67,6 @@ config PCMCIA_3C589
67config VORTEX 67config VORTEX
68 tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support" 68 tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support"
69 depends on (PCI || EISA) && HAS_IOPORT 69 depends on (PCI || EISA) && HAS_IOPORT
70 select NET_CORE
71 select MII 70 select MII
72 ---help--- 71 ---help---
73 This option enables driver support for a large number of 10Mbps and 72 This option enables driver support for a large number of 10Mbps and
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 47618e505355..b2e840513735 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -849,7 +849,6 @@ static int ne_drv_remove(struct platform_device *pdev)
849 free_irq(dev->irq, dev); 849 free_irq(dev->irq, dev);
850 release_region(dev->base_addr, NE_IO_EXTENT); 850 release_region(dev->base_addr, NE_IO_EXTENT);
851 free_netdev(dev); 851 free_netdev(dev);
852 platform_set_drvdata(pdev, NULL);
853 } 852 }
854 return 0; 853 return 0;
855} 854}
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 587a885de259..92201080e07a 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -676,7 +676,7 @@ static int ne2k_pci_resume (struct pci_dev *pdev)
676 struct net_device *dev = pci_get_drvdata (pdev); 676 struct net_device *dev = pci_get_drvdata (pdev);
677 int rc; 677 int rc;
678 678
679 pci_set_power_state(pdev, 0); 679 pci_set_power_state(pdev, PCI_D0);
680 pci_restore_state(pdev); 680 pci_restore_state(pdev);
681 681
682 rc = pci_enable_device(pdev); 682 rc = pci_enable_device(pdev);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index ed956e08d38b..2037080c504d 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -20,9 +20,11 @@ config SUNGEM_PHY
20source "drivers/net/ethernet/3com/Kconfig" 20source "drivers/net/ethernet/3com/Kconfig"
21source "drivers/net/ethernet/adaptec/Kconfig" 21source "drivers/net/ethernet/adaptec/Kconfig"
22source "drivers/net/ethernet/aeroflex/Kconfig" 22source "drivers/net/ethernet/aeroflex/Kconfig"
23source "drivers/net/ethernet/allwinner/Kconfig"
23source "drivers/net/ethernet/alteon/Kconfig" 24source "drivers/net/ethernet/alteon/Kconfig"
24source "drivers/net/ethernet/amd/Kconfig" 25source "drivers/net/ethernet/amd/Kconfig"
25source "drivers/net/ethernet/apple/Kconfig" 26source "drivers/net/ethernet/apple/Kconfig"
27source "drivers/net/ethernet/arc/Kconfig"
26source "drivers/net/ethernet/atheros/Kconfig" 28source "drivers/net/ethernet/atheros/Kconfig"
27source "drivers/net/ethernet/cadence/Kconfig" 29source "drivers/net/ethernet/cadence/Kconfig"
28source "drivers/net/ethernet/adi/Kconfig" 30source "drivers/net/ethernet/adi/Kconfig"
@@ -63,7 +65,6 @@ config JME
63 tristate "JMicron(R) PCI-Express Gigabit Ethernet support" 65 tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
64 depends on PCI 66 depends on PCI
65 select CRC32 67 select CRC32
66 select NET_CORE
67 select MII 68 select MII
68 ---help--- 69 ---help---
69 This driver supports the PCI-Express gigabit ethernet adapters 70 This driver supports the PCI-Express gigabit ethernet adapters
@@ -95,7 +96,6 @@ config FEALNX
95 tristate "Myson MTD-8xx PCI Ethernet support" 96 tristate "Myson MTD-8xx PCI Ethernet support"
96 depends on PCI 97 depends on PCI
97 select CRC32 98 select CRC32
98 select NET_CORE
99 select MII 99 select MII
100 ---help--- 100 ---help---
101 Say Y here to support the Myson MTD-800 family of PCI-based Ethernet 101 Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
@@ -106,7 +106,6 @@ source "drivers/net/ethernet/8390/Kconfig"
106 106
107config NET_NETX 107config NET_NETX
108 tristate "NetX Ethernet support" 108 tristate "NetX Ethernet support"
109 select NET_CORE
110 select MII 109 select MII
111 depends on ARCH_NETX 110 depends on ARCH_NETX
112 ---help--- 111 ---help---
@@ -124,7 +123,6 @@ source "drivers/net/ethernet/oki-semi/Kconfig"
124config ETHOC 123config ETHOC
125 tristate "OpenCores 10/100 Mbps Ethernet MAC support" 124 tristate "OpenCores 10/100 Mbps Ethernet MAC support"
126 depends on HAS_IOMEM && HAS_DMA 125 depends on HAS_IOMEM && HAS_DMA
127 select NET_CORE
128 select MII 126 select MII
129 select PHYLIB 127 select PHYLIB
130 select CRC32 128 select CRC32
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 8268d85f9448..390bd0bfaa27 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -6,9 +6,11 @@ obj-$(CONFIG_NET_VENDOR_3COM) += 3com/
6obj-$(CONFIG_NET_VENDOR_8390) += 8390/ 6obj-$(CONFIG_NET_VENDOR_8390) += 8390/
7obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ 7obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
8obj-$(CONFIG_GRETH) += aeroflex/ 8obj-$(CONFIG_GRETH) += aeroflex/
9obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
9obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ 10obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
10obj-$(CONFIG_NET_VENDOR_AMD) += amd/ 11obj-$(CONFIG_NET_VENDOR_AMD) += amd/
11obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ 12obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
13obj-$(CONFIG_NET_VENDOR_ARC) += arc/
12obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ 14obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
13obj-$(CONFIG_NET_CADENCE) += cadence/ 15obj-$(CONFIG_NET_CADENCE) += cadence/
14obj-$(CONFIG_NET_BFIN) += adi/ 16obj-$(CONFIG_NET_BFIN) += adi/
diff --git a/drivers/net/ethernet/adaptec/Kconfig b/drivers/net/ethernet/adaptec/Kconfig
index 0bff571b1bb3..5c804bbe3dab 100644
--- a/drivers/net/ethernet/adaptec/Kconfig
+++ b/drivers/net/ethernet/adaptec/Kconfig
@@ -22,7 +22,6 @@ config ADAPTEC_STARFIRE
22 tristate "Adaptec Starfire/DuraLAN support" 22 tristate "Adaptec Starfire/DuraLAN support"
23 depends on PCI 23 depends on PCI
24 select CRC32 24 select CRC32
25 select NET_CORE
26 select MII 25 select MII
27 ---help--- 26 ---help---
28 Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network 27 Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index a9481606bbcd..f952fff6a9a9 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -23,7 +23,6 @@ config BFIN_MAC
23 tristate "Blackfin on-chip MAC support" 23 tristate "Blackfin on-chip MAC support"
24 depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537) 24 depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537)
25 select CRC32 25 select CRC32
26 select NET_CORE
27 select MII 26 select MII
28 select PHYLIB 27 select PHYLIB
29 select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE 28 select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index dada66bfe0d6..e904b3838dcc 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1719,7 +1719,6 @@ out_err_mii_probe:
1719 mdiobus_unregister(lp->mii_bus); 1719 mdiobus_unregister(lp->mii_bus);
1720 mdiobus_free(lp->mii_bus); 1720 mdiobus_free(lp->mii_bus);
1721out_err_probe_mac: 1721out_err_probe_mac:
1722 platform_set_drvdata(pdev, NULL);
1723 free_netdev(ndev); 1722 free_netdev(ndev);
1724 1723
1725 return rc; 1724 return rc;
@@ -1732,8 +1731,6 @@ static int bfin_mac_remove(struct platform_device *pdev)
1732 1731
1733 bfin_phc_release(lp); 1732 bfin_phc_release(lp);
1734 1733
1735 platform_set_drvdata(pdev, NULL);
1736
1737 lp->mii_bus->priv = NULL; 1734 lp->mii_bus->priv = NULL;
1738 1735
1739 unregister_netdev(ndev); 1736 unregister_netdev(ndev);
@@ -1868,7 +1865,6 @@ static int bfin_mii_bus_remove(struct platform_device *pdev)
1868 struct bfin_mii_bus_platform_data *mii_bus_pd = 1865 struct bfin_mii_bus_platform_data *mii_bus_pd =
1869 dev_get_platdata(&pdev->dev); 1866 dev_get_platdata(&pdev->dev);
1870 1867
1871 platform_set_drvdata(pdev, NULL);
1872 mdiobus_unregister(miibus); 1868 mdiobus_unregister(miibus);
1873 kfree(miibus->irq); 1869 kfree(miibus->irq);
1874 mdiobus_free(miibus); 1870 mdiobus_free(miibus);
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 269295403fc4..7ff4b30d55ea 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1565,7 +1565,7 @@ error1:
1565 1565
1566static int greth_of_remove(struct platform_device *of_dev) 1566static int greth_of_remove(struct platform_device *of_dev)
1567{ 1567{
1568 struct net_device *ndev = dev_get_drvdata(&of_dev->dev); 1568 struct net_device *ndev = platform_get_drvdata(of_dev);
1569 struct greth_private *greth = netdev_priv(ndev); 1569 struct greth_private *greth = netdev_priv(ndev);
1570 1570
1571 /* Free descriptor areas */ 1571 /* Free descriptor areas */
@@ -1573,8 +1573,6 @@ static int greth_of_remove(struct platform_device *of_dev)
1573 1573
1574 dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys); 1574 dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
1575 1575
1576 dev_set_drvdata(&of_dev->dev, NULL);
1577
1578 if (greth->phy) 1576 if (greth->phy)
1579 phy_stop(greth->phy); 1577 phy_stop(greth->phy);
1580 mdiobus_unregister(greth->mdio); 1578 mdiobus_unregister(greth->mdio);
diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig
new file mode 100644
index 000000000000..53ad213e865b
--- /dev/null
+++ b/drivers/net/ethernet/allwinner/Kconfig
@@ -0,0 +1,35 @@
1#
2# Allwinner device configuration
3#
4
5config NET_VENDOR_ALLWINNER
6 bool "Allwinner devices"
7 default y
8 depends on ARCH_SUNXI
9 ---help---
10 If you have a network (Ethernet) card belonging to this
11 class, say Y and read the Ethernet-HOWTO, available from
12 <http://www.tldp.org/docs.html#howto>.
13
14 Note that the answer to this question doesn't directly
15 affect the kernel: saying N will just cause the configurator
16 to skip all the questions about Allwinner cards. If you say Y,
17 you will be asked for your specific card in the following
18 questions.
19
20if NET_VENDOR_ALLWINNER
21
22config SUN4I_EMAC
23 tristate "Allwinner A10 EMAC support"
24 depends on ARCH_SUNXI
25 depends on OF
26 select CRC32
27 select MII
28 select PHYLIB
29 ---help---
30 Support for Allwinner A10 EMAC ethernet driver.
31
32 To compile this driver as a module, choose M here. The module
33 will be called sun4i-emac.
34
35endif # NET_VENDOR_ALLWINNER
diff --git a/drivers/net/ethernet/allwinner/Makefile b/drivers/net/ethernet/allwinner/Makefile
new file mode 100644
index 000000000000..03129f796514
--- /dev/null
+++ b/drivers/net/ethernet/allwinner/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Allwinner device drivers.
3#
4
5obj-$(CONFIG_SUN4I_EMAC) += sun4i-emac.o
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
new file mode 100644
index 000000000000..50b853a79d77
--- /dev/null
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -0,0 +1,954 @@
1/*
2 * Allwinner EMAC Fast Ethernet driver for Linux.
3 *
4 * Copyright 2012-2013 Stefan Roese <sr@denx.de>
5 * Copyright 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * Based on the Linux driver provided by Allwinner:
8 * Copyright (C) 1997 Sten Wang
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
14
15#include <linux/clk.h>
16#include <linux/etherdevice.h>
17#include <linux/ethtool.h>
18#include <linux/gpio.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22#include <linux/mii.h>
23#include <linux/module.h>
24#include <linux/netdevice.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27#include <linux/of_mdio.h>
28#include <linux/of_net.h>
29#include <linux/of_platform.h>
30#include <linux/platform_device.h>
31#include <linux/phy.h>
32
33#include "sun4i-emac.h"
34
35#define DRV_NAME "sun4i-emac"
36#define DRV_VERSION "1.02"
37
38#define EMAC_MAX_FRAME_LEN 0x0600
39
40/* Transmit timeout, default 5 seconds. */
41static int watchdog = 5000;
42module_param(watchdog, int, 0400);
43MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
44
45/* EMAC register address locking.
46 *
47 * The EMAC uses an address register to control where data written
48 * to the data register goes. This means that the address register
49 * must be preserved over interrupts or similar calls.
50 *
51 * During interrupt and other critical calls, a spinlock is used to
52 * protect the system, but the calls themselves save the address
53 * in the address register in case they are interrupting another
54 * access to the device.
55 *
56 * For general accesses a lock is provided so that calls which are
57 * allowed to sleep are serialised so that the address register does
58 * not need to be saved. This lock also serves to serialise access
59 * to the EEPROM and PHY access registers which are shared between
60 * these two devices.
61 */
62
63/* The driver supports the original EMACE, and now the two newer
64 * devices, EMACA and EMACB.
65 */
66
67struct emac_board_info {
68 struct clk *clk;
69 struct device *dev;
70 struct platform_device *pdev;
71 spinlock_t lock;
72 void __iomem *membase;
73 u32 msg_enable;
74 struct net_device *ndev;
75 struct sk_buff *skb_last;
76 u16 tx_fifo_stat;
77
78 int emacrx_completed_flag;
79
80 struct phy_device *phy_dev;
81 struct device_node *phy_node;
82 unsigned int link;
83 unsigned int speed;
84 unsigned int duplex;
85
86 phy_interface_t phy_interface;
87};
88
89static void emac_update_speed(struct net_device *dev)
90{
91 struct emac_board_info *db = netdev_priv(dev);
92 unsigned int reg_val;
93
94 /* set EMAC SPEED, depend on PHY */
95 reg_val = readl(db->membase + EMAC_MAC_SUPP_REG);
96 reg_val &= ~(0x1 << 8);
97 if (db->speed == SPEED_100)
98 reg_val |= 1 << 8;
99 writel(reg_val, db->membase + EMAC_MAC_SUPP_REG);
100}
101
102static void emac_update_duplex(struct net_device *dev)
103{
104 struct emac_board_info *db = netdev_priv(dev);
105 unsigned int reg_val;
106
107 /* set duplex depend on phy */
108 reg_val = readl(db->membase + EMAC_MAC_CTL1_REG);
109 reg_val &= ~EMAC_MAC_CTL1_DUPLEX_EN;
110 if (db->duplex)
111 reg_val |= EMAC_MAC_CTL1_DUPLEX_EN;
112 writel(reg_val, db->membase + EMAC_MAC_CTL1_REG);
113}
114
115static void emac_handle_link_change(struct net_device *dev)
116{
117 struct emac_board_info *db = netdev_priv(dev);
118 struct phy_device *phydev = db->phy_dev;
119 unsigned long flags;
120 int status_change = 0;
121
122 if (phydev->link) {
123 if (db->speed != phydev->speed) {
124 spin_lock_irqsave(&db->lock, flags);
125 db->speed = phydev->speed;
126 emac_update_speed(dev);
127 spin_unlock_irqrestore(&db->lock, flags);
128 status_change = 1;
129 }
130
131 if (db->duplex != phydev->duplex) {
132 spin_lock_irqsave(&db->lock, flags);
133 db->duplex = phydev->duplex;
134 emac_update_duplex(dev);
135 spin_unlock_irqrestore(&db->lock, flags);
136 status_change = 1;
137 }
138 }
139
140 if (phydev->link != db->link) {
141 if (!phydev->link) {
142 db->speed = 0;
143 db->duplex = -1;
144 }
145 db->link = phydev->link;
146
147 status_change = 1;
148 }
149
150 if (status_change)
151 phy_print_status(phydev);
152}
153
154static int emac_mdio_probe(struct net_device *dev)
155{
156 struct emac_board_info *db = netdev_priv(dev);
157
158 /* to-do: PHY interrupts are currently not supported */
159
160 /* attach the mac to the phy */
161 db->phy_dev = of_phy_connect(db->ndev, db->phy_node,
162 &emac_handle_link_change, 0,
163 db->phy_interface);
164 if (!db->phy_dev) {
165 netdev_err(db->ndev, "could not find the PHY\n");
166 return -ENODEV;
167 }
168
169 /* mask with MAC supported features */
170 db->phy_dev->supported &= PHY_BASIC_FEATURES;
171 db->phy_dev->advertising = db->phy_dev->supported;
172
173 db->link = 0;
174 db->speed = 0;
175 db->duplex = -1;
176
177 return 0;
178}
179
180static void emac_mdio_remove(struct net_device *dev)
181{
182 struct emac_board_info *db = netdev_priv(dev);
183
184 phy_disconnect(db->phy_dev);
185 db->phy_dev = NULL;
186}
187
188static void emac_reset(struct emac_board_info *db)
189{
190 dev_dbg(db->dev, "resetting device\n");
191
192 /* RESET device */
193 writel(0, db->membase + EMAC_CTL_REG);
194 udelay(200);
195 writel(EMAC_CTL_RESET, db->membase + EMAC_CTL_REG);
196 udelay(200);
197}
198
199static void emac_outblk_32bit(void __iomem *reg, void *data, int count)
200{
201 writesl(reg, data, round_up(count, 4) / 4);
202}
203
204static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
205{
206 readsl(reg, data, round_up(count, 4) / 4);
207}
208
209static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
210{
211 struct emac_board_info *dm = netdev_priv(dev);
212 struct phy_device *phydev = dm->phy_dev;
213
214 if (!netif_running(dev))
215 return -EINVAL;
216
217 if (!phydev)
218 return -ENODEV;
219
220 return phy_mii_ioctl(phydev, rq, cmd);
221}
222
223/* ethtool ops */
224static void emac_get_drvinfo(struct net_device *dev,
225 struct ethtool_drvinfo *info)
226{
227 strlcpy(info->driver, DRV_NAME, sizeof(DRV_NAME));
228 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
229 strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
230}
231
232static int emac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
233{
234 struct emac_board_info *dm = netdev_priv(dev);
235 struct phy_device *phydev = dm->phy_dev;
236
237 if (!phydev)
238 return -ENODEV;
239
240 return phy_ethtool_gset(phydev, cmd);
241}
242
243static int emac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
244{
245 struct emac_board_info *dm = netdev_priv(dev);
246 struct phy_device *phydev = dm->phy_dev;
247
248 if (!phydev)
249 return -ENODEV;
250
251 return phy_ethtool_sset(phydev, cmd);
252}
253
254static const struct ethtool_ops emac_ethtool_ops = {
255 .get_drvinfo = emac_get_drvinfo,
256 .get_settings = emac_get_settings,
257 .set_settings = emac_set_settings,
258 .get_link = ethtool_op_get_link,
259};
260
261static unsigned int emac_setup(struct net_device *ndev)
262{
263 struct emac_board_info *db = netdev_priv(ndev);
264 unsigned int reg_val;
265
266 /* set up TX */
267 reg_val = readl(db->membase + EMAC_TX_MODE_REG);
268
269 writel(reg_val | EMAC_TX_MODE_ABORTED_FRAME_EN,
270 db->membase + EMAC_TX_MODE_REG);
271
272 /* set up RX */
273 reg_val = readl(db->membase + EMAC_RX_CTL_REG);
274
275 writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN |
276 EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN |
277 EMAC_RX_CTL_ACCEPT_MULTICAST_EN |
278 EMAC_RX_CTL_ACCEPT_BROADCAST_EN,
279 db->membase + EMAC_RX_CTL_REG);
280
281 /* set MAC */
282 /* set MAC CTL0 */
283 reg_val = readl(db->membase + EMAC_MAC_CTL0_REG);
284 writel(reg_val | EMAC_MAC_CTL0_RX_FLOW_CTL_EN |
285 EMAC_MAC_CTL0_TX_FLOW_CTL_EN,
286 db->membase + EMAC_MAC_CTL0_REG);
287
288 /* set MAC CTL1 */
289 reg_val = readl(db->membase + EMAC_MAC_CTL1_REG);
290 reg_val |= EMAC_MAC_CTL1_LEN_CHECK_EN;
291 reg_val |= EMAC_MAC_CTL1_CRC_EN;
292 reg_val |= EMAC_MAC_CTL1_PAD_EN;
293 writel(reg_val, db->membase + EMAC_MAC_CTL1_REG);
294
295 /* set up IPGT */
296 writel(EMAC_MAC_IPGT_FULL_DUPLEX, db->membase + EMAC_MAC_IPGT_REG);
297
298 /* set up IPGR */
299 writel((EMAC_MAC_IPGR_IPG1 << 8) | EMAC_MAC_IPGR_IPG2,
300 db->membase + EMAC_MAC_IPGR_REG);
301
302 /* set up Collison window */
303 writel((EMAC_MAC_CLRT_COLLISION_WINDOW << 8) | EMAC_MAC_CLRT_RM,
304 db->membase + EMAC_MAC_CLRT_REG);
305
306 /* set up Max Frame Length */
307 writel(EMAC_MAX_FRAME_LEN,
308 db->membase + EMAC_MAC_MAXF_REG);
309
310 return 0;
311}
312
313static unsigned int emac_powerup(struct net_device *ndev)
314{
315 struct emac_board_info *db = netdev_priv(ndev);
316 unsigned int reg_val;
317
318 /* initial EMAC */
319 /* flush RX FIFO */
320 reg_val = readl(db->membase + EMAC_RX_CTL_REG);
321 reg_val |= 0x8;
322 writel(reg_val, db->membase + EMAC_RX_CTL_REG);
323 udelay(1);
324
325 /* initial MAC */
326 /* soft reset MAC */
327 reg_val = readl(db->membase + EMAC_MAC_CTL0_REG);
328 reg_val &= ~EMAC_MAC_CTL0_SOFT_RESET;
329 writel(reg_val, db->membase + EMAC_MAC_CTL0_REG);
330
331 /* set MII clock */
332 reg_val = readl(db->membase + EMAC_MAC_MCFG_REG);
333 reg_val &= (~(0xf << 2));
334 reg_val |= (0xD << 2);
335 writel(reg_val, db->membase + EMAC_MAC_MCFG_REG);
336
337 /* clear RX counter */
338 writel(0x0, db->membase + EMAC_RX_FBC_REG);
339
340 /* disable all interrupt and clear interrupt status */
341 writel(0, db->membase + EMAC_INT_CTL_REG);
342 reg_val = readl(db->membase + EMAC_INT_STA_REG);
343 writel(reg_val, db->membase + EMAC_INT_STA_REG);
344
345 udelay(1);
346
347 /* set up EMAC */
348 emac_setup(ndev);
349
350 /* set mac_address to chip */
351 writel(ndev->dev_addr[0] << 16 | ndev->dev_addr[1] << 8 | ndev->
352 dev_addr[2], db->membase + EMAC_MAC_A1_REG);
353 writel(ndev->dev_addr[3] << 16 | ndev->dev_addr[4] << 8 | ndev->
354 dev_addr[5], db->membase + EMAC_MAC_A0_REG);
355
356 mdelay(1);
357
358 return 0;
359}
360
361static int emac_set_mac_address(struct net_device *dev, void *p)
362{
363 struct sockaddr *addr = p;
364 struct emac_board_info *db = netdev_priv(dev);
365
366 if (netif_running(dev))
367 return -EBUSY;
368
369 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
370
371 writel(dev->dev_addr[0] << 16 | dev->dev_addr[1] << 8 | dev->
372 dev_addr[2], db->membase + EMAC_MAC_A1_REG);
373 writel(dev->dev_addr[3] << 16 | dev->dev_addr[4] << 8 | dev->
374 dev_addr[5], db->membase + EMAC_MAC_A0_REG);
375
376 return 0;
377}
378
379/* Initialize emac board */
380static void emac_init_device(struct net_device *dev)
381{
382 struct emac_board_info *db = netdev_priv(dev);
383 unsigned long flags;
384 unsigned int reg_val;
385
386 spin_lock_irqsave(&db->lock, flags);
387
388 emac_update_speed(dev);
389 emac_update_duplex(dev);
390
391 /* enable RX/TX */
392 reg_val = readl(db->membase + EMAC_CTL_REG);
393 writel(reg_val | EMAC_CTL_RESET | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN,
394 db->membase + EMAC_CTL_REG);
395
396 /* enable RX/TX0/RX Hlevel interrup */
397 reg_val = readl(db->membase + EMAC_INT_CTL_REG);
398 reg_val |= (0xf << 0) | (0x01 << 8);
399 writel(reg_val, db->membase + EMAC_INT_CTL_REG);
400
401 spin_unlock_irqrestore(&db->lock, flags);
402}
403
404/* Our watchdog timed out. Called by the networking layer */
405static void emac_timeout(struct net_device *dev)
406{
407 struct emac_board_info *db = netdev_priv(dev);
408 unsigned long flags;
409
410 if (netif_msg_timer(db))
411 dev_err(db->dev, "tx time out.\n");
412
413 /* Save previous register address */
414 spin_lock_irqsave(&db->lock, flags);
415
416 netif_stop_queue(dev);
417 emac_reset(db);
418 emac_init_device(dev);
419 /* We can accept TX packets again */
420 dev->trans_start = jiffies;
421 netif_wake_queue(dev);
422
423 /* Restore previous register address */
424 spin_unlock_irqrestore(&db->lock, flags);
425}
426
427/* Hardware start transmission.
428 * Send a packet to media from the upper layer.
429 */
430static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
431{
432 struct emac_board_info *db = netdev_priv(dev);
433 unsigned long channel;
434 unsigned long flags;
435
436 channel = db->tx_fifo_stat & 3;
437 if (channel == 3)
438 return 1;
439
440 channel = (channel == 1 ? 1 : 0);
441
442 spin_lock_irqsave(&db->lock, flags);
443
444 writel(channel, db->membase + EMAC_TX_INS_REG);
445
446 emac_outblk_32bit(db->membase + EMAC_TX_IO_DATA_REG,
447 skb->data, skb->len);
448 dev->stats.tx_bytes += skb->len;
449
450 db->tx_fifo_stat |= 1 << channel;
451 /* TX control: First packet immediately send, second packet queue */
452 if (channel == 0) {
453 /* set TX len */
454 writel(skb->len, db->membase + EMAC_TX_PL0_REG);
455 /* start translate from fifo to phy */
456 writel(readl(db->membase + EMAC_TX_CTL0_REG) | 1,
457 db->membase + EMAC_TX_CTL0_REG);
458
459 /* save the time stamp */
460 dev->trans_start = jiffies;
461 } else if (channel == 1) {
462 /* set TX len */
463 writel(skb->len, db->membase + EMAC_TX_PL1_REG);
464 /* start translate from fifo to phy */
465 writel(readl(db->membase + EMAC_TX_CTL1_REG) | 1,
466 db->membase + EMAC_TX_CTL1_REG);
467
468 /* save the time stamp */
469 dev->trans_start = jiffies;
470 }
471
472 if ((db->tx_fifo_stat & 3) == 3) {
473 /* Second packet */
474 netif_stop_queue(dev);
475 }
476
477 spin_unlock_irqrestore(&db->lock, flags);
478
479 /* free this SKB */
480 dev_kfree_skb(skb);
481
482 return NETDEV_TX_OK;
483}
484
485/* EMAC interrupt handler
486 * receive the packet to upper layer, free the transmitted packet
487 */
488static void emac_tx_done(struct net_device *dev, struct emac_board_info *db,
489 unsigned int tx_status)
490{
491 /* One packet sent complete */
492 db->tx_fifo_stat &= ~(tx_status & 3);
493 if (3 == (tx_status & 3))
494 dev->stats.tx_packets += 2;
495 else
496 dev->stats.tx_packets++;
497
498 if (netif_msg_tx_done(db))
499 dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
500
501 netif_wake_queue(dev);
502}
503
504/* Received a packet and pass to upper layer
505 */
506static void emac_rx(struct net_device *dev)
507{
508 struct emac_board_info *db = netdev_priv(dev);
509 struct sk_buff *skb;
510 u8 *rdptr;
511 bool good_packet;
512 static int rxlen_last;
513 unsigned int reg_val;
514 u32 rxhdr, rxstatus, rxcount, rxlen;
515
516 /* Check packet ready or not */
517 while (1) {
518 /* race warning: the first packet might arrive with
519 * the interrupts disabled, but the second will fix
520 * it
521 */
522 rxcount = readl(db->membase + EMAC_RX_FBC_REG);
523
524 if (netif_msg_rx_status(db))
525 dev_dbg(db->dev, "RXCount: %x\n", rxcount);
526
527 if ((db->skb_last != NULL) && (rxlen_last > 0)) {
528 dev->stats.rx_bytes += rxlen_last;
529
530 /* Pass to upper layer */
531 db->skb_last->protocol = eth_type_trans(db->skb_last,
532 dev);
533 netif_rx(db->skb_last);
534 dev->stats.rx_packets++;
535 db->skb_last = NULL;
536 rxlen_last = 0;
537
538 reg_val = readl(db->membase + EMAC_RX_CTL_REG);
539 reg_val &= ~EMAC_RX_CTL_DMA_EN;
540 writel(reg_val, db->membase + EMAC_RX_CTL_REG);
541 }
542
543 if (!rxcount) {
544 db->emacrx_completed_flag = 1;
545 reg_val = readl(db->membase + EMAC_INT_CTL_REG);
546 reg_val |= (0xf << 0) | (0x01 << 8);
547 writel(reg_val, db->membase + EMAC_INT_CTL_REG);
548
549 /* had one stuck? */
550 rxcount = readl(db->membase + EMAC_RX_FBC_REG);
551 if (!rxcount)
552 return;
553 }
554
555 reg_val = readl(db->membase + EMAC_RX_IO_DATA_REG);
556 if (netif_msg_rx_status(db))
557 dev_dbg(db->dev, "receive header: %x\n", reg_val);
558 if (reg_val != EMAC_UNDOCUMENTED_MAGIC) {
559 /* disable RX */
560 reg_val = readl(db->membase + EMAC_CTL_REG);
561 writel(reg_val & ~EMAC_CTL_RX_EN,
562 db->membase + EMAC_CTL_REG);
563
564 /* Flush RX FIFO */
565 reg_val = readl(db->membase + EMAC_RX_CTL_REG);
566 writel(reg_val | (1 << 3),
567 db->membase + EMAC_RX_CTL_REG);
568
569 do {
570 reg_val = readl(db->membase + EMAC_RX_CTL_REG);
571 } while (reg_val & (1 << 3));
572
573 /* enable RX */
574 reg_val = readl(db->membase + EMAC_CTL_REG);
575 writel(reg_val | EMAC_CTL_RX_EN,
576 db->membase + EMAC_CTL_REG);
577 reg_val = readl(db->membase + EMAC_INT_CTL_REG);
578 reg_val |= (0xf << 0) | (0x01 << 8);
579 writel(reg_val, db->membase + EMAC_INT_CTL_REG);
580
581 db->emacrx_completed_flag = 1;
582
583 return;
584 }
585
586 /* A packet ready now & Get status/length */
587 good_packet = true;
588
589 emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
590 &rxhdr, sizeof(rxhdr));
591
592 if (netif_msg_rx_status(db))
593 dev_dbg(db->dev, "rxhdr: %x\n", *((int *)(&rxhdr)));
594
595 rxlen = EMAC_RX_IO_DATA_LEN(rxhdr);
596 rxstatus = EMAC_RX_IO_DATA_STATUS(rxhdr);
597
598 if (netif_msg_rx_status(db))
599 dev_dbg(db->dev, "RX: status %02x, length %04x\n",
600 rxstatus, rxlen);
601
602 /* Packet Status check */
603 if (rxlen < 0x40) {
604 good_packet = false;
605 if (netif_msg_rx_err(db))
606 dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
607 }
608
609 if (unlikely(!(rxstatus & EMAC_RX_IO_DATA_STATUS_OK))) {
610 good_packet = false;
611
612 if (rxstatus & EMAC_RX_IO_DATA_STATUS_CRC_ERR) {
613 if (netif_msg_rx_err(db))
614 dev_dbg(db->dev, "crc error\n");
615 dev->stats.rx_crc_errors++;
616 }
617
618 if (rxstatus & EMAC_RX_IO_DATA_STATUS_LEN_ERR) {
619 if (netif_msg_rx_err(db))
620 dev_dbg(db->dev, "length error\n");
621 dev->stats.rx_length_errors++;
622 }
623 }
624
625 /* Move data from EMAC */
626 skb = dev_alloc_skb(rxlen + 4);
627 if (good_packet && skb) {
628 skb_reserve(skb, 2);
629 rdptr = (u8 *) skb_put(skb, rxlen - 4);
630
631 /* Read received packet from RX SRAM */
632 if (netif_msg_rx_status(db))
633 dev_dbg(db->dev, "RxLen %x\n", rxlen);
634
635 emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
636 rdptr, rxlen);
637 dev->stats.rx_bytes += rxlen;
638
639 /* Pass to upper layer */
640 skb->protocol = eth_type_trans(skb, dev);
641 netif_rx(skb);
642 dev->stats.rx_packets++;
643 }
644 }
645}
646
647static irqreturn_t emac_interrupt(int irq, void *dev_id)
648{
649 struct net_device *dev = dev_id;
650 struct emac_board_info *db = netdev_priv(dev);
651 int int_status;
652 unsigned long flags;
653 unsigned int reg_val;
654
655 /* A real interrupt coming */
656
657 /* holders of db->lock must always block IRQs */
658 spin_lock_irqsave(&db->lock, flags);
659
660 /* Disable all interrupts */
661 writel(0, db->membase + EMAC_INT_CTL_REG);
662
663 /* Got EMAC interrupt status */
664 /* Got ISR */
665 int_status = readl(db->membase + EMAC_INT_STA_REG);
666 /* Clear ISR status */
667 writel(int_status, db->membase + EMAC_INT_STA_REG);
668
669 if (netif_msg_intr(db))
670 dev_dbg(db->dev, "emac interrupt %02x\n", int_status);
671
672 /* Received the coming packet */
673 if ((int_status & 0x100) && (db->emacrx_completed_flag == 1)) {
674 /* carrier lost */
675 db->emacrx_completed_flag = 0;
676 emac_rx(dev);
677 }
678
679 /* Transmit Interrupt check */
680 if (int_status & (0x01 | 0x02))
681 emac_tx_done(dev, db, int_status);
682
683 if (int_status & (0x04 | 0x08))
684 netdev_info(dev, " ab : %x\n", int_status);
685
686 /* Re-enable interrupt mask */
687 if (db->emacrx_completed_flag == 1) {
688 reg_val = readl(db->membase + EMAC_INT_CTL_REG);
689 reg_val |= (0xf << 0) | (0x01 << 8);
690 writel(reg_val, db->membase + EMAC_INT_CTL_REG);
691 }
692 spin_unlock_irqrestore(&db->lock, flags);
693
694 return IRQ_HANDLED;
695}
696
697#ifdef CONFIG_NET_POLL_CONTROLLER
698/*
699 * Used by netconsole
700 */
701static void emac_poll_controller(struct net_device *dev)
702{
703 disable_irq(dev->irq);
704 emac_interrupt(dev->irq, dev);
705 enable_irq(dev->irq);
706}
707#endif
708
709/* Open the interface.
710 * The interface is opened whenever "ifconfig" actives it.
711 */
712static int emac_open(struct net_device *dev)
713{
714 struct emac_board_info *db = netdev_priv(dev);
715 int ret;
716
717 if (netif_msg_ifup(db))
718 dev_dbg(db->dev, "enabling %s\n", dev->name);
719
720 if (devm_request_irq(db->dev, dev->irq, &emac_interrupt,
721 0, dev->name, dev))
722 return -EAGAIN;
723
724 /* Initialize EMAC board */
725 emac_reset(db);
726 emac_init_device(dev);
727
728 ret = emac_mdio_probe(dev);
729 if (ret < 0) {
730 netdev_err(dev, "cannot probe MDIO bus\n");
731 return ret;
732 }
733
734 phy_start(db->phy_dev);
735 netif_start_queue(dev);
736
737 return 0;
738}
739
740static void emac_shutdown(struct net_device *dev)
741{
742 unsigned int reg_val;
743 struct emac_board_info *db = netdev_priv(dev);
744
745 /* Disable all interrupt */
746 writel(0, db->membase + EMAC_INT_CTL_REG);
747
748 /* clear interupt status */
749 reg_val = readl(db->membase + EMAC_INT_STA_REG);
750 writel(reg_val, db->membase + EMAC_INT_STA_REG);
751
752 /* Disable RX/TX */
753 reg_val = readl(db->membase + EMAC_CTL_REG);
754 reg_val &= ~(EMAC_CTL_TX_EN | EMAC_CTL_RX_EN | EMAC_CTL_RESET);
755 writel(reg_val, db->membase + EMAC_CTL_REG);
756}
757
758/* Stop the interface.
759 * The interface is stopped when it is brought.
760 */
761static int emac_stop(struct net_device *ndev)
762{
763 struct emac_board_info *db = netdev_priv(ndev);
764
765 if (netif_msg_ifdown(db))
766 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
767
768 netif_stop_queue(ndev);
769 netif_carrier_off(ndev);
770
771 phy_stop(db->phy_dev);
772
773 emac_mdio_remove(ndev);
774
775 emac_shutdown(ndev);
776
777 return 0;
778}
779
780static const struct net_device_ops emac_netdev_ops = {
781 .ndo_open = emac_open,
782 .ndo_stop = emac_stop,
783 .ndo_start_xmit = emac_start_xmit,
784 .ndo_tx_timeout = emac_timeout,
785 .ndo_do_ioctl = emac_ioctl,
786 .ndo_change_mtu = eth_change_mtu,
787 .ndo_validate_addr = eth_validate_addr,
788 .ndo_set_mac_address = emac_set_mac_address,
789#ifdef CONFIG_NET_POLL_CONTROLLER
790 .ndo_poll_controller = emac_poll_controller,
791#endif
792};
793
794/* Search EMAC board, allocate space and register it
795 */
796static int emac_probe(struct platform_device *pdev)
797{
798 struct device_node *np = pdev->dev.of_node;
799 struct emac_board_info *db;
800 struct net_device *ndev;
801 int ret = 0;
802 const char *mac_addr;
803
804 ndev = alloc_etherdev(sizeof(struct emac_board_info));
805 if (!ndev) {
806 dev_err(&pdev->dev, "could not allocate device.\n");
807 return -ENOMEM;
808 }
809
810 SET_NETDEV_DEV(ndev, &pdev->dev);
811
812 db = netdev_priv(ndev);
813 memset(db, 0, sizeof(*db));
814
815 db->dev = &pdev->dev;
816 db->ndev = ndev;
817 db->pdev = pdev;
818
819 spin_lock_init(&db->lock);
820
821 db->membase = of_iomap(np, 0);
822 if (!db->membase) {
823 dev_err(&pdev->dev, "failed to remap registers\n");
824 ret = -ENOMEM;
825 goto out;
826 }
827
828 /* fill in parameters for net-dev structure */
829 ndev->base_addr = (unsigned long)db->membase;
830 ndev->irq = irq_of_parse_and_map(np, 0);
831 if (ndev->irq == -ENXIO) {
832 netdev_err(ndev, "No irq resource\n");
833 ret = ndev->irq;
834 goto out;
835 }
836
837 db->clk = devm_clk_get(&pdev->dev, NULL);
838 if (IS_ERR(db->clk))
839 goto out;
840
841 clk_prepare_enable(db->clk);
842
843 db->phy_node = of_parse_phandle(np, "phy", 0);
844 if (!db->phy_node) {
845 dev_err(&pdev->dev, "no associated PHY\n");
846 ret = -ENODEV;
847 goto out;
848 }
849
850 /* Read MAC-address from DT */
851 mac_addr = of_get_mac_address(np);
852 if (mac_addr)
853 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
854
855 /* Check if the MAC address is valid, if not get a random one */
856 if (!is_valid_ether_addr(ndev->dev_addr)) {
857 eth_hw_addr_random(ndev);
858 dev_warn(&pdev->dev, "using random MAC address %pM\n",
859 ndev->dev_addr);
860 }
861
862 db->emacrx_completed_flag = 1;
863 emac_powerup(ndev);
864 emac_reset(db);
865
866 ether_setup(ndev);
867
868 ndev->netdev_ops = &emac_netdev_ops;
869 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
870 ndev->ethtool_ops = &emac_ethtool_ops;
871
872 platform_set_drvdata(pdev, ndev);
873
874 /* Carrier starts down, phylib will bring it up */
875 netif_carrier_off(ndev);
876
877 ret = register_netdev(ndev);
878 if (ret) {
879 dev_err(&pdev->dev, "Registering netdev failed!\n");
880 ret = -ENODEV;
881 goto out;
882 }
883
884 dev_info(&pdev->dev, "%s: at %p, IRQ %d MAC: %pM\n",
885 ndev->name, db->membase, ndev->irq, ndev->dev_addr);
886
887 return 0;
888
889out:
890 dev_err(db->dev, "not found (%d).\n", ret);
891
892 free_netdev(ndev);
893
894 return ret;
895}
896
897static int emac_remove(struct platform_device *pdev)
898{
899 struct net_device *ndev = platform_get_drvdata(pdev);
900
901 unregister_netdev(ndev);
902 free_netdev(ndev);
903
904 dev_dbg(&pdev->dev, "released and freed device\n");
905 return 0;
906}
907
908static int emac_suspend(struct platform_device *dev, pm_message_t state)
909{
910 struct net_device *ndev = platform_get_drvdata(dev);
911
912 netif_carrier_off(ndev);
913 netif_device_detach(ndev);
914 emac_shutdown(ndev);
915
916 return 0;
917}
918
919static int emac_resume(struct platform_device *dev)
920{
921 struct net_device *ndev = platform_get_drvdata(dev);
922 struct emac_board_info *db = netdev_priv(ndev);
923
924 emac_reset(db);
925 emac_init_device(ndev);
926 netif_device_attach(ndev);
927
928 return 0;
929}
930
931static const struct of_device_id emac_of_match[] = {
932 {.compatible = "allwinner,sun4i-emac",},
933 {},
934};
935
936MODULE_DEVICE_TABLE(of, emac_of_match);
937
938static struct platform_driver emac_driver = {
939 .driver = {
940 .name = "sun4i-emac",
941 .of_match_table = emac_of_match,
942 },
943 .probe = emac_probe,
944 .remove = emac_remove,
945 .suspend = emac_suspend,
946 .resume = emac_resume,
947};
948
949module_platform_driver(emac_driver);
950
951MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
952MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
953MODULE_DESCRIPTION("Allwinner A10 emac network driver");
954MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h
new file mode 100644
index 000000000000..38c72d9ec600
--- /dev/null
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.h
@@ -0,0 +1,108 @@
1/*
2 * Allwinner EMAC Fast Ethernet driver for Linux.
3 *
4 * Copyright 2012 Stefan Roese <sr@denx.de>
5 * Copyright 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * Based on the Linux driver provided by Allwinner:
8 * Copyright (C) 1997 Sten Wang
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
14
#ifndef _SUN4I_EMAC_H_
#define _SUN4I_EMAC_H_

/* Register offsets are relative to the EMAC MMIO base.
 * Bit masks immediately follow the register they belong to.
 */

/* Global control register */
#define EMAC_CTL_REG		(0x00)
#define EMAC_CTL_RESET		(1 << 0)
#define EMAC_CTL_TX_EN		(1 << 1)
#define EMAC_CTL_RX_EN		(1 << 2)

/* Transmit path registers */
#define EMAC_TX_MODE_REG	(0x04)
#define EMAC_TX_MODE_ABORTED_FRAME_EN	(1 << 0)
#define EMAC_TX_MODE_DMA_EN	(1 << 1)
#define EMAC_TX_FLOW_REG	(0x08)
#define EMAC_TX_CTL0_REG	(0x0c)
#define EMAC_TX_CTL1_REG	(0x10)
#define EMAC_TX_INS_REG		(0x14)
#define EMAC_TX_PL0_REG		(0x18)
#define EMAC_TX_PL1_REG		(0x1c)
#define EMAC_TX_STA_REG		(0x20)
#define EMAC_TX_IO_DATA_REG	(0x24)
#define EMAC_TX_IO_DATA1_REG	(0x28)
#define EMAC_TX_TSVL0_REG	(0x2c)
#define EMAC_TX_TSVH0_REG	(0x30)
#define EMAC_TX_TSVL1_REG	(0x34)
#define EMAC_TX_TSVH1_REG	(0x38)

/* Receive control register and its filter/pass-through enable bits */
#define EMAC_RX_CTL_REG		(0x3c)
#define EMAC_RX_CTL_AUTO_DRQ_EN	(1 << 1)
#define EMAC_RX_CTL_DMA_EN	(1 << 2)
#define EMAC_RX_CTL_PASS_ALL_EN	(1 << 4)
#define EMAC_RX_CTL_PASS_CTL_EN	(1 << 5)
#define EMAC_RX_CTL_PASS_CRC_ERR_EN	(1 << 6)
#define EMAC_RX_CTL_PASS_LEN_ERR_EN	(1 << 7)
#define EMAC_RX_CTL_PASS_LEN_OOR_EN	(1 << 8)
#define EMAC_RX_CTL_ACCEPT_UNICAST_EN	(1 << 16)
#define EMAC_RX_CTL_DA_FILTER_EN	(1 << 17)
#define EMAC_RX_CTL_ACCEPT_MULTICAST_EN	(1 << 20)
#define EMAC_RX_CTL_HASH_FILTER_EN	(1 << 21)
#define EMAC_RX_CTL_ACCEPT_BROADCAST_EN	(1 << 22)
#define EMAC_RX_CTL_SA_FILTER_EN	(1 << 24)
#define EMAC_RX_CTL_SA_FILTER_INVERT_EN	(1 << 25)

/* Receive hash filter, status and data FIFO registers */
#define EMAC_RX_HASH0_REG	(0x40)
#define EMAC_RX_HASH1_REG	(0x44)
#define EMAC_RX_STA_REG		(0x48)
#define EMAC_RX_IO_DATA_REG	(0x4c)
/* Split the RX I/O data word: low 16 bits = length, high 16 bits = status.
 * NOTE(review): the x argument is not parenthesized in the expansion -
 * callers must pass a plain lvalue, not an expression.
 */
#define EMAC_RX_IO_DATA_LEN(x)	(x & 0xffff)
#define EMAC_RX_IO_DATA_STATUS(x)	((x >> 16) & 0xffff)
#define EMAC_RX_IO_DATA_STATUS_CRC_ERR	(1 << 4)
#define EMAC_RX_IO_DATA_STATUS_LEN_ERR	(3 << 5)
#define EMAC_RX_IO_DATA_STATUS_OK	(1 << 7)
#define EMAC_RX_FBC_REG		(0x50)

/* Interrupt control/status */
#define EMAC_INT_CTL_REG	(0x54)
#define EMAC_INT_STA_REG	(0x58)

/* MAC block registers */
#define EMAC_MAC_CTL0_REG	(0x5c)
#define EMAC_MAC_CTL0_RX_FLOW_CTL_EN	(1 << 2)
#define EMAC_MAC_CTL0_TX_FLOW_CTL_EN	(1 << 3)
#define EMAC_MAC_CTL0_SOFT_RESET	(1 << 15)
#define EMAC_MAC_CTL1_REG	(0x60)
#define EMAC_MAC_CTL1_DUPLEX_EN	(1 << 0)
#define EMAC_MAC_CTL1_LEN_CHECK_EN	(1 << 1)
#define EMAC_MAC_CTL1_HUGE_FRAME_EN	(1 << 2)
#define EMAC_MAC_CTL1_DELAYED_CRC_EN	(1 << 3)
#define EMAC_MAC_CTL1_CRC_EN	(1 << 4)
#define EMAC_MAC_CTL1_PAD_EN	(1 << 5)
#define EMAC_MAC_CTL1_PAD_CRC_EN	(1 << 6)
#define EMAC_MAC_CTL1_AD_SHORT_FRAME_EN	(1 << 7)
#define EMAC_MAC_CTL1_BACKOFF_DIS	(1 << 12)
/* Inter-packet gap / collision timing */
#define EMAC_MAC_IPGT_REG	(0x64)
#define EMAC_MAC_IPGT_HALF_DUPLEX	(0x12)
#define EMAC_MAC_IPGT_FULL_DUPLEX	(0x15)
#define EMAC_MAC_IPGR_REG	(0x68)
#define EMAC_MAC_IPGR_IPG1	(0x0c)
#define EMAC_MAC_IPGR_IPG2	(0x12)
#define EMAC_MAC_CLRT_REG	(0x6c)
#define EMAC_MAC_CLRT_COLLISION_WINDOW	(0x37)
#define EMAC_MAC_CLRT_RM	(0x0f)
#define EMAC_MAC_MAXF_REG	(0x70)
#define EMAC_MAC_SUPP_REG	(0x74)
#define EMAC_MAC_TEST_REG	(0x78)
#define EMAC_MAC_MCFG_REG	(0x7c)
/* Station MAC address (3 x 16-bit halves) and source-address filters */
#define EMAC_MAC_A0_REG		(0x98)
#define EMAC_MAC_A1_REG		(0x9c)
#define EMAC_MAC_A2_REG		(0xa0)
#define EMAC_SAFX_L_REG0	(0xa4)
#define EMAC_SAFX_H_REG0	(0xa8)
#define EMAC_SAFX_L_REG1	(0xac)
#define EMAC_SAFX_H_REG1	(0xb0)
#define EMAC_SAFX_L_REG2	(0xb4)
#define EMAC_SAFX_H_REG2	(0xb8)
#define EMAC_SAFX_L_REG3	(0xbc)
#define EMAC_SAFX_H_REG3	(0xc0)

#define EMAC_PHY_DUPLEX		(1 << 8)

#define EMAC_EEPROM_MAGIC	(0x444d394b)
#define EMAC_UNDOCUMENTED_MAGIC	(0x0143414d)
#endif /* _SUN4I_EMAC_H_ */
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index b7894f8af9d1..219be1bf3cfc 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -702,19 +702,6 @@ static struct pci_driver acenic_pci_driver = {
702 .remove = acenic_remove_one, 702 .remove = acenic_remove_one,
703}; 703};
704 704
705static int __init acenic_init(void)
706{
707 return pci_register_driver(&acenic_pci_driver);
708}
709
710static void __exit acenic_exit(void)
711{
712 pci_unregister_driver(&acenic_pci_driver);
713}
714
715module_init(acenic_init);
716module_exit(acenic_exit);
717
718static void ace_free_descriptors(struct net_device *dev) 705static void ace_free_descriptors(struct net_device *dev)
719{ 706{
720 struct ace_private *ap = netdev_priv(dev); 707 struct ace_private *ap = netdev_priv(dev);
@@ -3199,3 +3186,5 @@ static int read_eeprom_byte(struct net_device *dev, unsigned long offset)
3199 ap->name, offset); 3186 ap->name, offset);
3200 goto out; 3187 goto out;
3201} 3188}
3189
3190module_pci_driver(acenic_pci_driver);
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 13d74aa4033d..562df46e0a82 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -34,7 +34,6 @@ config AMD8111_ETH
34 tristate "AMD 8111 (new PCI LANCE) support" 34 tristate "AMD 8111 (new PCI LANCE) support"
35 depends on PCI 35 depends on PCI
36 select CRC32 36 select CRC32
37 select NET_CORE
38 select MII 37 select MII
39 ---help--- 38 ---help---
40 If you have an AMD 8111-based PCI LANCE ethernet card, 39 If you have an AMD 8111-based PCI LANCE ethernet card,
@@ -60,7 +59,6 @@ config PCNET32
60 tristate "AMD PCnet32 PCI support" 59 tristate "AMD PCnet32 PCI support"
61 depends on PCI 60 depends on PCI
62 select CRC32 61 select CRC32
63 select NET_CORE
64 select MII 62 select MII
65 ---help--- 63 ---help---
66 If you have a PCnet32 or PCnetPCI based network (Ethernet) card, 64 If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 8e6b665a6726..1b1429d5d5c2 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1813,7 +1813,7 @@ static const struct net_device_ops amd8111e_netdev_ops = {
1813static int amd8111e_probe_one(struct pci_dev *pdev, 1813static int amd8111e_probe_one(struct pci_dev *pdev,
1814 const struct pci_device_id *ent) 1814 const struct pci_device_id *ent)
1815{ 1815{
1816 int err,i,pm_cap; 1816 int err, i;
1817 unsigned long reg_addr,reg_len; 1817 unsigned long reg_addr,reg_len;
1818 struct amd8111e_priv* lp; 1818 struct amd8111e_priv* lp;
1819 struct net_device* dev; 1819 struct net_device* dev;
@@ -1842,7 +1842,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
1842 pci_set_master(pdev); 1842 pci_set_master(pdev);
1843 1843
1844 /* Find power-management capability. */ 1844 /* Find power-management capability. */
1845 if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){ 1845 if (!pdev->pm_cap) {
1846 printk(KERN_ERR "amd8111e: No Power Management capability, " 1846 printk(KERN_ERR "amd8111e: No Power Management capability, "
1847 "exiting.\n"); 1847 "exiting.\n");
1848 err = -ENODEV; 1848 err = -ENODEV;
@@ -1875,7 +1875,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
1875 lp = netdev_priv(dev); 1875 lp = netdev_priv(dev);
1876 lp->pci_dev = pdev; 1876 lp->pci_dev = pdev;
1877 lp->amd8111e_net_dev = dev; 1877 lp->amd8111e_net_dev = dev;
1878 lp->pm_cap = pm_cap; 1878 lp->pm_cap = pdev->pm_cap;
1879 1879
1880 spin_lock_init(&lp->lock); 1880 spin_lock_init(&lp->lock);
1881 1881
@@ -1981,15 +1981,4 @@ static struct pci_driver amd8111e_driver = {
1981 .resume = amd8111e_resume 1981 .resume = amd8111e_resume
1982}; 1982};
1983 1983
1984static int __init amd8111e_init(void) 1984module_pci_driver(amd8111e_driver);
1985{
1986 return pci_register_driver(&amd8111e_driver);
1987}
1988
1989static void __exit amd8111e_cleanup(void)
1990{
1991 pci_unregister_driver(&amd8111e_driver);
1992}
1993
1994module_init(amd8111e_init);
1995module_exit(amd8111e_cleanup);
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 688aede742c7..ceb45bc963a9 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1301,8 +1301,6 @@ static int au1000_remove(struct platform_device *pdev)
1301 int i; 1301 int i;
1302 struct resource *base, *macen; 1302 struct resource *base, *macen;
1303 1303
1304 platform_set_drvdata(pdev, NULL);
1305
1306 unregister_netdev(dev); 1304 unregister_netdev(dev);
1307 mdiobus_unregister(aup->mii_bus); 1305 mdiobus_unregister(aup->mii_bus);
1308 mdiobus_free(aup->mii_bus); 1306 mdiobus_free(aup->mii_bus);
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index f47b780892e9..ece56831a647 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1470,7 +1470,7 @@ no_link_test:
1470 goto fail; 1470 goto fail;
1471 } 1471 }
1472 1472
1473 dev_set_drvdata(&op->dev, lp); 1473 platform_set_drvdata(op, lp);
1474 1474
1475 printk(KERN_INFO "%s: LANCE %pM\n", 1475 printk(KERN_INFO "%s: LANCE %pM\n",
1476 dev->name, dev->dev_addr); 1476 dev->name, dev->dev_addr);
@@ -1501,7 +1501,7 @@ static int sunlance_sbus_probe(struct platform_device *op)
1501 1501
1502static int sunlance_sbus_remove(struct platform_device *op) 1502static int sunlance_sbus_remove(struct platform_device *op)
1503{ 1503{
1504 struct lance_private *lp = dev_get_drvdata(&op->dev); 1504 struct lance_private *lp = platform_get_drvdata(op);
1505 struct net_device *net_dev = lp->dev; 1505 struct net_device *net_dev = lp->dev;
1506 1506
1507 unregister_netdev(net_dev); 1507 unregister_netdev(net_dev);
@@ -1510,8 +1510,6 @@ static int sunlance_sbus_remove(struct platform_device *op)
1510 1510
1511 free_netdev(net_dev); 1511 free_netdev(net_dev);
1512 1512
1513 dev_set_drvdata(&op->dev, NULL);
1514
1515 return 0; 1513 return 0;
1516} 1514}
1517 1515
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index f36bbd6d5085..a597b766f080 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1016,7 +1016,6 @@ static void bmac_set_multicast(struct net_device *dev)
1016static void bmac_set_multicast(struct net_device *dev) 1016static void bmac_set_multicast(struct net_device *dev)
1017{ 1017{
1018 struct netdev_hw_addr *ha; 1018 struct netdev_hw_addr *ha;
1019 int i;
1020 unsigned short rx_cfg; 1019 unsigned short rx_cfg;
1021 u32 crc; 1020 u32 crc;
1022 1021
@@ -1030,14 +1029,12 @@ static void bmac_set_multicast(struct net_device *dev)
1030 rx_cfg |= RxPromiscEnable; 1029 rx_cfg |= RxPromiscEnable;
1031 bmwrite(dev, RXCFG, rx_cfg); 1030 bmwrite(dev, RXCFG, rx_cfg);
1032 } else { 1031 } else {
1033 u16 hash_table[4]; 1032 u16 hash_table[4] = { 0 };
1034 1033
1035 rx_cfg = bmread(dev, RXCFG); 1034 rx_cfg = bmread(dev, RXCFG);
1036 rx_cfg &= ~RxPromiscEnable; 1035 rx_cfg &= ~RxPromiscEnable;
1037 bmwrite(dev, RXCFG, rx_cfg); 1036 bmwrite(dev, RXCFG, rx_cfg);
1038 1037
1039 for(i = 0; i < 4; i++) hash_table[i] = 0;
1040
1041 netdev_for_each_mc_addr(ha, dev) { 1038 netdev_for_each_mc_addr(ha, dev) {
1042 crc = ether_crc_le(6, ha->addr); 1039 crc = ether_crc_le(6, ha->addr);
1043 crc >>= 26; 1040 crc >>= 26;
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
new file mode 100644
index 000000000000..514c57fd26f1
--- /dev/null
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -0,0 +1,31 @@
1#
2# ARC EMAC network device configuration
3#
4
5config NET_VENDOR_ARC
6 bool "ARC devices"
7 default y
8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say Y
10 and read the Ethernet-HOWTO, available from
11 <http://www.tldp.org/docs.html#howto>.
12
13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all
15 the questions about ARC cards. If you say Y, you will be asked for
16 your specific card in the following questions.
17
18if NET_VENDOR_ARC
19
20config ARC_EMAC
21 tristate "ARC EMAC support"
22 select MII
23 select PHYLIB
24 depends on OF_IRQ
25 depends on OF_NET
26 ---help---
27 On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
28 non-standard on-chip ethernet device ARC EMAC 10/100 is used.
29 Say Y here if you have such a board. If unsure, say N.
30
31endif # NET_VENDOR_ARC
diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile
new file mode 100644
index 000000000000..00c8657637d5
--- /dev/null
+++ b/drivers/net/ethernet/arc/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the ARC network device drivers.
3#
4
5arc_emac-objs := emac_main.o emac_mdio.o
6obj-$(CONFIG_ARC_EMAC) += arc_emac.o
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
new file mode 100644
index 000000000000..dc08678bf9a4
--- /dev/null
+++ b/drivers/net/ethernet/arc/emac.h
@@ -0,0 +1,214 @@
1/*
2 * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
3 *
4 * Registers and bits definitions of ARC EMAC
5 */
6
7#ifndef ARC_EMAC_H
8#define ARC_EMAC_H
9
10#include <linux/device.h>
11#include <linux/dma-mapping.h>
12#include <linux/netdevice.h>
13#include <linux/phy.h>
14
/* STATUS and ENABLE Register bit masks */
#define TXINT_MASK	(1<<0)	/* Transmit interrupt */
#define RXINT_MASK	(1<<1)	/* Receive interrupt */
#define ERR_MASK	(1<<2)	/* Error interrupt */
#define TXCH_MASK	(1<<3)	/* Transmit chaining error interrupt */
#define MSER_MASK	(1<<4)	/* Missed packet counter error */
#define RXCR_MASK	(1<<8)	/* RXCRCERR counter rolled over */
#define RXFR_MASK	(1<<9)	/* RXFRAMEERR counter rolled over */
#define RXFL_MASK	(1<<10)	/* RXOFLOWERR counter rolled over */
#define MDIO_MASK	(1<<12)	/* MDIO complete interrupt */
/* NOTE(review): 1<<31 left-shifts into the sign bit of int; prefer 1U<<31 */
#define TXPL_MASK	(1<<31)	/* Force polling of BD by EMAC */

/* CONTROL Register bit masks */
#define EN_MASK		(1<<0)	/* VMAC enable */
#define TXRN_MASK	(1<<3)	/* TX enable */
#define RXRN_MASK	(1<<4)	/* RX enable */
#define DSBC_MASK	(1<<8)	/* Disable receive broadcast */
#define ENFL_MASK	(1<<10)	/* Enable Full-duplex */
#define PROM_MASK	(1<<11)	/* Promiscuous mode */

/* Buffer descriptor INFO bit masks */
#define OWN_MASK	(1<<31)	/* 0-CPU owns buffer, 1-EMAC owns buffer */
#define FIRST_MASK	(1<<16)	/* First buffer in chain */
#define LAST_MASK	(1<<17)	/* Last buffer in chain */
#define LEN_MASK	0x000007FF	/* last 11 bits */
/* Tx completion error bits (see arc_emac_tx_clean accounting) */
#define CRLS	(1<<21)
#define DEFR	(1<<22)	/* counted as tx_carrier_errors */
#define DROP	(1<<23)
#define RTRY	(1<<24)
#define LTCL	(1<<28)	/* counted as collisions */
#define UFLO	(1<<29)	/* counted as tx_fifo_errors */

/* Aliases making BD ownership handover explicit */
#define FOR_EMAC	OWN_MASK
#define FOR_CPU	0

/* ARC EMAC register set combines entries for MAC and MDIO.
 * Values are word indices; arc_reg_set/get scale by sizeof(int).
 */
enum {
	R_ID = 0,
	R_STATUS,
	R_ENABLE,
	R_CTRL,
	R_POLLRATE,
	R_RXERR,
	R_MISS,
	R_TX_RING,
	R_RX_RING,
	R_ADDRL,
	R_ADDRH,
	R_LAFL,
	R_LAFH,
	R_MDIO,
};

#define TX_TIMEOUT		(400*HZ/1000)	/* Transmission timeout */

#define ARC_EMAC_NAPI_WEIGHT	40		/* Workload for NAPI */

#define EMAC_BUFFER_SIZE	1536	/* EMAC buffer size */
73
/**
 * struct arc_emac_bd - EMAC buffer descriptor (BD).
 *
 * @info: Contains status information on the buffer itself.
 * @data: 32-bit byte addressable pointer to the packet data.
 *
 * NOTE(review): this layout is consumed by the EMAC hardware, yet @data is
 * declared dma_addr_t, which may be 64-bit on some configs - confirm the
 * targets using this driver always have a 32-bit dma_addr_t.
 */
struct arc_emac_bd {
	__le32 info;
	dma_addr_t data;
};
84
/* Number of Rx/Tx BD's */
#define RX_BD_NUM	128
#define TX_BD_NUM	128

/* Byte sizes of the Rx/Tx descriptor rings */
#define RX_RING_SZ	(RX_BD_NUM * sizeof(struct arc_emac_bd))
#define TX_RING_SZ	(TX_BD_NUM * sizeof(struct arc_emac_bd))
91
/**
 * struct buffer_state - Stores Rx/Tx buffer state.
 * @skb: Pointer to socket buffer.
 * @addr: Start address of DMA-mapped memory region.
 * @len: Length of DMA-mapped memory region.
 */
struct buffer_state {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(addr);
	DEFINE_DMA_UNMAP_LEN(len);
};
103
/**
 * struct arc_emac_priv - Storage of EMAC's private information.
 * @dev:	Pointer to the current device.
 * @ndev:	Pointer to the current network device.
 * @phy_dev:	Pointer to attached PHY device.
 * @bus:	Pointer to the current MII bus.
 * @regs:	Base address of EMAC memory-mapped control registers.
 * @napi:	Structure for NAPI.
 * @stats:	Network device statistics.
 * @rxbd:	Pointer to Rx BD ring.
 * @txbd:	Pointer to Tx BD ring.
 * @rxbd_dma:	DMA handle for Rx BD ring.
 * @txbd_dma:	DMA handle for Tx BD ring.
 * @rx_buff:	Storage for Rx buffers states.
 * @tx_buff:	Storage for Tx buffers states.
 * @txbd_curr:	Index of Tx BD to use on the next "ndo_start_xmit".
 * @txbd_dirty:	Index of Tx BD to free on the next Tx interrupt.
 * @last_rx_bd:	Index of the last Rx BD we've got from EMAC.
 * @link:	PHY's last seen link state.
 * @duplex:	PHY's last set duplex mode.
 * @speed:	PHY's last set speed.
 * @max_speed:	Maximum supported by current system network data-rate.
 */
struct arc_emac_priv {
	/* Devices */
	struct device *dev;
	struct net_device *ndev;
	struct phy_device *phy_dev;
	struct mii_bus *bus;

	void __iomem *regs;

	struct napi_struct napi;
	/* Driver-private copy of counters (not ndev->stats). */
	struct net_device_stats stats;

	/* Descriptor rings (CPU pointers + DMA handles for the device). */
	struct arc_emac_bd *rxbd;
	struct arc_emac_bd *txbd;

	dma_addr_t rxbd_dma;
	dma_addr_t txbd_dma;

	/* Per-BD skb/DMA bookkeeping, parallel to the rings above. */
	struct buffer_state rx_buff[RX_BD_NUM];
	struct buffer_state tx_buff[TX_BD_NUM];
	unsigned int txbd_curr;
	unsigned int txbd_dirty;

	unsigned int last_rx_bd;

	/* Cached PHY state used by arc_emac_adjust_link */
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
	unsigned int max_speed;
};
157
158/**
159 * arc_reg_set - Sets EMAC register with provided value.
160 * @priv: Pointer to ARC EMAC private data structure.
161 * @reg: Register offset from base address.
162 * @value: Value to set in register.
163 */
164static inline void arc_reg_set(struct arc_emac_priv *priv, int reg, int value)
165{
166 iowrite32(value, priv->regs + reg * sizeof(int));
167}
168
169/**
170 * arc_reg_get - Gets value of specified EMAC register.
171 * @priv: Pointer to ARC EMAC private data structure.
172 * @reg: Register offset from base address.
173 *
174 * returns: Value of requested register.
175 */
176static inline unsigned int arc_reg_get(struct arc_emac_priv *priv, int reg)
177{
178 return ioread32(priv->regs + reg * sizeof(int));
179}
180
/**
 * arc_reg_or - Applies mask to specified EMAC register - ("reg" | "mask").
 * @priv: Pointer to ARC EMAC private data structure.
 * @reg: Register offset from base address.
 * @mask: Mask to apply to specified register.
 *
 * Read-modify-write: not atomic, callers must serialize access.
 */
static inline void arc_reg_or(struct arc_emac_priv *priv, int reg, int mask)
{
	arc_reg_set(priv, reg, arc_reg_get(priv, reg) | mask);
}
195
/**
 * arc_reg_clr - Applies mask to specified EMAC register - ("reg" & ~"mask").
 * @priv: Pointer to ARC EMAC private data structure.
 * @reg: Register offset from base address.
 * @mask: Mask to apply to specified register.
 *
 * Read-modify-write: not atomic, callers must serialize access.
 */
static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask)
{
	arc_reg_set(priv, reg, arc_reg_get(priv, reg) & ~mask);
}
210
211int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv);
212int arc_mdio_remove(struct arc_emac_priv *priv);
213
214#endif /* ARC_EMAC_H */
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
new file mode 100644
index 000000000000..f1b121ee5525
--- /dev/null
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -0,0 +1,819 @@
1/*
2 * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Driver for the ARC EMAC 10100 (hardware revision 5)
9 *
10 * Contributors:
11 * Amit Bhor
12 * Sameer Dhavale
13 * Vineet Gupta
14 */
15
16#include <linux/etherdevice.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/module.h>
20#include <linux/of_address.h>
21#include <linux/of_irq.h>
22#include <linux/of_mdio.h>
23#include <linux/of_net.h>
24#include <linux/of_platform.h>
25
26#include "emac.h"
27
28#define DRV_NAME "arc_emac"
29#define DRV_VERSION "1.0"
30
31/**
32 * arc_emac_adjust_link - Adjust the PHY link duplex.
33 * @ndev: Pointer to the net_device structure.
34 *
35 * This function is called to change the duplex setting after auto negotiation
36 * is done by the PHY.
37 */
38static void arc_emac_adjust_link(struct net_device *ndev)
39{
40 struct arc_emac_priv *priv = netdev_priv(ndev);
41 struct phy_device *phy_dev = priv->phy_dev;
42 unsigned int reg, state_changed = 0;
43
44 if (priv->link != phy_dev->link) {
45 priv->link = phy_dev->link;
46 state_changed = 1;
47 }
48
49 if (priv->speed != phy_dev->speed) {
50 priv->speed = phy_dev->speed;
51 state_changed = 1;
52 }
53
54 if (priv->duplex != phy_dev->duplex) {
55 reg = arc_reg_get(priv, R_CTRL);
56
57 if (DUPLEX_FULL == phy_dev->duplex)
58 reg |= ENFL_MASK;
59 else
60 reg &= ~ENFL_MASK;
61
62 arc_reg_set(priv, R_CTRL, reg);
63 priv->duplex = phy_dev->duplex;
64 state_changed = 1;
65 }
66
67 if (state_changed)
68 phy_print_status(phy_dev);
69}
70
71/**
72 * arc_emac_get_settings - Get PHY settings.
73 * @ndev: Pointer to net_device structure.
74 * @cmd: Pointer to ethtool_cmd structure.
75 *
76 * This implements ethtool command for getting PHY settings. If PHY could
77 * not be found, the function returns -ENODEV. This function calls the
78 * relevant PHY ethtool API to get the PHY settings.
79 * Issue "ethtool ethX" under linux prompt to execute this function.
80 */
81static int arc_emac_get_settings(struct net_device *ndev,
82 struct ethtool_cmd *cmd)
83{
84 struct arc_emac_priv *priv = netdev_priv(ndev);
85
86 return phy_ethtool_gset(priv->phy_dev, cmd);
87}
88
89/**
90 * arc_emac_set_settings - Set PHY settings as passed in the argument.
91 * @ndev: Pointer to net_device structure.
92 * @cmd: Pointer to ethtool_cmd structure.
93 *
94 * This implements ethtool command for setting various PHY settings. If PHY
95 * could not be found, the function returns -ENODEV. This function calls the
96 * relevant PHY ethtool API to set the PHY.
97 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
98 * function.
99 */
100static int arc_emac_set_settings(struct net_device *ndev,
101 struct ethtool_cmd *cmd)
102{
103 struct arc_emac_priv *priv = netdev_priv(ndev);
104
105 if (!capable(CAP_NET_ADMIN))
106 return -EPERM;
107
108 return phy_ethtool_sset(priv->phy_dev, cmd);
109}
110
111/**
112 * arc_emac_get_drvinfo - Get EMAC driver information.
113 * @ndev: Pointer to net_device structure.
114 * @info: Pointer to ethtool_drvinfo structure.
115 *
116 * This implements ethtool command for getting the driver information.
117 * Issue "ethtool -i ethX" under linux prompt to execute this function.
118 */
119static void arc_emac_get_drvinfo(struct net_device *ndev,
120 struct ethtool_drvinfo *info)
121{
122 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
123 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
124}
125
/* ethtool callbacks; link state comes from the generic helper. */
static const struct ethtool_ops arc_emac_ethtool_ops = {
	.get_settings	= arc_emac_get_settings,
	.set_settings	= arc_emac_set_settings,
	.get_drvinfo	= arc_emac_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};
132
133#define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK)
134
135/**
136 * arc_emac_tx_clean - clears processed by EMAC Tx BDs.
137 * @ndev: Pointer to the network device.
138 */
139static void arc_emac_tx_clean(struct net_device *ndev)
140{
141 struct arc_emac_priv *priv = netdev_priv(ndev);
142 struct net_device_stats *stats = &priv->stats;
143 unsigned int i;
144
145 for (i = 0; i < TX_BD_NUM; i++) {
146 unsigned int *txbd_dirty = &priv->txbd_dirty;
147 struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
148 struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
149 struct sk_buff *skb = tx_buff->skb;
150 unsigned int info = le32_to_cpu(txbd->info);
151
152 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
153
154 if ((info & FOR_EMAC) || !txbd->data)
155 break;
156
157 if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
158 stats->tx_errors++;
159 stats->tx_dropped++;
160
161 if (info & DEFR)
162 stats->tx_carrier_errors++;
163
164 if (info & LTCL)
165 stats->collisions++;
166
167 if (info & UFLO)
168 stats->tx_fifo_errors++;
169 } else if (likely(info & FIRST_OR_LAST_MASK)) {
170 stats->tx_packets++;
171 stats->tx_bytes += skb->len;
172 }
173
174 dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
175 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
176
177 /* return the sk_buff to system */
178 dev_kfree_skb_irq(skb);
179
180 txbd->data = 0;
181 txbd->info = 0;
182
183 if (netif_queue_stopped(ndev))
184 netif_wake_queue(ndev);
185 }
186}
187
188/**
189 * arc_emac_rx - processing of Rx packets.
190 * @ndev: Pointer to the network device.
191 * @budget: How many BDs to process on 1 call.
192 *
193 * returns: Number of processed BDs
194 *
195 * Iterate through Rx BDs and deliver received packages to upper layer.
196 */
197static int arc_emac_rx(struct net_device *ndev, int budget)
198{
199 struct arc_emac_priv *priv = netdev_priv(ndev);
200 unsigned int work_done;
201
202 for (work_done = 0; work_done <= budget; work_done++) {
203 unsigned int *last_rx_bd = &priv->last_rx_bd;
204 struct net_device_stats *stats = &priv->stats;
205 struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
206 struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
207 unsigned int pktlen, info = le32_to_cpu(rxbd->info);
208 struct sk_buff *skb;
209 dma_addr_t addr;
210
211 if (unlikely((info & OWN_MASK) == FOR_EMAC))
212 break;
213
214 /* Make a note that we saw a packet at this BD.
215 * So next time, driver starts from this + 1
216 */
217 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
218
219 if (unlikely((info & FIRST_OR_LAST_MASK) !=
220 FIRST_OR_LAST_MASK)) {
221 /* We pre-allocate buffers of MTU size so incoming
222 * packets won't be split/chained.
223 */
224 if (net_ratelimit())
225 netdev_err(ndev, "incomplete packet received\n");
226
227 /* Return ownership to EMAC */
228 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
229 stats->rx_errors++;
230 stats->rx_length_errors++;
231 continue;
232 }
233
234 pktlen = info & LEN_MASK;
235 stats->rx_packets++;
236 stats->rx_bytes += pktlen;
237 skb = rx_buff->skb;
238 skb_put(skb, pktlen);
239 skb->dev = ndev;
240 skb->protocol = eth_type_trans(skb, ndev);
241
242 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
243 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
244
245 /* Prepare the BD for next cycle */
246 rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
247 EMAC_BUFFER_SIZE);
248 if (unlikely(!rx_buff->skb)) {
249 stats->rx_errors++;
250 /* Because receive_skb is below, increment rx_dropped */
251 stats->rx_dropped++;
252 continue;
253 }
254
255 /* receive_skb only if new skb was allocated to avoid holes */
256 netif_receive_skb(skb);
257
258 addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
259 EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
260 if (dma_mapping_error(&ndev->dev, addr)) {
261 if (net_ratelimit())
262 netdev_err(ndev, "cannot dma map\n");
263 dev_kfree_skb(rx_buff->skb);
264 stats->rx_errors++;
265 continue;
266 }
267 dma_unmap_addr_set(rx_buff, addr, addr);
268 dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
269
270 rxbd->data = cpu_to_le32(addr);
271
272 /* Make sure pointer to data buffer is set */
273 wmb();
274
275 /* Return ownership to EMAC */
276 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
277 }
278
279 return work_done;
280}
281
282/**
283 * arc_emac_poll - NAPI poll handler.
284 * @napi: Pointer to napi_struct structure.
285 * @budget: How many BDs to process on 1 call.
286 *
287 * returns: Number of processed BDs
288 */
289static int arc_emac_poll(struct napi_struct *napi, int budget)
290{
291 struct net_device *ndev = napi->dev;
292 struct arc_emac_priv *priv = netdev_priv(ndev);
293 unsigned int work_done;
294
295 arc_emac_tx_clean(ndev);
296
297 work_done = arc_emac_rx(ndev, budget);
298 if (work_done < budget) {
299 napi_complete(napi);
300 arc_reg_or(priv, R_ENABLE, RXINT_MASK);
301 }
302
303 return work_done;
304}
305
/**
 * arc_emac_intr - Global interrupt handler for EMAC.
 * @irq: irq number.
 * @dev_instance: device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * ARC EMAC has only 1 interrupt line, and depending on bits raised in
 * STATUS register we may tell what is a reason for interrupt to fire.
 * Rx work is deferred to NAPI (arc_emac_poll); error bits are only
 * accounted into statistics here.
 */
static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats;
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);
	/* MDIO completion is handled elsewhere; do not ack it here. */
	status &= ~MDIO_MASK;

	/* Reset all flags except "MDIO complete" */
	arc_reg_set(priv, R_STATUS, status);

	if (status & RXINT_MASK) {
		/* Mask further Rx interrupts until NAPI poll completes;
		 * arc_emac_poll re-enables RXINT_MASK.
		 */
		if (likely(napi_schedule_prep(&priv->napi))) {
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	if (status & ERR_MASK) {
		/* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
		 * 8-bit error counter overrun.
		 */

		if (status & MSER_MASK) {
			stats->rx_missed_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXCR_MASK) {
			stats->rx_crc_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFR_MASK) {
			stats->rx_frame_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFL_MASK) {
			stats->rx_over_errors += 0x100;
			stats->rx_errors += 0x100;
		}
	}

	return IRQ_HANDLED;
}
364
/**
 * arc_emac_open - Open the network device.
 * @ndev: Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the EMAC device and starts the Tx queue.
 * It also connects to the phy device.
 */
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = priv->phy_dev;
	int i;

	/* Restrict advertised link modes per the configured max_speed
	 * before (re)starting autonegotiation below.
	 */
	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	phy_dev->advertising = phy_dev->supported;

	if (priv->max_speed > 100) {
		phy_dev->advertising &= PHY_GBIT_FEATURES;
	} else if (priv->max_speed <= 100) {
		phy_dev->advertising &= PHY_BASIC_FEATURES;
		if (priv->max_speed <= 10) {
			phy_dev->advertising &= ~SUPPORTED_100baseT_Half;
			phy_dev->advertising &= ~SUPPORTED_100baseT_Full;
		}
	}

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's.
	 * NOTE(review): the -ENOMEM error paths below return without
	 * freeing skbs allocated and mapped on earlier loop iterations -
	 * repeated failed opens would leak Rx buffers; confirm and add
	 * unwind if this driver is revised.
	 */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		     (RX_BD_NUM << 24) |	/* RX BD table length */
		     (TX_BD_NUM << 16) |	/* TX BD table length */
		     TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start_aneg(priv->phy_dev);

	netif_start_queue(ndev);

	return 0;
}
462
463/**
464 * arc_emac_stop - Close the network device.
465 * @ndev: Pointer to the network device.
466 *
467 * This function stops the Tx queue, disables interrupts and frees the IRQ for
468 * the EMAC device.
469 * It also disconnects the PHY device associated with the EMAC device.
470 */
471static int arc_emac_stop(struct net_device *ndev)
472{
473 struct arc_emac_priv *priv = netdev_priv(ndev);
474
475 napi_disable(&priv->napi);
476 netif_stop_queue(ndev);
477
478 /* Disable interrupts */
479 arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
480
481 /* Disable EMAC */
482 arc_reg_clr(priv, R_CTRL, EN_MASK);
483
484 return 0;
485}
486
487/**
488 * arc_emac_stats - Get system network statistics.
489 * @ndev: Pointer to net_device structure.
490 *
491 * Returns the address of the device statistics structure.
492 * Statistics are updated in interrupt handler.
493 */
494static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
495{
496 struct arc_emac_priv *priv = netdev_priv(ndev);
497 struct net_device_stats *stats = &priv->stats;
498 unsigned long miss, rxerr;
499 u8 rxcrc, rxfram, rxoflow;
500
501 rxerr = arc_reg_get(priv, R_RXERR);
502 miss = arc_reg_get(priv, R_MISS);
503
504 rxcrc = rxerr;
505 rxfram = rxerr >> 8;
506 rxoflow = rxerr >> 16;
507
508 stats->rx_errors += miss;
509 stats->rx_errors += rxcrc + rxfram + rxoflow;
510
511 stats->rx_over_errors += rxoflow;
512 stats->rx_frame_errors += rxfram;
513 stats->rx_crc_errors += rxcrc;
514 stats->rx_missed_errors += miss;
515
516 return stats;
517}
518
/**
 * arc_emac_tx - Starts the data transmission.
 * @skb: sk_buff pointer that contains data to be Transmitted.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *          NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission:
 * it DMA-maps the skb, fills the current Tx buffer descriptor and hands
 * ownership of that BD to the EMAC.
 */
static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int len, *txbd_curr = &priv->txbd_curr;
	struct net_device_stats *stats = &priv->stats;
	__le32 *info = &priv->txbd[*txbd_curr].info;
	dma_addr_t addr;

	/* Pad short frames to the Ethernet minimum; on padding failure the
	 * skb is already consumed, so report OK.
	 */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	len = max_t(unsigned int, ETH_ZLEN, skb->len);

	/* EMAC still holds this buffer in its possession.
	 * CPU must not modify this buffer descriptor
	 */
	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
			      DMA_TO_DEVICE);

	/* On mapping failure the packet is dropped, not requeued */
	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
		stats->tx_dropped++;
		stats->tx_errors++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	/* Record unmap info so the completion path can release the mapping */
	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);

	priv->tx_buff[*txbd_curr].skb = skb;
	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);

	/* Make sure pointer to data buffer is set */
	wmb();

	/* Hand the BD to the EMAC only after data/address are visible */
	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);

	/* Increment index to point to the next BD */
	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;

	/* Get "info" of the next BD */
	info = &priv->txbd[*txbd_curr].info;

	/* Check if Tx BD ring is full - next BD is still owned by EMAC */
	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
		netif_stop_queue(ndev);

	/* Kick the EMAC Tx poll */
	arc_reg_set(priv, R_STATUS, TXPL_MASK);

	skb_tx_timestamp(skb);

	return NETDEV_TX_OK;
}
586
587/**
588 * arc_emac_set_address - Set the MAC address for this device.
589 * @ndev: Pointer to net_device structure.
590 * @p: 6 byte Address to be written as MAC address.
591 *
592 * This function copies the HW address from the sockaddr structure to the
593 * net_device structure and updates the address in HW.
594 *
595 * returns: -EBUSY if the net device is busy or 0 if the address is set
596 * successfully.
597 */
598static int arc_emac_set_address(struct net_device *ndev, void *p)
599{
600 struct arc_emac_priv *priv = netdev_priv(ndev);
601 struct sockaddr *addr = p;
602 unsigned int addr_low, addr_hi;
603
604 if (netif_running(ndev))
605 return -EBUSY;
606
607 if (!is_valid_ether_addr(addr->sa_data))
608 return -EADDRNOTAVAIL;
609
610 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
611
612 addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
613 addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
614
615 arc_reg_set(priv, R_ADDRL, addr_low);
616 arc_reg_set(priv, R_ADDRH, addr_hi);
617
618 return 0;
619}
620
/* Driver callbacks hooked into the networking core */
static const struct net_device_ops arc_emac_netdev_ops = {
	.ndo_open = arc_emac_open,
	.ndo_stop = arc_emac_stop,
	.ndo_start_xmit = arc_emac_tx,
	.ndo_set_mac_address = arc_emac_set_address,
	.ndo_get_stats = arc_emac_stats,
};
628
/**
 * arc_emac_probe - Bind the ARC EMAC described in the device tree.
 * @pdev: Platform device to probe.
 *
 * Reads PHY handle, register window, clock frequency, IRQ, max-speed
 * and MAC address from DT, verifies the EMAC ID register, allocates the
 * combined Rx/Tx BD ring, probes the MDIO bus, connects the PHY and
 * registers the netdev.
 *
 * returns: 0 on success, negative errno on failure.
 *
 * NOTE(review): phy_node from of_parse_phandle() is never released with
 * of_node_put() on any path — looks like a refcount leak; confirm.
 * NOTE(review): arc_emac_remove() calls platform_get_drvdata(), but no
 * platform_set_drvdata() call is visible in this function — verify it
 * happens elsewhere.
 */
static int arc_emac_probe(struct platform_device *pdev)
{
	struct resource res_regs, res_irq;
	struct device_node *phy_node;
	struct arc_emac_priv *priv;
	struct net_device *ndev;
	const char *mac_addr;
	unsigned int id, clock_frequency;
	int err;

	if (!pdev->dev.of_node)
		return -ENODEV;

	/* Get PHY from device tree */
	phy_node = of_parse_phandle(pdev->dev.of_node, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "failed to retrieve phy description from device tree\n");
		return -ENODEV;
	}

	/* Get EMAC registers base address from device tree */
	err = of_address_to_resource(pdev->dev.of_node, 0, &res_regs);
	if (err) {
		dev_err(&pdev->dev, "failed to retrieve registers base from device tree\n");
		return -ENODEV;
	}

	/* Get CPU clock frequency from device tree */
	if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
				 &clock_frequency)) {
		dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
		return -EINVAL;
	}

	/* Get IRQ from device tree */
	err = of_irq_to_resource(pdev->dev.of_node, 0, &res_irq);
	if (!err) {
		dev_err(&pdev->dev, "failed to retrieve <irq> value from device tree\n");
		return -ENODEV;
	}

	ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->netdev_ops = &arc_emac_netdev_ops;
	ndev->ethtool_ops = &arc_emac_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	/* FIXME :: no multicast support yet */
	ndev->flags &= ~IFF_MULTICAST;

	priv = netdev_priv(ndev);
	priv->dev = &pdev->dev;
	priv->ndev = ndev;

	/* Map the register window; devm_* resources are released by core */
	priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
	if (IS_ERR(priv->regs)) {
		err = PTR_ERR(priv->regs);
		goto out;
	}
	dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);

	id = arc_reg_get(priv, R_ID);

	/* Check for EMAC revision 5 or 7, magic number */
	if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
		dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
		err = -ENODEV;
		goto out;
	}
	dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);

	/* Set poll rate so that it polls every 1 ms */
	arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);

	/* Get max speed of operation from device tree */
	if (of_property_read_u32(pdev->dev.of_node, "max-speed",
				 &priv->max_speed)) {
		dev_err(&pdev->dev, "failed to retrieve <max-speed> from device tree\n");
		err = -EINVAL;
		goto out;
	}

	ndev->irq = res_irq.start;
	dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);

	/* Register interrupt handler for device */
	err = devm_request_irq(&pdev->dev, ndev->irq, arc_emac_intr, 0,
			       ndev->name, ndev);
	if (err) {
		dev_err(&pdev->dev, "could not allocate IRQ\n");
		goto out;
	}

	/* Get MAC address from device tree; fall back to a random one */
	mac_addr = of_get_mac_address(pdev->dev.of_node);

	if (!mac_addr || !is_valid_ether_addr(mac_addr))
		eth_hw_addr_random(ndev);
	else
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);

	dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);

	/* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
	priv->rxbd = dmam_alloc_coherent(&pdev->dev, RX_RING_SZ + TX_RING_SZ,
					 &priv->rxbd_dma, GFP_KERNEL);

	if (!priv->rxbd) {
		dev_err(&pdev->dev, "failed to allocate data buffers\n");
		err = -ENOMEM;
		goto out;
	}

	/* Tx ring lives right after the Rx ring in the same allocation */
	priv->txbd = priv->rxbd + RX_BD_NUM;

	priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ;
	dev_dbg(&pdev->dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n",
		(unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma);

	err = arc_mdio_probe(pdev, priv);
	if (err) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto out;
	}

	priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
				       PHY_INTERFACE_MODE_MII);
	if (!priv->phy_dev) {
		dev_err(&pdev->dev, "of_phy_connect() failed\n");
		err = -ENODEV;
		goto out;
	}

	dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
		 priv->phy_dev->drv->name, priv->phy_dev->phy_id);

	netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);

	err = register_netdev(ndev);
	if (err) {
		netif_napi_del(&priv->napi);
		dev_err(&pdev->dev, "failed to register network device\n");
		goto out;
	}

	return 0;

out:
	/* devm-managed regs/irq/coherent memory are released by the core */
	free_netdev(ndev);
	return err;
}
783
784static int arc_emac_remove(struct platform_device *pdev)
785{
786 struct net_device *ndev = platform_get_drvdata(pdev);
787 struct arc_emac_priv *priv = netdev_priv(ndev);
788
789 phy_disconnect(priv->phy_dev);
790 priv->phy_dev = NULL;
791 arc_mdio_remove(priv);
792 unregister_netdev(ndev);
793 netif_napi_del(&priv->napi);
794 free_netdev(ndev);
795
796 return 0;
797}
798
/* Device-tree match table: bound via the "snps,arc-emac" compatible string */
static const struct of_device_id arc_emac_dt_ids[] = {
	{ .compatible = "snps,arc-emac" },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, arc_emac_dt_ids);
804
/* Platform-driver glue: registers the DT-matched probe/remove pair */
static struct platform_driver arc_emac_driver = {
	.probe = arc_emac_probe,
	.remove = arc_emac_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = arc_emac_dt_ids,
	},
};

module_platform_driver(arc_emac_driver);

MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>");
MODULE_DESCRIPTION("ARC EMAC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
new file mode 100644
index 000000000000..26ba2423f33a
--- /dev/null
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -0,0 +1,152 @@
1/*
2 * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
3 *
4 * MDIO implementation for ARC EMAC
5 */
6
7#include <linux/delay.h>
8#include <linux/of_mdio.h>
9#include <linux/platform_device.h>
10
11#include "emac.h"
12
13/* Number of seconds we wait for "MDIO complete" flag to appear */
14#define ARC_MDIO_COMPLETE_POLL_COUNT 1
15
16/**
17 * arc_mdio_complete_wait - Waits until MDIO transaction is completed.
18 * @priv: Pointer to ARC EMAC private data structure.
19 *
20 * returns: 0 on success, -ETIMEDOUT on a timeout.
21 */
22static int arc_mdio_complete_wait(struct arc_emac_priv *priv)
23{
24 unsigned int i;
25
26 for (i = 0; i < ARC_MDIO_COMPLETE_POLL_COUNT * 40; i++) {
27 unsigned int status = arc_reg_get(priv, R_STATUS);
28
29 status &= MDIO_MASK;
30
31 if (status) {
32 /* Reset "MDIO complete" flag */
33 arc_reg_set(priv, R_STATUS, status);
34 return 0;
35 }
36
37 msleep(25);
38 }
39
40 return -ETIMEDOUT;
41}
42
/**
 * arc_mdio_read - MDIO interface read function.
 * @bus: Pointer to MII bus structure.
 * @phy_addr: Address of the PHY device.
 * @reg_num: PHY register to read.
 *
 * returns: The register contents on success, -ETIMEDOUT on a timeout.
 *
 * Reads the contents of the requested register from the requested PHY
 * address.
 */
static int arc_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
{
	struct arc_emac_priv *priv = bus->priv;
	unsigned int value;
	int error;

	/* Kick off the transaction: PHY address goes in bits 27:23,
	 * register number in bits 22:18.  0x60020000 presumably encodes
	 * the controller's start/read opcode — TODO confirm against the
	 * ARC EMAC register documentation.
	 */
	arc_reg_set(priv, R_MDIO,
		    0x60020000 | (phy_addr << 23) | (reg_num << 18));

	error = arc_mdio_complete_wait(priv);
	if (error < 0)
		return error;

	/* The read result comes back in the low 16 bits of R_MDIO */
	value = arc_reg_get(priv, R_MDIO) & 0xffff;

	dev_dbg(priv->dev, "arc_mdio_read(phy_addr=%i, reg_num=%x) = %x\n",
		phy_addr, reg_num, value);

	return value;
}
74
/**
 * arc_mdio_write - MDIO interface write function.
 * @bus: Pointer to MII bus structure.
 * @phy_addr: Address of the PHY device.
 * @reg_num: PHY register to write to.
 * @value: Value to be written into the register.
 *
 * returns: 0 on success, -ETIMEDOUT on a timeout.
 *
 * Writes the value to the requested register.
 */
static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
			  int reg_num, u16 value)
{
	struct arc_emac_priv *priv = bus->priv;

	dev_dbg(priv->dev,
		"arc_mdio_write(phy_addr=%i, reg_num=%x, value=%x)\n",
		phy_addr, reg_num, value);

	/* Same field layout as the read path (PHY addr 27:23, reg 22:18,
	 * data in the low 16 bits); 0x50020000 presumably encodes the
	 * start/write opcode — TODO confirm against the ARC EMAC
	 * register documentation.
	 */
	arc_reg_set(priv, R_MDIO,
		    0x50020000 | (phy_addr << 23) | (reg_num << 18) | value);

	return arc_mdio_complete_wait(priv);
}
100
101/**
102 * arc_mdio_probe - MDIO probe function.
103 * @pdev: Pointer to platform device.
104 * @priv: Pointer to ARC EMAC private data structure.
105 *
106 * returns: 0 on success, -ENOMEM when mdiobus_alloc
107 * (to allocate memory for MII bus structure) fails.
108 *
109 * Sets up and registers the MDIO interface.
110 */
111int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv)
112{
113 struct mii_bus *bus;
114 int error;
115
116 bus = mdiobus_alloc();
117 if (!bus)
118 return -ENOMEM;
119
120 priv->bus = bus;
121 bus->priv = priv;
122 bus->parent = priv->dev;
123 bus->name = "Synopsys MII Bus",
124 bus->read = &arc_mdio_read;
125 bus->write = &arc_mdio_write;
126
127 snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
128
129 error = of_mdiobus_register(bus, pdev->dev.of_node);
130 if (error) {
131 dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name);
132 mdiobus_free(bus);
133 return error;
134 }
135
136 return 0;
137}
138
139/**
140 * arc_mdio_remove - MDIO remove function.
141 * @priv: Pointer to ARC EMAC private data structure.
142 *
143 * Unregisters the MDIO and frees any associate memory for MII bus.
144 */
145int arc_mdio_remove(struct arc_emac_priv *priv)
146{
147 mdiobus_unregister(priv->bus);
148 mdiobus_free(priv->bus);
149 priv->bus = NULL;
150
151 return 0;
152}
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index ad6aa1e98348..58ad37c733bc 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -22,7 +22,6 @@ config ATL2
22 tristate "Atheros L2 Fast Ethernet support" 22 tristate "Atheros L2 Fast Ethernet support"
23 depends on PCI 23 depends on PCI
24 select CRC32 24 select CRC32
25 select NET_CORE
26 select MII 25 select MII
27 ---help--- 26 ---help---
28 This driver supports the Atheros L2 fast ethernet adapter. 27 This driver supports the Atheros L2 fast ethernet adapter.
@@ -34,7 +33,6 @@ config ATL1
34 tristate "Atheros/Attansic L1 Gigabit Ethernet support" 33 tristate "Atheros/Attansic L1 Gigabit Ethernet support"
35 depends on PCI 34 depends on PCI
36 select CRC32 35 select CRC32
37 select NET_CORE
38 select MII 36 select MII
39 ---help--- 37 ---help---
40 This driver supports the Atheros/Attansic L1 gigabit ethernet 38 This driver supports the Atheros/Attansic L1 gigabit ethernet
@@ -47,7 +45,6 @@ config ATL1E
47 tristate "Atheros L1E Gigabit Ethernet support" 45 tristate "Atheros L1E Gigabit Ethernet support"
48 depends on PCI 46 depends on PCI
49 select CRC32 47 select CRC32
50 select NET_CORE
51 select MII 48 select MII
52 ---help--- 49 ---help---
53 This driver supports the Atheros L1E gigabit ethernet adapter. 50 This driver supports the Atheros L1E gigabit ethernet adapter.
@@ -59,7 +56,6 @@ config ATL1C
59 tristate "Atheros L1C Gigabit Ethernet support" 56 tristate "Atheros L1C Gigabit Ethernet support"
60 depends on PCI 57 depends on PCI
61 select CRC32 58 select CRC32
62 select NET_CORE
63 select MII 59 select MII
64 ---help--- 60 ---help---
65 This driver supports the Atheros L1C gigabit ethernet adapter. 61 This driver supports the Atheros L1C gigabit ethernet adapter.
@@ -71,7 +67,6 @@ config ALX
71 tristate "Qualcomm Atheros AR816x/AR817x support" 67 tristate "Qualcomm Atheros AR816x/AR817x support"
72 depends on PCI 68 depends on PCI
73 select CRC32 69 select CRC32
74 select NET_CORE
75 select MDIO 70 select MDIO
76 help 71 help
77 This driver supports the Qualcomm Atheros L1F ethernet adapter, 72 This driver supports the Qualcomm Atheros L1F ethernet adapter,
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 50b3ae2b143d..d71103dbf2cd 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -85,16 +85,16 @@ struct alx_priv {
85 struct { 85 struct {
86 dma_addr_t dma; 86 dma_addr_t dma;
87 void *virt; 87 void *virt;
88 int size; 88 unsigned int size;
89 } descmem; 89 } descmem;
90 90
91 /* protect int_mask updates */ 91 /* protect int_mask updates */
92 spinlock_t irq_lock; 92 spinlock_t irq_lock;
93 u32 int_mask; 93 u32 int_mask;
94 94
95 int tx_ringsz; 95 unsigned int tx_ringsz;
96 int rx_ringsz; 96 unsigned int rx_ringsz;
97 int rxbuf_size; 97 unsigned int rxbuf_size;
98 98
99 struct napi_struct napi; 99 struct napi_struct napi;
100 struct alx_tx_queue txq; 100 struct alx_tx_queue txq;
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
index 6fa2aec2bc81..45b36507abc1 100644
--- a/drivers/net/ethernet/atheros/alx/ethtool.c
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -46,21 +46,37 @@
46#include "reg.h" 46#include "reg.h"
47#include "hw.h" 47#include "hw.h"
48 48
49static u32 alx_get_supported_speeds(struct alx_hw *hw)
50{
51 u32 supported = SUPPORTED_10baseT_Half |
52 SUPPORTED_10baseT_Full |
53 SUPPORTED_100baseT_Half |
54 SUPPORTED_100baseT_Full;
55
56 if (alx_hw_giga(hw))
57 supported |= SUPPORTED_1000baseT_Full;
58
59 BUILD_BUG_ON(SUPPORTED_10baseT_Half != ADVERTISED_10baseT_Half);
60 BUILD_BUG_ON(SUPPORTED_10baseT_Full != ADVERTISED_10baseT_Full);
61 BUILD_BUG_ON(SUPPORTED_100baseT_Half != ADVERTISED_100baseT_Half);
62 BUILD_BUG_ON(SUPPORTED_100baseT_Full != ADVERTISED_100baseT_Full);
63 BUILD_BUG_ON(SUPPORTED_1000baseT_Full != ADVERTISED_1000baseT_Full);
64
65 return supported;
66}
49 67
50static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 68static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
51{ 69{
52 struct alx_priv *alx = netdev_priv(netdev); 70 struct alx_priv *alx = netdev_priv(netdev);
53 struct alx_hw *hw = &alx->hw; 71 struct alx_hw *hw = &alx->hw;
54 72
55 ecmd->supported = SUPPORTED_10baseT_Half | 73 ecmd->supported = SUPPORTED_Autoneg |
56 SUPPORTED_10baseT_Full |
57 SUPPORTED_100baseT_Half |
58 SUPPORTED_100baseT_Full |
59 SUPPORTED_Autoneg |
60 SUPPORTED_TP | 74 SUPPORTED_TP |
61 SUPPORTED_Pause; 75 SUPPORTED_Pause |
76 SUPPORTED_Asym_Pause;
62 if (alx_hw_giga(hw)) 77 if (alx_hw_giga(hw))
63 ecmd->supported |= SUPPORTED_1000baseT_Full; 78 ecmd->supported |= SUPPORTED_1000baseT_Full;
79 ecmd->supported |= alx_get_supported_speeds(hw);
64 80
65 ecmd->advertising = ADVERTISED_TP; 81 ecmd->advertising = ADVERTISED_TP;
66 if (hw->adv_cfg & ADVERTISED_Autoneg) 82 if (hw->adv_cfg & ADVERTISED_Autoneg)
@@ -68,6 +84,7 @@ static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
68 84
69 ecmd->port = PORT_TP; 85 ecmd->port = PORT_TP;
70 ecmd->phy_address = 0; 86 ecmd->phy_address = 0;
87
71 if (hw->adv_cfg & ADVERTISED_Autoneg) 88 if (hw->adv_cfg & ADVERTISED_Autoneg)
72 ecmd->autoneg = AUTONEG_ENABLE; 89 ecmd->autoneg = AUTONEG_ENABLE;
73 else 90 else
@@ -85,14 +102,8 @@ static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
85 } 102 }
86 } 103 }
87 104
88 if (hw->link_speed != SPEED_UNKNOWN) { 105 ethtool_cmd_speed_set(ecmd, hw->link_speed);
89 ethtool_cmd_speed_set(ecmd, 106 ecmd->duplex = hw->duplex;
90 hw->link_speed - hw->link_speed % 10);
91 ecmd->duplex = hw->link_speed % 10;
92 } else {
93 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
94 ecmd->duplex = DUPLEX_UNKNOWN;
95 }
96 107
97 return 0; 108 return 0;
98} 109}
@@ -106,28 +117,15 @@ static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
106 ASSERT_RTNL(); 117 ASSERT_RTNL();
107 118
108 if (ecmd->autoneg == AUTONEG_ENABLE) { 119 if (ecmd->autoneg == AUTONEG_ENABLE) {
109 if (ecmd->advertising & ADVERTISED_1000baseT_Half) 120 if (ecmd->advertising & ~alx_get_supported_speeds(hw))
110 return -EINVAL; 121 return -EINVAL;
111 adv_cfg = ecmd->advertising | ADVERTISED_Autoneg; 122 adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
112 } else { 123 } else {
113 int speed = ethtool_cmd_speed(ecmd); 124 adv_cfg = alx_speed_to_ethadv(ethtool_cmd_speed(ecmd),
114 125 ecmd->duplex);
115 switch (speed + ecmd->duplex) { 126
116 case SPEED_10 + DUPLEX_HALF: 127 if (!adv_cfg || adv_cfg == ADVERTISED_1000baseT_Full)
117 adv_cfg = ADVERTISED_10baseT_Half;
118 break;
119 case SPEED_10 + DUPLEX_FULL:
120 adv_cfg = ADVERTISED_10baseT_Full;
121 break;
122 case SPEED_100 + DUPLEX_HALF:
123 adv_cfg = ADVERTISED_100baseT_Half;
124 break;
125 case SPEED_100 + DUPLEX_FULL:
126 adv_cfg = ADVERTISED_100baseT_Full;
127 break;
128 default:
129 return -EINVAL; 128 return -EINVAL;
130 }
131 } 129 }
132 130
133 hw->adv_cfg = adv_cfg; 131 hw->adv_cfg = adv_cfg;
@@ -140,21 +138,10 @@ static void alx_get_pauseparam(struct net_device *netdev,
140 struct alx_priv *alx = netdev_priv(netdev); 138 struct alx_priv *alx = netdev_priv(netdev);
141 struct alx_hw *hw = &alx->hw; 139 struct alx_hw *hw = &alx->hw;
142 140
143 if (hw->flowctrl & ALX_FC_ANEG && 141 pause->autoneg = !!(hw->flowctrl & ALX_FC_ANEG &&
144 hw->adv_cfg & ADVERTISED_Autoneg) 142 hw->adv_cfg & ADVERTISED_Autoneg);
145 pause->autoneg = AUTONEG_ENABLE; 143 pause->tx_pause = !!(hw->flowctrl & ALX_FC_TX);
146 else 144 pause->rx_pause = !!(hw->flowctrl & ALX_FC_RX);
147 pause->autoneg = AUTONEG_DISABLE;
148
149 if (hw->flowctrl & ALX_FC_TX)
150 pause->tx_pause = 1;
151 else
152 pause->tx_pause = 0;
153
154 if (hw->flowctrl & ALX_FC_RX)
155 pause->rx_pause = 1;
156 else
157 pause->rx_pause = 0;
158} 145}
159 146
160 147
@@ -187,7 +174,8 @@ static int alx_set_pauseparam(struct net_device *netdev,
187 174
188 if (reconfig_phy) { 175 if (reconfig_phy) {
189 err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc); 176 err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
190 return err; 177 if (err)
178 return err;
191 } 179 }
192 180
193 /* flow control on mac */ 181 /* flow control on mac */
@@ -213,60 +201,12 @@ static void alx_set_msglevel(struct net_device *netdev, u32 data)
213 alx->msg_enable = data; 201 alx->msg_enable = data;
214} 202}
215 203
216static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
217{
218 struct alx_priv *alx = netdev_priv(netdev);
219 struct alx_hw *hw = &alx->hw;
220
221 wol->supported = WAKE_MAGIC | WAKE_PHY;
222 wol->wolopts = 0;
223
224 if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
225 wol->wolopts |= WAKE_MAGIC;
226 if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
227 wol->wolopts |= WAKE_PHY;
228}
229
230static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
231{
232 struct alx_priv *alx = netdev_priv(netdev);
233 struct alx_hw *hw = &alx->hw;
234
235 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
236 WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
237 return -EOPNOTSUPP;
238
239 hw->sleep_ctrl = 0;
240
241 if (wol->wolopts & WAKE_MAGIC)
242 hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
243 if (wol->wolopts & WAKE_PHY)
244 hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
245
246 device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
247
248 return 0;
249}
250
251static void alx_get_drvinfo(struct net_device *netdev,
252 struct ethtool_drvinfo *drvinfo)
253{
254 struct alx_priv *alx = netdev_priv(netdev);
255
256 strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
257 strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
258 sizeof(drvinfo->bus_info));
259}
260
261const struct ethtool_ops alx_ethtool_ops = { 204const struct ethtool_ops alx_ethtool_ops = {
262 .get_settings = alx_get_settings, 205 .get_settings = alx_get_settings,
263 .set_settings = alx_set_settings, 206 .set_settings = alx_set_settings,
264 .get_pauseparam = alx_get_pauseparam, 207 .get_pauseparam = alx_get_pauseparam,
265 .set_pauseparam = alx_set_pauseparam, 208 .set_pauseparam = alx_set_pauseparam,
266 .get_drvinfo = alx_get_drvinfo,
267 .get_msglevel = alx_get_msglevel, 209 .get_msglevel = alx_get_msglevel,
268 .set_msglevel = alx_set_msglevel, 210 .set_msglevel = alx_set_msglevel,
269 .get_wol = alx_get_wol,
270 .set_wol = alx_set_wol,
271 .get_link = ethtool_op_get_link, 211 .get_link = ethtool_op_get_link,
272}; 212};
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
index 220a16ad0e49..1e8c24a3cb4e 100644
--- a/drivers/net/ethernet/atheros/alx/hw.c
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -282,8 +282,8 @@ static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
282 mac1 = alx_read_mem32(hw, ALX_STAD1); 282 mac1 = alx_read_mem32(hw, ALX_STAD1);
283 283
284 /* addr should be big-endian */ 284 /* addr should be big-endian */
285 *(__be32 *)(addr + 2) = cpu_to_be32(mac0); 285 put_unaligned(cpu_to_be32(mac0), (__be32 *)(addr + 2));
286 *(__be16 *)addr = cpu_to_be16(mac1); 286 put_unaligned(cpu_to_be16(mac1), (__be16 *)addr);
287 287
288 return is_valid_ether_addr(addr); 288 return is_valid_ether_addr(addr);
289} 289}
@@ -326,22 +326,12 @@ void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
326 u32 val; 326 u32 val;
327 327
328 /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */ 328 /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */
329 val = be32_to_cpu(*(__be32 *)(addr + 2)); 329 val = be32_to_cpu(get_unaligned((__be32 *)(addr + 2)));
330 alx_write_mem32(hw, ALX_STAD0, val); 330 alx_write_mem32(hw, ALX_STAD0, val);
331 val = be16_to_cpu(*(__be16 *)addr); 331 val = be16_to_cpu(get_unaligned((__be16 *)addr));
332 alx_write_mem32(hw, ALX_STAD1, val); 332 alx_write_mem32(hw, ALX_STAD1, val);
333} 333}
334 334
335static void alx_enable_osc(struct alx_hw *hw)
336{
337 u32 val;
338
339 /* rising edge */
340 val = alx_read_mem32(hw, ALX_MISC);
341 alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
342 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
343}
344
345static void alx_reset_osc(struct alx_hw *hw, u8 rev) 335static void alx_reset_osc(struct alx_hw *hw, u8 rev)
346{ 336{
347 u32 val, val2; 337 u32 val, val2;
@@ -624,12 +614,12 @@ void alx_start_mac(struct alx_hw *hw)
624 alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN); 614 alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
625 615
626 mac = hw->rx_ctrl; 616 mac = hw->rx_ctrl;
627 if (hw->link_speed % 10 == DUPLEX_FULL) 617 if (hw->duplex == DUPLEX_FULL)
628 mac |= ALX_MAC_CTRL_FULLD; 618 mac |= ALX_MAC_CTRL_FULLD;
629 else 619 else
630 mac &= ~ALX_MAC_CTRL_FULLD; 620 mac &= ~ALX_MAC_CTRL_FULLD;
631 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, 621 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
632 hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 : 622 hw->link_speed == SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
633 ALX_MAC_CTRL_SPEED_10_100); 623 ALX_MAC_CTRL_SPEED_10_100);
634 mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN; 624 mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
635 hw->rx_ctrl = mac; 625 hw->rx_ctrl = mac;
@@ -790,28 +780,22 @@ void alx_post_phy_link(struct alx_hw *hw)
790 u16 phy_val, len, agc; 780 u16 phy_val, len, agc;
791 u8 revid = alx_hw_revision(hw); 781 u8 revid = alx_hw_revision(hw);
792 bool adj_th = revid == ALX_REV_B0; 782 bool adj_th = revid == ALX_REV_B0;
793 int speed;
794
795 if (hw->link_speed == SPEED_UNKNOWN)
796 speed = SPEED_UNKNOWN;
797 else
798 speed = hw->link_speed - hw->link_speed % 10;
799 783
800 if (revid != ALX_REV_B0 && !alx_is_rev_a(revid)) 784 if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
801 return; 785 return;
802 786
803 /* 1000BT/AZ, wrong cable length */ 787 /* 1000BT/AZ, wrong cable length */
804 if (speed != SPEED_UNKNOWN) { 788 if (hw->link_speed != SPEED_UNKNOWN) {
805 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6, 789 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
806 &phy_val); 790 &phy_val);
807 len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN); 791 len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
808 alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val); 792 alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
809 agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA); 793 agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);
810 794
811 if ((speed == SPEED_1000 && 795 if ((hw->link_speed == SPEED_1000 &&
812 (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G || 796 (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
813 (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) || 797 (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
814 (speed == SPEED_100 && 798 (hw->link_speed == SPEED_100 &&
815 (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M || 799 (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
816 (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) { 800 (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
817 alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, 801 alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
@@ -831,10 +815,10 @@ void alx_post_phy_link(struct alx_hw *hw)
831 815
832 /* threshold adjust */ 816 /* threshold adjust */
833 if (adj_th && hw->lnk_patch) { 817 if (adj_th && hw->lnk_patch) {
834 if (speed == SPEED_100) { 818 if (hw->link_speed == SPEED_100) {
835 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, 819 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
836 ALX_MSE16DB_UP); 820 ALX_MSE16DB_UP);
837 } else if (speed == SPEED_1000) { 821 } else if (hw->link_speed == SPEED_1000) {
838 /* 822 /*
839 * Giga link threshold, raise the tolerance of 823 * Giga link threshold, raise the tolerance of
840 * noise 50% 824 * noise 50%
@@ -864,66 +848,6 @@ void alx_post_phy_link(struct alx_hw *hw)
864 } 848 }
865} 849}
866 850
867
868/* NOTE:
869 * 1. phy link must be established before calling this function
870 * 2. wol option (pattern,magic,link,etc.) is configed before call it.
871 */
872int alx_pre_suspend(struct alx_hw *hw, int speed)
873{
874 u32 master, mac, phy, val;
875 int err = 0;
876
877 master = alx_read_mem32(hw, ALX_MASTER);
878 master &= ~ALX_MASTER_PCLKSEL_SRDS;
879 mac = hw->rx_ctrl;
880 /* 10/100 half */
881 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100);
882 mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
883
884 phy = alx_read_mem32(hw, ALX_PHY_CTRL);
885 phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
886 phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
887 ALX_PHY_CTRL_HIB_EN;
888
889 /* without any activity */
890 if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
891 err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
892 if (err)
893 return err;
894 phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
895 } else {
896 if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
897 mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
898 if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
899 mac |= ALX_MAC_CTRL_TX_EN;
900 if (speed % 10 == DUPLEX_FULL)
901 mac |= ALX_MAC_CTRL_FULLD;
902 if (speed >= SPEED_1000)
903 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
904 ALX_MAC_CTRL_SPEED_1000);
905 phy |= ALX_PHY_CTRL_DSPRST_OUT;
906 err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
907 ALX_MIIEXT_S3DIG10,
908 ALX_MIIEXT_S3DIG10_SL);
909 if (err)
910 return err;
911 }
912
913 alx_enable_osc(hw);
914 hw->rx_ctrl = mac;
915 alx_write_mem32(hw, ALX_MASTER, master);
916 alx_write_mem32(hw, ALX_MAC_CTRL, mac);
917 alx_write_mem32(hw, ALX_PHY_CTRL, phy);
918
919 /* set val of PDLL D3PLLOFF */
920 val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
921 val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
922 alx_write_mem32(hw, ALX_PDLL_TRNS1, val);
923
924 return 0;
925}
926
927bool alx_phy_configured(struct alx_hw *hw) 851bool alx_phy_configured(struct alx_hw *hw)
928{ 852{
929 u32 cfg, hw_cfg; 853 u32 cfg, hw_cfg;
@@ -938,7 +862,7 @@ bool alx_phy_configured(struct alx_hw *hw)
938 return cfg == hw_cfg; 862 return cfg == hw_cfg;
939} 863}
940 864
941int alx_get_phy_link(struct alx_hw *hw, int *speed) 865int alx_read_phy_link(struct alx_hw *hw)
942{ 866{
943 struct pci_dev *pdev = hw->pdev; 867 struct pci_dev *pdev = hw->pdev;
944 u16 bmsr, giga; 868 u16 bmsr, giga;
@@ -953,7 +877,8 @@ int alx_get_phy_link(struct alx_hw *hw, int *speed)
953 return err; 877 return err;
954 878
955 if (!(bmsr & BMSR_LSTATUS)) { 879 if (!(bmsr & BMSR_LSTATUS)) {
956 *speed = SPEED_UNKNOWN; 880 hw->link_speed = SPEED_UNKNOWN;
881 hw->duplex = DUPLEX_UNKNOWN;
957 return 0; 882 return 0;
958 } 883 }
959 884
@@ -967,20 +892,20 @@ int alx_get_phy_link(struct alx_hw *hw, int *speed)
967 892
968 switch (giga & ALX_GIGA_PSSR_SPEED) { 893 switch (giga & ALX_GIGA_PSSR_SPEED) {
969 case ALX_GIGA_PSSR_1000MBS: 894 case ALX_GIGA_PSSR_1000MBS:
970 *speed = SPEED_1000; 895 hw->link_speed = SPEED_1000;
971 break; 896 break;
972 case ALX_GIGA_PSSR_100MBS: 897 case ALX_GIGA_PSSR_100MBS:
973 *speed = SPEED_100; 898 hw->link_speed = SPEED_100;
974 break; 899 break;
975 case ALX_GIGA_PSSR_10MBS: 900 case ALX_GIGA_PSSR_10MBS:
976 *speed = SPEED_10; 901 hw->link_speed = SPEED_10;
977 break; 902 break;
978 default: 903 default:
979 goto wrong_speed; 904 goto wrong_speed;
980 } 905 }
981 906
982 *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF; 907 hw->duplex = (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
983 return 1; 908 return 0;
984 909
985wrong_speed: 910wrong_speed:
986 dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga); 911 dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
@@ -995,26 +920,6 @@ int alx_clear_phy_intr(struct alx_hw *hw)
995 return alx_read_phy_reg(hw, ALX_MII_ISR, &isr); 920 return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
996} 921}
997 922
998int alx_config_wol(struct alx_hw *hw)
999{
1000 u32 wol = 0;
1001 int err = 0;
1002
1003 /* turn on magic packet event */
1004 if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
1005 wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
1006
1007 /* turn on link up event */
1008 if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
1009 wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
1010 /* only link up can wake up */
1011 err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
1012 }
1013 alx_write_mem32(hw, ALX_WOL0, wol);
1014
1015 return err;
1016}
1017
1018void alx_disable_rss(struct alx_hw *hw) 923void alx_disable_rss(struct alx_hw *hw)
1019{ 924{
1020 u32 ctrl = alx_read_mem32(hw, ALX_RXQ0); 925 u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
@@ -1126,85 +1031,6 @@ void alx_configure_basic(struct alx_hw *hw)
1126 alx_write_mem32(hw, ALX_WRR, val); 1031 alx_write_mem32(hw, ALX_WRR, val);
1127} 1032}
1128 1033
1129static inline u32 alx_speed_to_ethadv(int speed)
1130{
1131 switch (speed) {
1132 case SPEED_1000 + DUPLEX_FULL:
1133 return ADVERTISED_1000baseT_Full;
1134 case SPEED_100 + DUPLEX_FULL:
1135 return ADVERTISED_100baseT_Full;
1136 case SPEED_100 + DUPLEX_HALF:
1137 return ADVERTISED_10baseT_Half;
1138 case SPEED_10 + DUPLEX_FULL:
1139 return ADVERTISED_10baseT_Full;
1140 case SPEED_10 + DUPLEX_HALF:
1141 return ADVERTISED_10baseT_Half;
1142 default:
1143 return 0;
1144 }
1145}
1146
1147int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
1148{
1149 int i, err, spd;
1150 u16 lpa;
1151
1152 err = alx_get_phy_link(hw, &spd);
1153 if (err < 0)
1154 return err;
1155
1156 if (spd == SPEED_UNKNOWN)
1157 return 0;
1158
1159 err = alx_read_phy_reg(hw, MII_LPA, &lpa);
1160 if (err)
1161 return err;
1162
1163 if (!(lpa & LPA_LPACK)) {
1164 *speed = spd;
1165 return 0;
1166 }
1167
1168 if (lpa & LPA_10FULL)
1169 *speed = SPEED_10 + DUPLEX_FULL;
1170 else if (lpa & LPA_10HALF)
1171 *speed = SPEED_10 + DUPLEX_HALF;
1172 else if (lpa & LPA_100FULL)
1173 *speed = SPEED_100 + DUPLEX_FULL;
1174 else
1175 *speed = SPEED_100 + DUPLEX_HALF;
1176
1177 if (*speed != spd) {
1178 err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
1179 if (err)
1180 return err;
1181 err = alx_setup_speed_duplex(hw,
1182 alx_speed_to_ethadv(*speed) |
1183 ADVERTISED_Autoneg,
1184 ALX_FC_ANEG | ALX_FC_RX |
1185 ALX_FC_TX);
1186 if (err)
1187 return err;
1188
1189 /* wait for linkup */
1190 for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
1191 int speed2;
1192
1193 msleep(100);
1194
1195 err = alx_get_phy_link(hw, &speed2);
1196 if (err < 0)
1197 return err;
1198 if (speed2 != SPEED_UNKNOWN)
1199 break;
1200 }
1201 if (i == ALX_MAX_SETUP_LNK_CYCLE)
1202 return -ETIMEDOUT;
1203 }
1204
1205 return 0;
1206}
1207
1208bool alx_get_phy_info(struct alx_hw *hw) 1034bool alx_get_phy_info(struct alx_hw *hw)
1209{ 1035{
1210 u16 devs1, devs2; 1036 u16 devs1, devs2;
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
index 65e723d2172a..96f3b4381e17 100644
--- a/drivers/net/ethernet/atheros/alx/hw.h
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -412,12 +412,11 @@ struct alx_hw {
412 u32 smb_timer; 412 u32 smb_timer;
413 /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */ 413 /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
414 int link_speed; 414 int link_speed;
415 u8 duplex;
415 416
416 /* auto-neg advertisement or force mode config */ 417 /* auto-neg advertisement or force mode config */
417 u32 adv_cfg;
418 u8 flowctrl; 418 u8 flowctrl;
419 419 u32 adv_cfg;
420 u32 sleep_ctrl;
421 420
422 spinlock_t mdio_lock; 421 spinlock_t mdio_lock;
423 struct mdio_if_info mdio; 422 struct mdio_if_info mdio;
@@ -478,14 +477,12 @@ void alx_reset_pcie(struct alx_hw *hw);
478void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en); 477void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
479int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl); 478int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
480void alx_post_phy_link(struct alx_hw *hw); 479void alx_post_phy_link(struct alx_hw *hw);
481int alx_pre_suspend(struct alx_hw *hw, int speed);
482int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data); 480int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
483int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data); 481int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
484int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata); 482int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
485int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data); 483int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
486int alx_get_phy_link(struct alx_hw *hw, int *speed); 484int alx_read_phy_link(struct alx_hw *hw);
487int alx_clear_phy_intr(struct alx_hw *hw); 485int alx_clear_phy_intr(struct alx_hw *hw);
488int alx_config_wol(struct alx_hw *hw);
489void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc); 486void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
490void alx_start_mac(struct alx_hw *hw); 487void alx_start_mac(struct alx_hw *hw);
491int alx_reset_mac(struct alx_hw *hw); 488int alx_reset_mac(struct alx_hw *hw);
@@ -493,7 +490,21 @@ void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
493bool alx_phy_configured(struct alx_hw *hw); 490bool alx_phy_configured(struct alx_hw *hw);
494void alx_configure_basic(struct alx_hw *hw); 491void alx_configure_basic(struct alx_hw *hw);
495void alx_disable_rss(struct alx_hw *hw); 492void alx_disable_rss(struct alx_hw *hw);
496int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
497bool alx_get_phy_info(struct alx_hw *hw); 493bool alx_get_phy_info(struct alx_hw *hw);
498 494
495static inline u32 alx_speed_to_ethadv(int speed, u8 duplex)
496{
497 if (speed == SPEED_1000 && duplex == DUPLEX_FULL)
498 return ADVERTISED_1000baseT_Full;
499 if (speed == SPEED_100 && duplex == DUPLEX_FULL)
500 return ADVERTISED_100baseT_Full;
501 if (speed == SPEED_100 && duplex== DUPLEX_HALF)
502 return ADVERTISED_100baseT_Half;
503 if (speed == SPEED_10 && duplex == DUPLEX_FULL)
504 return ADVERTISED_10baseT_Full;
505 if (speed == SPEED_10 && duplex == DUPLEX_HALF)
506 return ADVERTISED_10baseT_Half;
507 return 0;
508}
509
499#endif 510#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 418de8b13165..0e0b242a9dd4 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -706,12 +706,12 @@ static int alx_init_sw(struct alx_priv *alx)
706 alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8); 706 alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
707 alx->tx_ringsz = 256; 707 alx->tx_ringsz = 256;
708 alx->rx_ringsz = 512; 708 alx->rx_ringsz = 512;
709 hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
710 hw->imt = 200; 709 hw->imt = 200;
711 alx->int_mask = ALX_ISR_MISC; 710 alx->int_mask = ALX_ISR_MISC;
712 hw->dma_chnl = hw->max_dma_chnl; 711 hw->dma_chnl = hw->max_dma_chnl;
713 hw->ith_tpd = alx->tx_ringsz / 3; 712 hw->ith_tpd = alx->tx_ringsz / 3;
714 hw->link_speed = SPEED_UNKNOWN; 713 hw->link_speed = SPEED_UNKNOWN;
714 hw->duplex = DUPLEX_UNKNOWN;
715 hw->adv_cfg = ADVERTISED_Autoneg | 715 hw->adv_cfg = ADVERTISED_Autoneg |
716 ADVERTISED_10baseT_Half | 716 ADVERTISED_10baseT_Half |
717 ADVERTISED_10baseT_Full | 717 ADVERTISED_10baseT_Full |
@@ -758,6 +758,7 @@ static void alx_halt(struct alx_priv *alx)
758 758
759 alx_netif_stop(alx); 759 alx_netif_stop(alx);
760 hw->link_speed = SPEED_UNKNOWN; 760 hw->link_speed = SPEED_UNKNOWN;
761 hw->duplex = DUPLEX_UNKNOWN;
761 762
762 alx_reset_mac(hw); 763 alx_reset_mac(hw);
763 764
@@ -869,18 +870,18 @@ static void __alx_stop(struct alx_priv *alx)
869 alx_free_rings(alx); 870 alx_free_rings(alx);
870} 871}
871 872
872static const char *alx_speed_desc(u16 speed) 873static const char *alx_speed_desc(struct alx_hw *hw)
873{ 874{
874 switch (speed) { 875 switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
875 case SPEED_1000 + DUPLEX_FULL: 876 case ADVERTISED_1000baseT_Full:
876 return "1 Gbps Full"; 877 return "1 Gbps Full";
877 case SPEED_100 + DUPLEX_FULL: 878 case ADVERTISED_100baseT_Full:
878 return "100 Mbps Full"; 879 return "100 Mbps Full";
879 case SPEED_100 + DUPLEX_HALF: 880 case ADVERTISED_100baseT_Half:
880 return "100 Mbps Half"; 881 return "100 Mbps Half";
881 case SPEED_10 + DUPLEX_FULL: 882 case ADVERTISED_10baseT_Full:
882 return "10 Mbps Full"; 883 return "10 Mbps Full";
883 case SPEED_10 + DUPLEX_HALF: 884 case ADVERTISED_10baseT_Half:
884 return "10 Mbps Half"; 885 return "10 Mbps Half";
885 default: 886 default:
886 return "Unknown speed"; 887 return "Unknown speed";
@@ -891,7 +892,8 @@ static void alx_check_link(struct alx_priv *alx)
891{ 892{
892 struct alx_hw *hw = &alx->hw; 893 struct alx_hw *hw = &alx->hw;
893 unsigned long flags; 894 unsigned long flags;
894 int speed, old_speed; 895 int old_speed;
896 u8 old_duplex;
895 int err; 897 int err;
896 898
897 /* clear PHY internal interrupt status, otherwise the main 899 /* clear PHY internal interrupt status, otherwise the main
@@ -899,7 +901,9 @@ static void alx_check_link(struct alx_priv *alx)
899 */ 901 */
900 alx_clear_phy_intr(hw); 902 alx_clear_phy_intr(hw);
901 903
902 err = alx_get_phy_link(hw, &speed); 904 old_speed = hw->link_speed;
905 old_duplex = hw->duplex;
906 err = alx_read_phy_link(hw);
903 if (err < 0) 907 if (err < 0)
904 goto reset; 908 goto reset;
905 909
@@ -908,15 +912,12 @@ static void alx_check_link(struct alx_priv *alx)
908 alx_write_mem32(hw, ALX_IMR, alx->int_mask); 912 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
909 spin_unlock_irqrestore(&alx->irq_lock, flags); 913 spin_unlock_irqrestore(&alx->irq_lock, flags);
910 914
911 old_speed = hw->link_speed; 915 if (old_speed == hw->link_speed)
912
913 if (old_speed == speed)
914 return; 916 return;
915 hw->link_speed = speed;
916 917
917 if (speed != SPEED_UNKNOWN) { 918 if (hw->link_speed != SPEED_UNKNOWN) {
918 netif_info(alx, link, alx->dev, 919 netif_info(alx, link, alx->dev,
919 "NIC Up: %s\n", alx_speed_desc(speed)); 920 "NIC Up: %s\n", alx_speed_desc(hw));
920 alx_post_phy_link(hw); 921 alx_post_phy_link(hw);
921 alx_enable_aspm(hw, true, true); 922 alx_enable_aspm(hw, true, true);
922 alx_start_mac(hw); 923 alx_start_mac(hw);
@@ -959,65 +960,6 @@ static int alx_stop(struct net_device *netdev)
959 return 0; 960 return 0;
960} 961}
961 962
962static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
963{
964 struct alx_priv *alx = pci_get_drvdata(pdev);
965 struct net_device *netdev = alx->dev;
966 struct alx_hw *hw = &alx->hw;
967 int err, speed;
968
969 netif_device_detach(netdev);
970
971 if (netif_running(netdev))
972 __alx_stop(alx);
973
974#ifdef CONFIG_PM_SLEEP
975 err = pci_save_state(pdev);
976 if (err)
977 return err;
978#endif
979
980 err = alx_select_powersaving_speed(hw, &speed);
981 if (err)
982 return err;
983 err = alx_clear_phy_intr(hw);
984 if (err)
985 return err;
986 err = alx_pre_suspend(hw, speed);
987 if (err)
988 return err;
989 err = alx_config_wol(hw);
990 if (err)
991 return err;
992
993 *wol_en = false;
994 if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
995 netif_info(alx, wol, netdev,
996 "wol: ctrl=%X, speed=%X\n",
997 hw->sleep_ctrl, speed);
998 device_set_wakeup_enable(&pdev->dev, true);
999 *wol_en = true;
1000 }
1001
1002 pci_disable_device(pdev);
1003
1004 return 0;
1005}
1006
1007static void alx_shutdown(struct pci_dev *pdev)
1008{
1009 int err;
1010 bool wol_en;
1011
1012 err = __alx_shutdown(pdev, &wol_en);
1013 if (!err) {
1014 pci_wake_from_d3(pdev, wol_en);
1015 pci_set_power_state(pdev, PCI_D3hot);
1016 } else {
1017 dev_err(&pdev->dev, "shutdown fail %d\n", err);
1018 }
1019}
1020
1021static void alx_link_check(struct work_struct *work) 963static void alx_link_check(struct work_struct *work)
1022{ 964{
1023 struct alx_priv *alx; 965 struct alx_priv *alx;
@@ -1396,8 +1338,6 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1396 goto out_unmap; 1338 goto out_unmap;
1397 } 1339 }
1398 1340
1399 device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);
1400
1401 netdev_info(netdev, 1341 netdev_info(netdev,
1402 "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n", 1342 "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
1403 netdev->dev_addr); 1343 netdev->dev_addr);
@@ -1442,22 +1382,12 @@ static void alx_remove(struct pci_dev *pdev)
1442static int alx_suspend(struct device *dev) 1382static int alx_suspend(struct device *dev)
1443{ 1383{
1444 struct pci_dev *pdev = to_pci_dev(dev); 1384 struct pci_dev *pdev = to_pci_dev(dev);
1445 int err; 1385 struct alx_priv *alx = pci_get_drvdata(pdev);
1446 bool wol_en;
1447
1448 err = __alx_shutdown(pdev, &wol_en);
1449 if (err) {
1450 dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
1451 return err;
1452 }
1453
1454 if (wol_en) {
1455 pci_prepare_to_sleep(pdev);
1456 } else {
1457 pci_wake_from_d3(pdev, false);
1458 pci_set_power_state(pdev, PCI_D3hot);
1459 }
1460 1386
1387 if (!netif_running(alx->dev))
1388 return 0;
1389 netif_device_detach(alx->dev);
1390 __alx_stop(alx);
1461 return 0; 1391 return 0;
1462} 1392}
1463 1393
@@ -1465,49 +1395,20 @@ static int alx_resume(struct device *dev)
1465{ 1395{
1466 struct pci_dev *pdev = to_pci_dev(dev); 1396 struct pci_dev *pdev = to_pci_dev(dev);
1467 struct alx_priv *alx = pci_get_drvdata(pdev); 1397 struct alx_priv *alx = pci_get_drvdata(pdev);
1468 struct net_device *netdev = alx->dev;
1469 struct alx_hw *hw = &alx->hw;
1470 int err;
1471
1472 pci_set_power_state(pdev, PCI_D0);
1473 pci_restore_state(pdev);
1474 pci_save_state(pdev);
1475
1476 pci_enable_wake(pdev, PCI_D3hot, 0);
1477 pci_enable_wake(pdev, PCI_D3cold, 0);
1478 1398
1479 hw->link_speed = SPEED_UNKNOWN; 1399 if (!netif_running(alx->dev))
1480 alx->int_mask = ALX_ISR_MISC; 1400 return 0;
1481 1401 netif_device_attach(alx->dev);
1482 alx_reset_pcie(hw); 1402 return __alx_open(alx, true);
1483 alx_reset_phy(hw);
1484
1485 err = alx_reset_mac(hw);
1486 if (err) {
1487 netif_err(alx, hw, alx->dev,
1488 "resume:reset_mac fail %d\n", err);
1489 return -EIO;
1490 }
1491
1492 err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
1493 if (err) {
1494 netif_err(alx, hw, alx->dev,
1495 "resume:setup_speed_duplex fail %d\n", err);
1496 return -EIO;
1497 }
1498
1499 if (netif_running(netdev)) {
1500 err = __alx_open(alx, true);
1501 if (err)
1502 return err;
1503 }
1504
1505 netif_device_attach(netdev);
1506
1507 return err;
1508} 1403}
1404
1405static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
1406#define ALX_PM_OPS (&alx_pm_ops)
1407#else
1408#define ALX_PM_OPS NULL
1509#endif 1409#endif
1510 1410
1411
1511static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, 1412static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
1512 pci_channel_state_t state) 1413 pci_channel_state_t state)
1513{ 1414{
@@ -1550,8 +1451,6 @@ static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
1550 } 1451 }
1551 1452
1552 pci_set_master(pdev); 1453 pci_set_master(pdev);
1553 pci_enable_wake(pdev, PCI_D3hot, 0);
1554 pci_enable_wake(pdev, PCI_D3cold, 0);
1555 1454
1556 alx_reset_pcie(hw); 1455 alx_reset_pcie(hw);
1557 if (!alx_reset_mac(hw)) 1456 if (!alx_reset_mac(hw))
@@ -1587,13 +1486,6 @@ static const struct pci_error_handlers alx_err_handlers = {
1587 .resume = alx_pci_error_resume, 1486 .resume = alx_pci_error_resume,
1588}; 1487};
1589 1488
1590#ifdef CONFIG_PM_SLEEP
1591static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
1592#define ALX_PM_OPS (&alx_pm_ops)
1593#else
1594#define ALX_PM_OPS NULL
1595#endif
1596
1597static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = { 1489static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
1598 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161), 1490 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
1599 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, 1491 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
@@ -1611,7 +1503,6 @@ static struct pci_driver alx_driver = {
1611 .id_table = alx_pci_tbl, 1503 .id_table = alx_pci_tbl,
1612 .probe = alx_probe, 1504 .probe = alx_probe,
1613 .remove = alx_remove, 1505 .remove = alx_remove,
1614 .shutdown = alx_shutdown,
1615 .err_handler = &alx_err_handlers, 1506 .err_handler = &alx_err_handlers,
1616 .driver.pm = ALX_PM_OPS, 1507 .driver.pm = ALX_PM_OPS,
1617}; 1508};
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 0ba900762b13..786a87483298 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2755,27 +2755,4 @@ static struct pci_driver atl1c_driver = {
2755 .driver.pm = &atl1c_pm_ops, 2755 .driver.pm = &atl1c_pm_ops,
2756}; 2756};
2757 2757
2758/** 2758module_pci_driver(atl1c_driver);
2759 * atl1c_init_module - Driver Registration Routine
2760 *
2761 * atl1c_init_module is the first routine called when the driver is
2762 * loaded. All it does is register with the PCI subsystem.
2763 */
2764static int __init atl1c_init_module(void)
2765{
2766 return pci_register_driver(&atl1c_driver);
2767}
2768
2769/**
2770 * atl1c_exit_module - Driver Exit Cleanup Routine
2771 *
2772 * atl1c_exit_module is called just before the driver is removed
2773 * from memory.
2774 */
2775static void __exit atl1c_exit_module(void)
2776{
2777 pci_unregister_driver(&atl1c_driver);
2778}
2779
2780module_init(atl1c_init_module);
2781module_exit(atl1c_exit_module);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 0688bb82b442..895f5377ad1b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2489,27 +2489,4 @@ static struct pci_driver atl1e_driver = {
2489 .err_handler = &atl1e_err_handler 2489 .err_handler = &atl1e_err_handler
2490}; 2490};
2491 2491
2492/** 2492module_pci_driver(atl1e_driver);
2493 * atl1e_init_module - Driver Registration Routine
2494 *
2495 * atl1e_init_module is the first routine called when the driver is
2496 * loaded. All it does is register with the PCI subsystem.
2497 */
2498static int __init atl1e_init_module(void)
2499{
2500 return pci_register_driver(&atl1e_driver);
2501}
2502
2503/**
2504 * atl1e_exit_module - Driver Exit Cleanup Routine
2505 *
2506 * atl1e_exit_module is called just before the driver is removed
2507 * from memory.
2508 */
2509static void __exit atl1e_exit_module(void)
2510{
2511 pci_unregister_driver(&atl1e_driver);
2512}
2513
2514module_init(atl1e_init_module);
2515module_exit(atl1e_exit_module);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index fa0915f3999b..538211d6f7d9 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3145,31 +3145,6 @@ static struct pci_driver atl1_driver = {
3145 .driver.pm = &atl1_pm_ops, 3145 .driver.pm = &atl1_pm_ops,
3146}; 3146};
3147 3147
3148/**
3149 * atl1_exit_module - Driver Exit Cleanup Routine
3150 *
3151 * atl1_exit_module is called just before the driver is removed
3152 * from memory.
3153 */
3154static void __exit atl1_exit_module(void)
3155{
3156 pci_unregister_driver(&atl1_driver);
3157}
3158
3159/**
3160 * atl1_init_module - Driver Registration Routine
3161 *
3162 * atl1_init_module is the first routine called when the driver is
3163 * loaded. All it does is register with the PCI subsystem.
3164 */
3165static int __init atl1_init_module(void)
3166{
3167 return pci_register_driver(&atl1_driver);
3168}
3169
3170module_init(atl1_init_module);
3171module_exit(atl1_exit_module);
3172
3173struct atl1_stats { 3148struct atl1_stats {
3174 char stat_string[ETH_GSTRING_LEN]; 3149 char stat_string[ETH_GSTRING_LEN];
3175 int sizeof_stat; 3150 int sizeof_stat;
@@ -3705,3 +3680,5 @@ static const struct ethtool_ops atl1_ethtool_ops = {
3705 .get_ethtool_stats = atl1_get_ethtool_stats, 3680 .get_ethtool_stats = atl1_get_ethtool_stats,
3706 .get_sset_count = atl1_get_sset_count, 3681 .get_sset_count = atl1_get_sset_count,
3707}; 3682};
3683
3684module_pci_driver(atl1_driver);
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 3e69b3f88099..1d680baf43d6 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -22,7 +22,6 @@ config B44
22 tristate "Broadcom 440x/47xx ethernet support" 22 tristate "Broadcom 440x/47xx ethernet support"
23 depends on SSB_POSSIBLE && HAS_DMA 23 depends on SSB_POSSIBLE && HAS_DMA
24 select SSB 24 select SSB
25 select NET_CORE
26 select MII 25 select MII
27 ---help--- 26 ---help---
28 If you have a network (Ethernet) controller of this type, say Y 27 If you have a network (Ethernet) controller of this type, say Y
@@ -54,7 +53,6 @@ config B44_PCI
54config BCM63XX_ENET 53config BCM63XX_ENET
55 tristate "Broadcom 63xx internal mac support" 54 tristate "Broadcom 63xx internal mac support"
56 depends on BCM63XX 55 depends on BCM63XX
57 select NET_CORE
58 select MII 56 select MII
59 select PHYLIB 57 select PHYLIB
60 help 58 help
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 0b3e23ec37f7..b1bcd4ba4744 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -41,8 +41,8 @@ static int copybreak __read_mostly = 128;
41module_param(copybreak, int, 0); 41module_param(copybreak, int, 0);
42MODULE_PARM_DESC(copybreak, "Receive copy threshold"); 42MODULE_PARM_DESC(copybreak, "Receive copy threshold");
43 43
44/* io memory shared between all devices */ 44/* io registers memory shared between all devices */
45static void __iomem *bcm_enet_shared_base; 45static void __iomem *bcm_enet_shared_base[3];
46 46
47/* 47/*
48 * io helpers to access mac registers 48 * io helpers to access mac registers
@@ -59,17 +59,76 @@ static inline void enet_writel(struct bcm_enet_priv *priv,
59} 59}
60 60
61/* 61/*
62 * io helpers to access shared registers 62 * io helpers to access switch registers
63 */ 63 */
64static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
65{
66 return bcm_readl(priv->base + off);
67}
68
69static inline void enetsw_writel(struct bcm_enet_priv *priv,
70 u32 val, u32 off)
71{
72 bcm_writel(val, priv->base + off);
73}
74
75static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
76{
77 return bcm_readw(priv->base + off);
78}
79
80static inline void enetsw_writew(struct bcm_enet_priv *priv,
81 u16 val, u32 off)
82{
83 bcm_writew(val, priv->base + off);
84}
85
86static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
87{
88 return bcm_readb(priv->base + off);
89}
90
91static inline void enetsw_writeb(struct bcm_enet_priv *priv,
92 u8 val, u32 off)
93{
94 bcm_writeb(val, priv->base + off);
95}
96
97
98/* io helpers to access shared registers */
64static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) 99static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
65{ 100{
66 return bcm_readl(bcm_enet_shared_base + off); 101 return bcm_readl(bcm_enet_shared_base[0] + off);
67} 102}
68 103
69static inline void enet_dma_writel(struct bcm_enet_priv *priv, 104static inline void enet_dma_writel(struct bcm_enet_priv *priv,
70 u32 val, u32 off) 105 u32 val, u32 off)
71{ 106{
72 bcm_writel(val, bcm_enet_shared_base + off); 107 bcm_writel(val, bcm_enet_shared_base[0] + off);
108}
109
110static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
111{
112 return bcm_readl(bcm_enet_shared_base[1] +
113 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
114}
115
116static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
117 u32 val, u32 off, int chan)
118{
119 bcm_writel(val, bcm_enet_shared_base[1] +
120 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
121}
122
123static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
124{
125 return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
126}
127
128static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
129 u32 val, u32 off, int chan)
130{
131 bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
73} 132}
74 133
75/* 134/*
@@ -196,7 +255,6 @@ static int bcm_enet_refill_rx(struct net_device *dev)
196 if (!skb) 255 if (!skb)
197 break; 256 break;
198 priv->rx_skb[desc_idx] = skb; 257 priv->rx_skb[desc_idx] = skb;
199
200 p = dma_map_single(&priv->pdev->dev, skb->data, 258 p = dma_map_single(&priv->pdev->dev, skb->data,
201 priv->rx_skb_size, 259 priv->rx_skb_size,
202 DMA_FROM_DEVICE); 260 DMA_FROM_DEVICE);
@@ -206,7 +264,7 @@ static int bcm_enet_refill_rx(struct net_device *dev)
206 len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; 264 len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
207 len_stat |= DMADESC_OWNER_MASK; 265 len_stat |= DMADESC_OWNER_MASK;
208 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { 266 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
209 len_stat |= DMADESC_WRAP_MASK; 267 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
210 priv->rx_dirty_desc = 0; 268 priv->rx_dirty_desc = 0;
211 } else { 269 } else {
212 priv->rx_dirty_desc++; 270 priv->rx_dirty_desc++;
@@ -217,7 +275,10 @@ static int bcm_enet_refill_rx(struct net_device *dev)
217 priv->rx_desc_count++; 275 priv->rx_desc_count++;
218 276
219 /* tell dma engine we allocated one buffer */ 277 /* tell dma engine we allocated one buffer */
220 enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); 278 if (priv->dma_has_sram)
279 enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
280 else
281 enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
221 } 282 }
222 283
223 /* If rx ring is still empty, set a timer to try allocating 284 /* If rx ring is still empty, set a timer to try allocating
@@ -293,13 +354,15 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
293 354
294 /* if the packet does not have start of packet _and_ 355 /* if the packet does not have start of packet _and_
295 * end of packet flag set, then just recycle it */ 356 * end of packet flag set, then just recycle it */
296 if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { 357 if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
358 (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
297 dev->stats.rx_dropped++; 359 dev->stats.rx_dropped++;
298 continue; 360 continue;
299 } 361 }
300 362
301 /* recycle packet if it's marked as bad */ 363 /* recycle packet if it's marked as bad */
302 if (unlikely(len_stat & DMADESC_ERR_MASK)) { 364 if (!priv->enet_is_sw &&
365 unlikely(len_stat & DMADESC_ERR_MASK)) {
303 dev->stats.rx_errors++; 366 dev->stats.rx_errors++;
304 367
305 if (len_stat & DMADESC_OVSIZE_MASK) 368 if (len_stat & DMADESC_OVSIZE_MASK)
@@ -353,8 +416,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
353 bcm_enet_refill_rx(dev); 416 bcm_enet_refill_rx(dev);
354 417
355 /* kick rx dma */ 418 /* kick rx dma */
356 enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, 419 enet_dmac_writel(priv, priv->dma_chan_en_mask,
357 ENETDMA_CHANCFG_REG(priv->rx_chan)); 420 ENETDMAC_CHANCFG, priv->rx_chan);
358 } 421 }
359 422
360 return processed; 423 return processed;
@@ -429,10 +492,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
429 dev = priv->net_dev; 492 dev = priv->net_dev;
430 493
431 /* ack interrupts */ 494 /* ack interrupts */
432 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 495 enet_dmac_writel(priv, priv->dma_chan_int_mask,
433 ENETDMA_IR_REG(priv->rx_chan)); 496 ENETDMAC_IR, priv->rx_chan);
434 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 497 enet_dmac_writel(priv, priv->dma_chan_int_mask,
435 ENETDMA_IR_REG(priv->tx_chan)); 498 ENETDMAC_IR, priv->tx_chan);
436 499
437 /* reclaim sent skb */ 500 /* reclaim sent skb */
438 tx_work_done = bcm_enet_tx_reclaim(dev, 0); 501 tx_work_done = bcm_enet_tx_reclaim(dev, 0);
@@ -451,10 +514,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
451 napi_complete(napi); 514 napi_complete(napi);
452 515
453 /* restore rx/tx interrupt */ 516 /* restore rx/tx interrupt */
454 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 517 enet_dmac_writel(priv, priv->dma_chan_int_mask,
455 ENETDMA_IRMASK_REG(priv->rx_chan)); 518 ENETDMAC_IRMASK, priv->rx_chan);
456 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 519 enet_dmac_writel(priv, priv->dma_chan_int_mask,
457 ENETDMA_IRMASK_REG(priv->tx_chan)); 520 ENETDMAC_IRMASK, priv->tx_chan);
458 521
459 return rx_work_done; 522 return rx_work_done;
460} 523}
@@ -497,8 +560,8 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
497 priv = netdev_priv(dev); 560 priv = netdev_priv(dev);
498 561
499 /* mask rx/tx interrupts */ 562 /* mask rx/tx interrupts */
500 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); 563 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
501 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); 564 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
502 565
503 napi_schedule(&priv->napi); 566 napi_schedule(&priv->napi);
504 567
@@ -530,6 +593,26 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
530 goto out_unlock; 593 goto out_unlock;
531 } 594 }
532 595
596 /* pad small packets sent on a switch device */
597 if (priv->enet_is_sw && skb->len < 64) {
598 int needed = 64 - skb->len;
599 char *data;
600
601 if (unlikely(skb_tailroom(skb) < needed)) {
602 struct sk_buff *nskb;
603
604 nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
605 if (!nskb) {
606 ret = NETDEV_TX_BUSY;
607 goto out_unlock;
608 }
609 dev_kfree_skb(skb);
610 skb = nskb;
611 }
612 data = skb_put(skb, needed);
613 memset(data, 0, needed);
614 }
615
533 /* point to the next available desc */ 616 /* point to the next available desc */
534 desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; 617 desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
535 priv->tx_skb[priv->tx_curr_desc] = skb; 618 priv->tx_skb[priv->tx_curr_desc] = skb;
@@ -539,14 +622,14 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
539 DMA_TO_DEVICE); 622 DMA_TO_DEVICE);
540 623
541 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; 624 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
542 len_stat |= DMADESC_ESOP_MASK | 625 len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
543 DMADESC_APPEND_CRC | 626 DMADESC_APPEND_CRC |
544 DMADESC_OWNER_MASK; 627 DMADESC_OWNER_MASK;
545 628
546 priv->tx_curr_desc++; 629 priv->tx_curr_desc++;
547 if (priv->tx_curr_desc == priv->tx_ring_size) { 630 if (priv->tx_curr_desc == priv->tx_ring_size) {
548 priv->tx_curr_desc = 0; 631 priv->tx_curr_desc = 0;
549 len_stat |= DMADESC_WRAP_MASK; 632 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
550 } 633 }
551 priv->tx_desc_count--; 634 priv->tx_desc_count--;
552 635
@@ -557,8 +640,8 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
557 wmb(); 640 wmb();
558 641
559 /* kick tx dma */ 642 /* kick tx dma */
560 enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, 643 enet_dmac_writel(priv, priv->dma_chan_en_mask,
561 ENETDMA_CHANCFG_REG(priv->tx_chan)); 644 ENETDMAC_CHANCFG, priv->tx_chan);
562 645
563 /* stop queue if no more desc available */ 646 /* stop queue if no more desc available */
564 if (!priv->tx_desc_count) 647 if (!priv->tx_desc_count)
@@ -686,6 +769,9 @@ static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
686 val &= ~ENET_RXCFG_ENFLOW_MASK; 769 val &= ~ENET_RXCFG_ENFLOW_MASK;
687 enet_writel(priv, val, ENET_RXCFG_REG); 770 enet_writel(priv, val, ENET_RXCFG_REG);
688 771
772 if (!priv->dma_has_sram)
773 return;
774
689 /* tx flow control (pause frame generation) */ 775 /* tx flow control (pause frame generation) */
690 val = enet_dma_readl(priv, ENETDMA_CFG_REG); 776 val = enet_dma_readl(priv, ENETDMA_CFG_REG);
691 if (tx_en) 777 if (tx_en)
@@ -833,8 +919,8 @@ static int bcm_enet_open(struct net_device *dev)
833 919
834 /* mask all interrupts and request them */ 920 /* mask all interrupts and request them */
835 enet_writel(priv, 0, ENET_IRMASK_REG); 921 enet_writel(priv, 0, ENET_IRMASK_REG);
836 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); 922 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
837 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); 923 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
838 924
839 ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); 925 ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
840 if (ret) 926 if (ret)
@@ -909,8 +995,12 @@ static int bcm_enet_open(struct net_device *dev)
909 priv->rx_curr_desc = 0; 995 priv->rx_curr_desc = 0;
910 996
911 /* initialize flow control buffer allocation */ 997 /* initialize flow control buffer allocation */
912 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, 998 if (priv->dma_has_sram)
913 ENETDMA_BUFALLOC_REG(priv->rx_chan)); 999 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
1000 ENETDMA_BUFALLOC_REG(priv->rx_chan));
1001 else
1002 enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
1003 ENETDMAC_BUFALLOC, priv->rx_chan);
914 1004
915 if (bcm_enet_refill_rx(dev)) { 1005 if (bcm_enet_refill_rx(dev)) {
916 dev_err(kdev, "cannot allocate rx skb queue\n"); 1006 dev_err(kdev, "cannot allocate rx skb queue\n");
@@ -919,37 +1009,55 @@ static int bcm_enet_open(struct net_device *dev)
919 } 1009 }
920 1010
921 /* write rx & tx ring addresses */ 1011 /* write rx & tx ring addresses */
922 enet_dma_writel(priv, priv->rx_desc_dma, 1012 if (priv->dma_has_sram) {
923 ENETDMA_RSTART_REG(priv->rx_chan)); 1013 enet_dmas_writel(priv, priv->rx_desc_dma,
924 enet_dma_writel(priv, priv->tx_desc_dma, 1014 ENETDMAS_RSTART_REG, priv->rx_chan);
925 ENETDMA_RSTART_REG(priv->tx_chan)); 1015 enet_dmas_writel(priv, priv->tx_desc_dma,
1016 ENETDMAS_RSTART_REG, priv->tx_chan);
1017 } else {
1018 enet_dmac_writel(priv, priv->rx_desc_dma,
1019 ENETDMAC_RSTART, priv->rx_chan);
1020 enet_dmac_writel(priv, priv->tx_desc_dma,
1021 ENETDMAC_RSTART, priv->tx_chan);
1022 }
926 1023
927 /* clear remaining state ram for rx & tx channel */ 1024 /* clear remaining state ram for rx & tx channel */
928 enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan)); 1025 if (priv->dma_has_sram) {
929 enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan)); 1026 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
930 enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan)); 1027 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
931 enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan)); 1028 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
932 enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan)); 1029 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
933 enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan)); 1030 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
1031 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
1032 } else {
1033 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
1034 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
1035 }
934 1036
935 /* set max rx/tx length */ 1037 /* set max rx/tx length */
936 enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); 1038 enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
937 enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); 1039 enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
938 1040
939 /* set dma maximum burst len */ 1041 /* set dma maximum burst len */
940 enet_dma_writel(priv, BCMENET_DMA_MAXBURST, 1042 enet_dmac_writel(priv, priv->dma_maxburst,
941 ENETDMA_MAXBURST_REG(priv->rx_chan)); 1043 ENETDMAC_MAXBURST, priv->rx_chan);
942 enet_dma_writel(priv, BCMENET_DMA_MAXBURST, 1044 enet_dmac_writel(priv, priv->dma_maxburst,
943 ENETDMA_MAXBURST_REG(priv->tx_chan)); 1045 ENETDMAC_MAXBURST, priv->tx_chan);
944 1046
945 /* set correct transmit fifo watermark */ 1047 /* set correct transmit fifo watermark */
946 enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); 1048 enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
947 1049
948 /* set flow control low/high threshold to 1/3 / 2/3 */ 1050 /* set flow control low/high threshold to 1/3 / 2/3 */
949 val = priv->rx_ring_size / 3; 1051 if (priv->dma_has_sram) {
950 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); 1052 val = priv->rx_ring_size / 3;
951 val = (priv->rx_ring_size * 2) / 3; 1053 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
952 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); 1054 val = (priv->rx_ring_size * 2) / 3;
1055 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
1056 } else {
1057 enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
1058 enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
1059 enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
1060 }
953 1061
954 /* all set, enable mac and interrupts, start dma engine and 1062 /* all set, enable mac and interrupts, start dma engine and
955 * kick rx dma channel */ 1063 * kick rx dma channel */
@@ -958,26 +1066,26 @@ static int bcm_enet_open(struct net_device *dev)
958 val |= ENET_CTL_ENABLE_MASK; 1066 val |= ENET_CTL_ENABLE_MASK;
959 enet_writel(priv, val, ENET_CTL_REG); 1067 enet_writel(priv, val, ENET_CTL_REG);
960 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); 1068 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
961 enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, 1069 enet_dmac_writel(priv, priv->dma_chan_en_mask,
962 ENETDMA_CHANCFG_REG(priv->rx_chan)); 1070 ENETDMAC_CHANCFG, priv->rx_chan);
963 1071
964 /* watch "mib counters about to overflow" interrupt */ 1072 /* watch "mib counters about to overflow" interrupt */
965 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); 1073 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
966 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); 1074 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
967 1075
968 /* watch "packet transferred" interrupt in rx and tx */ 1076 /* watch "packet transferred" interrupt in rx and tx */
969 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 1077 enet_dmac_writel(priv, priv->dma_chan_int_mask,
970 ENETDMA_IR_REG(priv->rx_chan)); 1078 ENETDMAC_IR, priv->rx_chan);
971 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 1079 enet_dmac_writel(priv, priv->dma_chan_int_mask,
972 ENETDMA_IR_REG(priv->tx_chan)); 1080 ENETDMAC_IR, priv->tx_chan);
973 1081
974 /* make sure we enable napi before rx interrupt */ 1082 /* make sure we enable napi before rx interrupt */
975 napi_enable(&priv->napi); 1083 napi_enable(&priv->napi);
976 1084
977 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 1085 enet_dmac_writel(priv, priv->dma_chan_int_mask,
978 ENETDMA_IRMASK_REG(priv->rx_chan)); 1086 ENETDMAC_IRMASK, priv->rx_chan);
979 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 1087 enet_dmac_writel(priv, priv->dma_chan_int_mask,
980 ENETDMA_IRMASK_REG(priv->tx_chan)); 1088 ENETDMAC_IRMASK, priv->tx_chan);
981 1089
982 if (priv->has_phy) 1090 if (priv->has_phy)
983 phy_start(priv->phydev); 1091 phy_start(priv->phydev);
@@ -1057,14 +1165,14 @@ static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1057{ 1165{
1058 int limit; 1166 int limit;
1059 1167
1060 enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan)); 1168 enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
1061 1169
1062 limit = 1000; 1170 limit = 1000;
1063 do { 1171 do {
1064 u32 val; 1172 u32 val;
1065 1173
1066 val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan)); 1174 val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
1067 if (!(val & ENETDMA_CHANCFG_EN_MASK)) 1175 if (!(val & ENETDMAC_CHANCFG_EN_MASK))
1068 break; 1176 break;
1069 udelay(1); 1177 udelay(1);
1070 } while (limit--); 1178 } while (limit--);
@@ -1090,8 +1198,8 @@ static int bcm_enet_stop(struct net_device *dev)
1090 1198
1091 /* mask all interrupts */ 1199 /* mask all interrupts */
1092 enet_writel(priv, 0, ENET_IRMASK_REG); 1200 enet_writel(priv, 0, ENET_IRMASK_REG);
1093 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); 1201 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
1094 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); 1202 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
1095 1203
1096 /* make sure no mib update is scheduled */ 1204 /* make sure no mib update is scheduled */
1097 cancel_work_sync(&priv->mib_update_task); 1205 cancel_work_sync(&priv->mib_update_task);
@@ -1328,6 +1436,20 @@ static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1328 mutex_unlock(&priv->mib_update_lock); 1436 mutex_unlock(&priv->mib_update_lock);
1329} 1437}
1330 1438
1439static int bcm_enet_nway_reset(struct net_device *dev)
1440{
1441 struct bcm_enet_priv *priv;
1442
1443 priv = netdev_priv(dev);
1444 if (priv->has_phy) {
1445 if (!priv->phydev)
1446 return -ENODEV;
1447 return genphy_restart_aneg(priv->phydev);
1448 }
1449
1450 return -EOPNOTSUPP;
1451}
1452
1331static int bcm_enet_get_settings(struct net_device *dev, 1453static int bcm_enet_get_settings(struct net_device *dev,
1332 struct ethtool_cmd *cmd) 1454 struct ethtool_cmd *cmd)
1333{ 1455{
@@ -1470,6 +1592,7 @@ static const struct ethtool_ops bcm_enet_ethtool_ops = {
1470 .get_strings = bcm_enet_get_strings, 1592 .get_strings = bcm_enet_get_strings,
1471 .get_sset_count = bcm_enet_get_sset_count, 1593 .get_sset_count = bcm_enet_get_sset_count,
1472 .get_ethtool_stats = bcm_enet_get_ethtool_stats, 1594 .get_ethtool_stats = bcm_enet_get_ethtool_stats,
1595 .nway_reset = bcm_enet_nway_reset,
1473 .get_settings = bcm_enet_get_settings, 1596 .get_settings = bcm_enet_get_settings,
1474 .set_settings = bcm_enet_set_settings, 1597 .set_settings = bcm_enet_set_settings,
1475 .get_drvinfo = bcm_enet_get_drvinfo, 1598 .get_drvinfo = bcm_enet_get_drvinfo,
@@ -1530,7 +1653,7 @@ static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
1530 * it's appended 1653 * it's appended
1531 */ 1654 */
1532 priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, 1655 priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1533 BCMENET_DMA_MAXBURST * 4); 1656 priv->dma_maxburst * 4);
1534 return 0; 1657 return 0;
1535} 1658}
1536 1659
@@ -1621,7 +1744,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
1621 1744
1622 /* stop if shared driver failed, assume driver->probe will be 1745 /* stop if shared driver failed, assume driver->probe will be
1623 * called in the same order we register devices (correct ?) */ 1746 * called in the same order we register devices (correct ?) */
1624 if (!bcm_enet_shared_base) 1747 if (!bcm_enet_shared_base[0])
1625 return -ENODEV; 1748 return -ENODEV;
1626 1749
1627 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1750 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1637,6 +1760,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
1637 return -ENOMEM; 1760 return -ENOMEM;
1638 priv = netdev_priv(dev); 1761 priv = netdev_priv(dev);
1639 1762
1763 priv->enet_is_sw = false;
1764 priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1765
1640 ret = compute_hw_mtu(priv, dev->mtu); 1766 ret = compute_hw_mtu(priv, dev->mtu);
1641 if (ret) 1767 if (ret)
1642 goto out; 1768 goto out;
@@ -1687,6 +1813,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
1687 priv->pause_tx = pd->pause_tx; 1813 priv->pause_tx = pd->pause_tx;
1688 priv->force_duplex_full = pd->force_duplex_full; 1814 priv->force_duplex_full = pd->force_duplex_full;
1689 priv->force_speed_100 = pd->force_speed_100; 1815 priv->force_speed_100 = pd->force_speed_100;
1816 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1817 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1818 priv->dma_chan_width = pd->dma_chan_width;
1819 priv->dma_has_sram = pd->dma_has_sram;
1820 priv->dma_desc_shift = pd->dma_desc_shift;
1690 } 1821 }
1691 1822
1692 if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { 1823 if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
@@ -1847,7 +1978,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
1847 clk_disable_unprepare(priv->mac_clk); 1978 clk_disable_unprepare(priv->mac_clk);
1848 clk_put(priv->mac_clk); 1979 clk_put(priv->mac_clk);
1849 1980
1850 platform_set_drvdata(pdev, NULL);
1851 free_netdev(dev); 1981 free_netdev(dev);
1852 return 0; 1982 return 0;
1853} 1983}
@@ -1862,19 +1992,881 @@ struct platform_driver bcm63xx_enet_driver = {
1862}; 1992};
1863 1993
1864/* 1994/*
1865 * reserve & remap memory space shared between all macs 1995 * switch mii access callbacks
1866 */ 1996 */
1867static int bcm_enet_shared_probe(struct platform_device *pdev) 1997static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1998 int ext, int phy_id, int location)
1868{ 1999{
1869 struct resource *res; 2000 u32 reg;
2001 int ret;
1870 2002
1871 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2003 spin_lock_bh(&priv->enetsw_mdio_lock);
1872 if (!res) 2004 enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
2005
2006 reg = ENETSW_MDIOC_RD_MASK |
2007 (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
2008 (location << ENETSW_MDIOC_REG_SHIFT);
2009
2010 if (ext)
2011 reg |= ENETSW_MDIOC_EXT_MASK;
2012
2013 enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
2014 udelay(50);
2015 ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
2016 spin_unlock_bh(&priv->enetsw_mdio_lock);
2017 return ret;
2018}
2019
2020static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
2021 int ext, int phy_id, int location,
2022 uint16_t data)
2023{
2024 u32 reg;
2025
2026 spin_lock_bh(&priv->enetsw_mdio_lock);
2027 enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
2028
2029 reg = ENETSW_MDIOC_WR_MASK |
2030 (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
2031 (location << ENETSW_MDIOC_REG_SHIFT);
2032
2033 if (ext)
2034 reg |= ENETSW_MDIOC_EXT_MASK;
2035
2036 reg |= data;
2037
2038 enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
2039 udelay(50);
2040 spin_unlock_bh(&priv->enetsw_mdio_lock);
2041}
2042
2043static inline int bcm_enet_port_is_rgmii(int portid)
2044{
2045 return portid >= ENETSW_RGMII_PORT0;
2046}
2047
2048/*
2049 * enet sw PHY polling
2050 */
2051static void swphy_poll_timer(unsigned long data)
2052{
2053 struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
2054 unsigned int i;
2055
2056 for (i = 0; i < priv->num_ports; i++) {
2057 struct bcm63xx_enetsw_port *port;
2058 int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
2059 int external_phy = bcm_enet_port_is_rgmii(i);
2060 u8 override;
2061
2062 port = &priv->used_ports[i];
2063 if (!port->used)
2064 continue;
2065
2066 if (port->bypass_link)
2067 continue;
2068
2069 /* dummy read to clear */
2070 for (j = 0; j < 2; j++)
2071 val = bcmenet_sw_mdio_read(priv, external_phy,
2072 port->phy_id, MII_BMSR);
2073
2074 if (val == 0xffff)
2075 continue;
2076
2077 up = (val & BMSR_LSTATUS) ? 1 : 0;
2078 if (!(up ^ priv->sw_port_link[i]))
2079 continue;
2080
2081 priv->sw_port_link[i] = up;
2082
2083 /* link changed */
2084 if (!up) {
2085 dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2086 port->name);
2087 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2088 ENETSW_PORTOV_REG(i));
2089 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2090 ENETSW_PTCTRL_TXDIS_MASK,
2091 ENETSW_PTCTRL_REG(i));
2092 continue;
2093 }
2094
2095 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2096 port->phy_id, MII_ADVERTISE);
2097
2098 lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2099 MII_LPA);
2100
2101 lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2102 MII_STAT1000);
2103
2104 /* figure out media and duplex from advertise and LPA values */
2105 media = mii_nway_result(lpa & advertise);
2106 duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2107 if (lpa2 & LPA_1000FULL)
2108 duplex = 1;
2109
2110 if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
2111 speed = 1000;
2112 else {
2113 if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2114 speed = 100;
2115 else
2116 speed = 10;
2117 }
2118
2119 dev_info(&priv->pdev->dev,
2120 "link UP on %s, %dMbps, %s-duplex\n",
2121 port->name, speed, duplex ? "full" : "half");
2122
2123 override = ENETSW_PORTOV_ENABLE_MASK |
2124 ENETSW_PORTOV_LINKUP_MASK;
2125
2126 if (speed == 1000)
2127 override |= ENETSW_IMPOV_1000_MASK;
2128 else if (speed == 100)
2129 override |= ENETSW_IMPOV_100_MASK;
2130 if (duplex)
2131 override |= ENETSW_IMPOV_FDX_MASK;
2132
2133 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2134 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2135 }
2136
2137 priv->swphy_poll.expires = jiffies + HZ;
2138 add_timer(&priv->swphy_poll);
2139}
2140
2141/*
2142 * open callback, allocate dma rings & buffers and start rx operation
2143 */
2144static int bcm_enetsw_open(struct net_device *dev)
2145{
2146 struct bcm_enet_priv *priv;
2147 struct device *kdev;
2148 int i, ret;
2149 unsigned int size;
2150 void *p;
2151 u32 val;
2152
2153 priv = netdev_priv(dev);
2154 kdev = &priv->pdev->dev;
2155
2156 /* mask all interrupts and request them */
2157 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2158 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2159
2160 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2161 IRQF_DISABLED, dev->name, dev);
2162 if (ret)
2163 goto out_freeirq;
2164
2165 if (priv->irq_tx != -1) {
2166 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2167 IRQF_DISABLED, dev->name, dev);
2168 if (ret)
2169 goto out_freeirq_rx;
2170 }
2171
2172 /* allocate rx dma ring */
2173 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2174 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2175 if (!p) {
2176 dev_err(kdev, "cannot allocate rx ring %u\n", size);
2177 ret = -ENOMEM;
2178 goto out_freeirq_tx;
2179 }
2180
2181 memset(p, 0, size);
2182 priv->rx_desc_alloc_size = size;
2183 priv->rx_desc_cpu = p;
2184
2185 /* allocate tx dma ring */
2186 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2187 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2188 if (!p) {
2189 dev_err(kdev, "cannot allocate tx ring\n");
2190 ret = -ENOMEM;
2191 goto out_free_rx_ring;
2192 }
2193
2194 memset(p, 0, size);
2195 priv->tx_desc_alloc_size = size;
2196 priv->tx_desc_cpu = p;
2197
2198 priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
2199 GFP_KERNEL);
2200 if (!priv->tx_skb) {
2201 dev_err(kdev, "cannot allocate rx skb queue\n");
2202 ret = -ENOMEM;
2203 goto out_free_tx_ring;
2204 }
2205
2206 priv->tx_desc_count = priv->tx_ring_size;
2207 priv->tx_dirty_desc = 0;
2208 priv->tx_curr_desc = 0;
2209 spin_lock_init(&priv->tx_lock);
2210
2211 /* init & fill rx ring with skbs */
2212 priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
2213 GFP_KERNEL);
2214 if (!priv->rx_skb) {
2215 dev_err(kdev, "cannot allocate rx skb queue\n");
2216 ret = -ENOMEM;
2217 goto out_free_tx_skb;
2218 }
2219
2220 priv->rx_desc_count = 0;
2221 priv->rx_dirty_desc = 0;
2222 priv->rx_curr_desc = 0;
2223
2224 /* disable all ports */
2225 for (i = 0; i < priv->num_ports; i++) {
2226 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2227 ENETSW_PORTOV_REG(i));
2228 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2229 ENETSW_PTCTRL_TXDIS_MASK,
2230 ENETSW_PTCTRL_REG(i));
2231
2232 priv->sw_port_link[i] = 0;
2233 }
2234
2235 /* reset mib */
2236 val = enetsw_readb(priv, ENETSW_GMCR_REG);
2237 val |= ENETSW_GMCR_RST_MIB_MASK;
2238 enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2239 mdelay(1);
2240 val &= ~ENETSW_GMCR_RST_MIB_MASK;
2241 enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2242 mdelay(1);
2243
2244 /* force CPU port state */
2245 val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2246 val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
2247 enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2248
2249 /* enable switch forward engine */
2250 val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2251 val |= ENETSW_SWMODE_FWD_EN_MASK;
2252 enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2253
2254 /* enable jumbo on all ports */
2255 enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2256 enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
2257
2258 /* initialize flow control buffer allocation */
2259 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2260 ENETDMA_BUFALLOC_REG(priv->rx_chan));
2261
2262 if (bcm_enet_refill_rx(dev)) {
2263 dev_err(kdev, "cannot allocate rx skb queue\n");
2264 ret = -ENOMEM;
2265 goto out;
2266 }
2267
2268 /* write rx & tx ring addresses */
2269 enet_dmas_writel(priv, priv->rx_desc_dma,
2270 ENETDMAS_RSTART_REG, priv->rx_chan);
2271 enet_dmas_writel(priv, priv->tx_desc_dma,
2272 ENETDMAS_RSTART_REG, priv->tx_chan);
2273
2274 /* clear remaining state ram for rx & tx channel */
2275 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2276 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2277 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2278 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2279 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2280 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2281
2282 /* set dma maximum burst len */
2283 enet_dmac_writel(priv, priv->dma_maxburst,
2284 ENETDMAC_MAXBURST, priv->rx_chan);
2285 enet_dmac_writel(priv, priv->dma_maxburst,
2286 ENETDMAC_MAXBURST, priv->tx_chan);
2287
2288 /* set flow control low/high threshold to 1/3 / 2/3 */
2289 val = priv->rx_ring_size / 3;
2290 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2291 val = (priv->rx_ring_size * 2) / 3;
2292 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
2293
2294 /* all set, enable mac and interrupts, start dma engine and
2295 * kick rx dma channel
2296 */
2297 wmb();
2298 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2299 enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2300 ENETDMAC_CHANCFG, priv->rx_chan);
2301
2302 /* watch "packet transferred" interrupt in rx and tx */
2303 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2304 ENETDMAC_IR, priv->rx_chan);
2305 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2306 ENETDMAC_IR, priv->tx_chan);
2307
2308 /* make sure we enable napi before rx interrupt */
2309 napi_enable(&priv->napi);
2310
2311 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2312 ENETDMAC_IRMASK, priv->rx_chan);
2313 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2314 ENETDMAC_IRMASK, priv->tx_chan);
2315
2316 netif_carrier_on(dev);
2317 netif_start_queue(dev);
2318
2319 /* apply override config for bypass_link ports here. */
2320 for (i = 0; i < priv->num_ports; i++) {
2321 struct bcm63xx_enetsw_port *port;
2322 u8 override;
2323 port = &priv->used_ports[i];
2324 if (!port->used)
2325 continue;
2326
2327 if (!port->bypass_link)
2328 continue;
2329
2330 override = ENETSW_PORTOV_ENABLE_MASK |
2331 ENETSW_PORTOV_LINKUP_MASK;
2332
2333 switch (port->force_speed) {
2334 case 1000:
2335 override |= ENETSW_IMPOV_1000_MASK;
2336 break;
2337 case 100:
2338 override |= ENETSW_IMPOV_100_MASK;
2339 break;
2340 case 10:
2341 break;
2342 default:
2343 pr_warn("invalid forced speed on port %s: assume 10\n",
2344 port->name);
2345 break;
2346 }
2347
2348 if (port->force_duplex_full)
2349 override |= ENETSW_IMPOV_FDX_MASK;
2350
2351
2352 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2353 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2354 }
2355
2356 /* start phy polling timer */
2357 init_timer(&priv->swphy_poll);
2358 priv->swphy_poll.function = swphy_poll_timer;
2359 priv->swphy_poll.data = (unsigned long)priv;
2360 priv->swphy_poll.expires = jiffies;
2361 add_timer(&priv->swphy_poll);
2362 return 0;
2363
2364out:
2365 for (i = 0; i < priv->rx_ring_size; i++) {
2366 struct bcm_enet_desc *desc;
2367
2368 if (!priv->rx_skb[i])
2369 continue;
2370
2371 desc = &priv->rx_desc_cpu[i];
2372 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2373 DMA_FROM_DEVICE);
2374 kfree_skb(priv->rx_skb[i]);
2375 }
2376 kfree(priv->rx_skb);
2377
2378out_free_tx_skb:
2379 kfree(priv->tx_skb);
2380
2381out_free_tx_ring:
2382 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2383 priv->tx_desc_cpu, priv->tx_desc_dma);
2384
2385out_free_rx_ring:
2386 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2387 priv->rx_desc_cpu, priv->rx_desc_dma);
2388
2389out_freeirq_tx:
2390 if (priv->irq_tx != -1)
2391 free_irq(priv->irq_tx, dev);
2392
2393out_freeirq_rx:
2394 free_irq(priv->irq_rx, dev);
2395
2396out_freeirq:
2397 return ret;
2398}
2399
2400/* stop callback */
2401static int bcm_enetsw_stop(struct net_device *dev)
2402{
2403 struct bcm_enet_priv *priv;
2404 struct device *kdev;
2405 int i;
2406
2407 priv = netdev_priv(dev);
2408 kdev = &priv->pdev->dev;
2409
2410 del_timer_sync(&priv->swphy_poll);
2411 netif_stop_queue(dev);
2412 napi_disable(&priv->napi);
2413 del_timer_sync(&priv->rx_timeout);
2414
2415 /* mask all interrupts */
2416 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2417 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2418
2419 /* disable dma & mac */
2420 bcm_enet_disable_dma(priv, priv->tx_chan);
2421 bcm_enet_disable_dma(priv, priv->rx_chan);
2422
2423 /* force reclaim of all tx buffers */
2424 bcm_enet_tx_reclaim(dev, 1);
2425
2426 /* free the rx skb ring */
2427 for (i = 0; i < priv->rx_ring_size; i++) {
2428 struct bcm_enet_desc *desc;
2429
2430 if (!priv->rx_skb[i])
2431 continue;
2432
2433 desc = &priv->rx_desc_cpu[i];
2434 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2435 DMA_FROM_DEVICE);
2436 kfree_skb(priv->rx_skb[i]);
2437 }
2438
2439 /* free remaining allocated memory */
2440 kfree(priv->rx_skb);
2441 kfree(priv->tx_skb);
2442 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2443 priv->rx_desc_cpu, priv->rx_desc_dma);
2444 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2445 priv->tx_desc_cpu, priv->tx_desc_dma);
2446 if (priv->irq_tx != -1)
2447 free_irq(priv->irq_tx, dev);
2448 free_irq(priv->irq_rx, dev);
2449
2450 return 0;
2451}
2452
2453/* try to sort out phy external status by walking the used_port field
2454 * in the bcm_enet_priv structure. in case the phy address is not
2455 * assigned to any physical port on the switch, assume it is external
2456 * (and yell at the user).
2457 */
2458static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
2459{
2460 int i;
2461
2462 for (i = 0; i < priv->num_ports; ++i) {
2463 if (!priv->used_ports[i].used)
2464 continue;
2465 if (priv->used_ports[i].phy_id == phy_id)
2466 return bcm_enet_port_is_rgmii(i);
2467 }
2468
2469 printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
2470 phy_id);
2471 return 1;
2472}
2473
2474/* can't use bcmenet_sw_mdio_read directly as we need to sort out
2475 * external/internal status of the given phy_id first.
2476 */
2477static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
2478 int location)
2479{
2480 struct bcm_enet_priv *priv;
2481
2482 priv = netdev_priv(dev);
2483 return bcmenet_sw_mdio_read(priv,
2484 bcm_enetsw_phy_is_external(priv, phy_id),
2485 phy_id, location);
2486}
2487
2488/* can't use bcmenet_sw_mdio_write directly as we need to sort out
2489 * external/internal status of the given phy_id first.
2490 */
2491static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
2492 int location,
2493 int val)
2494{
2495 struct bcm_enet_priv *priv;
2496
2497 priv = netdev_priv(dev);
2498 bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
2499 phy_id, location, val);
2500}
2501
2502static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2503{
2504 struct mii_if_info mii;
2505
2506 mii.dev = dev;
2507 mii.mdio_read = bcm_enetsw_mii_mdio_read;
2508 mii.mdio_write = bcm_enetsw_mii_mdio_write;
2509 mii.phy_id = 0;
2510 mii.phy_id_mask = 0x3f;
2511 mii.reg_num_mask = 0x1f;
2512 return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
2513
2514}
2515
2516static const struct net_device_ops bcm_enetsw_ops = {
2517 .ndo_open = bcm_enetsw_open,
2518 .ndo_stop = bcm_enetsw_stop,
2519 .ndo_start_xmit = bcm_enet_start_xmit,
2520 .ndo_change_mtu = bcm_enet_change_mtu,
2521 .ndo_do_ioctl = bcm_enetsw_ioctl,
2522};
2523
2524
2525static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
2526 { "rx_packets", DEV_STAT(rx_packets), -1 },
2527 { "tx_packets", DEV_STAT(tx_packets), -1 },
2528 { "rx_bytes", DEV_STAT(rx_bytes), -1 },
2529 { "tx_bytes", DEV_STAT(tx_bytes), -1 },
2530 { "rx_errors", DEV_STAT(rx_errors), -1 },
2531 { "tx_errors", DEV_STAT(tx_errors), -1 },
2532 { "rx_dropped", DEV_STAT(rx_dropped), -1 },
2533 { "tx_dropped", DEV_STAT(tx_dropped), -1 },
2534
2535 { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
2536 { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
2537 { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
2538 { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
2539 { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
2540 { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
2541 { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
2542 { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
2543 { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
2544 { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
2545 ETHSW_MIB_RX_1024_1522 },
2546 { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
2547 ETHSW_MIB_RX_1523_2047 },
2548 { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
2549 ETHSW_MIB_RX_2048_4095 },
2550 { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
2551 ETHSW_MIB_RX_4096_8191 },
2552 { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
2553 ETHSW_MIB_RX_8192_9728 },
2554 { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
2555 { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
2556 { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
2557 { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
2558 { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
2559
2560 { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
2561 { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
2562 { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
2563 { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
2564 { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
2565 { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
2566
2567};
2568
/* number of entries in the switch ethtool stats table */
#define BCM_ENETSW_STATS_LEN	ARRAY_SIZE(bcm_enetsw_gstrings_stats)
2571
2572static void bcm_enetsw_get_strings(struct net_device *netdev,
2573 u32 stringset, u8 *data)
2574{
2575 int i;
2576
2577 switch (stringset) {
2578 case ETH_SS_STATS:
2579 for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2580 memcpy(data + i * ETH_GSTRING_LEN,
2581 bcm_enetsw_gstrings_stats[i].stat_string,
2582 ETH_GSTRING_LEN);
2583 }
2584 break;
2585 }
2586}
2587
2588static int bcm_enetsw_get_sset_count(struct net_device *netdev,
2589 int string_set)
2590{
2591 switch (string_set) {
2592 case ETH_SS_STATS:
2593 return BCM_ENETSW_STATS_LEN;
2594 default:
2595 return -EINVAL;
2596 }
2597}
2598
2599static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
2600 struct ethtool_drvinfo *drvinfo)
2601{
2602 strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
2603 strncpy(drvinfo->version, bcm_enet_driver_version, 32);
2604 strncpy(drvinfo->fw_version, "N/A", 32);
2605 strncpy(drvinfo->bus_info, "bcm63xx", 32);
2606 drvinfo->n_stats = BCM_ENETSW_STATS_LEN;
2607}
2608
/* ethtool get_ethtool_stats for the switch device.
 *
 * Two passes: first snapshot every hardware MIB counter into the
 * shadow fields of bcm_enet_priv, then copy all stats (hardware and
 * netdev software stats) into the caller's data[] array.  The MIB
 * read ordering (lo word, then hi word from reg+1 for 64-bit
 * counters) is hardware-sensitive; do not reorder.
 */
2609static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
2610 struct ethtool_stats *stats,
2611 u64 *data)
2612{
2613 struct bcm_enet_priv *priv;
2614 int i;
2615
2616 priv = netdev_priv(netdev);
2617
	/* pass 1: latch hardware MIB counters into priv's shadow copy */
2618 for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2619 const struct bcm_enet_stats *s;
2620 u32 lo, hi;
2621 char *p;
2622 int reg;
2623
2624 s = &bcm_enetsw_gstrings_stats[i];
2625
2626 reg = s->mib_reg;
	/* -1 marks a software stat; nothing to read from hardware */
2627 if (reg == -1)
2628 continue;
2629
2630 lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
2631 p = (char *)priv + s->stat_offset;
2632
	/* 64-bit counters occupy two consecutive MIB registers (lo, hi) */
2633 if (s->sizeof_stat == sizeof(u64)) {
2634 hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2635 *(u64 *)p = ((u64)hi << 32 | lo);
2636 } else {
2637 *(u32 *)p = lo;
2638 }
2639 }
2640
	/* pass 2: copy every stat (hardware shadow or netdev->stats)
	 * into the output array in table order
	 */
2641 for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2642 const struct bcm_enet_stats *s;
2643 char *p;
2644
2645 s = &bcm_enetsw_gstrings_stats[i];
2646
2647 if (s->mib_reg == -1)
2648 p = (char *)&netdev->stats + s->stat_offset;
2649 else
2650 p = (char *)priv + s->stat_offset;
2651
2652 data[i] = (s->sizeof_stat == sizeof(u64)) ?
2653 *(u64 *)p : *(u32 *)p;
2654 }
2655}
2656
2657static void bcm_enetsw_get_ringparam(struct net_device *dev,
2658 struct ethtool_ringparam *ering)
2659{
2660 struct bcm_enet_priv *priv;
2661
2662 priv = netdev_priv(dev);
2663
2664 /* rx/tx ring is actually only limited by memory */
2665 ering->rx_max_pending = 8192;
2666 ering->tx_max_pending = 8192;
2667 ering->rx_mini_max_pending = 0;
2668 ering->rx_jumbo_max_pending = 0;
2669 ering->rx_pending = priv->rx_ring_size;
2670 ering->tx_pending = priv->tx_ring_size;
2671}
2672
2673static int bcm_enetsw_set_ringparam(struct net_device *dev,
2674 struct ethtool_ringparam *ering)
2675{
2676 struct bcm_enet_priv *priv;
2677 int was_running;
2678
2679 priv = netdev_priv(dev);
2680
2681 was_running = 0;
2682 if (netif_running(dev)) {
2683 bcm_enetsw_stop(dev);
2684 was_running = 1;
2685 }
2686
2687 priv->rx_ring_size = ering->rx_pending;
2688 priv->tx_ring_size = ering->tx_pending;
2689
2690 if (was_running) {
2691 int err;
2692
2693 err = bcm_enetsw_open(dev);
2694 if (err)
2695 dev_close(dev);
2696 }
2697 return 0;
2698}
2699
2700static struct ethtool_ops bcm_enetsw_ethtool_ops = {
2701 .get_strings = bcm_enetsw_get_strings,
2702 .get_sset_count = bcm_enetsw_get_sset_count,
2703 .get_ethtool_stats = bcm_enetsw_get_ethtool_stats,
2704 .get_drvinfo = bcm_enetsw_get_drvinfo,
2705 .get_ringparam = bcm_enetsw_get_ringparam,
2706 .set_ringparam = bcm_enetsw_set_ringparam,
2707};
2708
/* Probe the enetsw (internal switch) platform device: allocate the
 * netdevice, fetch platform data, map registers, grab the clock and
 * IRQs, and register with the network stack.  Returns 0 on success or
 * a negative errno; all resources are unwound on the error paths.
 */
2709/* allocate netdevice, request register memory and register device. */
2710static int bcm_enetsw_probe(struct platform_device *pdev)
2711{
2712 struct bcm_enet_priv *priv;
2713 struct net_device *dev;
2714 struct bcm63xx_enetsw_platform_data *pd;
2715 struct resource *res_mem;
2716 int ret, irq_rx, irq_tx;
2717
2718 /* stop if shared driver failed, assume driver->probe will be
2719 * called in the same order we register devices (correct ?)
2720 */
2721 if (!bcm_enet_shared_base[0])
2722 return -ENODEV;
2723
2724 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2725 irq_rx = platform_get_irq(pdev, 0);
2726 irq_tx = platform_get_irq(pdev, 1);
	/* NOTE(review): irq_tx is not validated here, only irq_rx —
	 * confirm whether a missing tx IRQ is legal for this hardware.
	 */
2727 if (!res_mem || irq_rx < 0)
	/* NOTE(review): the next few lines are garbled in this copy
	 * (two diff columns fused by extraction); the intended flow is
	 * return -ENODEV, then ret = 0 and dev = alloc_etherdev(...),
	 * returning -ENOMEM when allocation fails.
	 */
1873 return -ENODEV; 2728 return -ENODEV;
1874 2729
1875 bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res); 2730 ret = 0;
1876 if (!bcm_enet_shared_base) 2731 dev = alloc_etherdev(sizeof(*priv));
2732 if (!dev)
1877 return -ENOMEM; 2733 return -ENOMEM;
2734 priv = netdev_priv(dev);
2735 memset(priv, 0, sizeof(*priv));
2736
2737 /* initialize default and fetch platform data */
2738 priv->enet_is_sw = true;
2739 priv->irq_rx = irq_rx;
2740 priv->irq_tx = irq_tx;
2741 priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2742 priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2743 priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2744
	/* platform data (MAC address, port map, DMA parameters) is
	 * optional; defaults above apply when it is absent
	 */
2745 pd = pdev->dev.platform_data;
2746 if (pd) {
2747 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
2748 memcpy(priv->used_ports, pd->used_ports,
2749 sizeof(pd->used_ports));
2750 priv->num_ports = pd->num_ports;
2751 priv->dma_has_sram = pd->dma_has_sram;
2752 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
2753 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
2754 priv->dma_chan_width = pd->dma_chan_width;
2755 }
2756
2757 ret = compute_hw_mtu(priv, dev->mtu);
2758 if (ret)
2759 goto out;
2760
2761 if (!request_mem_region(res_mem->start, resource_size(res_mem),
2762 "bcm63xx_enetsw")) {
2763 ret = -EBUSY;
2764 goto out;
2765 }
2766
2767 priv->base = ioremap(res_mem->start, resource_size(res_mem));
2768 if (priv->base == NULL) {
2769 ret = -ENOMEM;
2770 goto out_release_mem;
2771 }
2772
2773 priv->mac_clk = clk_get(&pdev->dev, "enetsw");
2774 if (IS_ERR(priv->mac_clk)) {
2775 ret = PTR_ERR(priv->mac_clk);
2776 goto out_unmap;
2777 }
	/* NOTE(review): clk_enable() return value is ignored, and the
	 * out_put_clk error path clk_put()s without clk_disable() —
	 * verify against the clk API's prepare/enable expectations.
	 */
2778 clk_enable(priv->mac_clk);
2779
2780 priv->rx_chan = 0;
2781 priv->tx_chan = 1;
2782 spin_lock_init(&priv->rx_lock);
2783
2784 /* init rx timeout (used for oom) */
2785 init_timer(&priv->rx_timeout);
2786 priv->rx_timeout.function = bcm_enet_refill_rx_timer;
2787 priv->rx_timeout.data = (unsigned long)dev;
2788
2789 /* register netdevice */
2790 dev->netdev_ops = &bcm_enetsw_ops;
2791 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
2792 SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
2793 SET_NETDEV_DEV(dev, &pdev->dev);
2794
2795 spin_lock_init(&priv->enetsw_mdio_lock);
2796
2797 ret = register_netdev(dev);
2798 if (ret)
2799 goto out_put_clk;
2800
2801 netif_carrier_off(dev);
2802 platform_set_drvdata(pdev, dev);
2803 priv->pdev = pdev;
2804 priv->net_dev = dev;
2805
2806 return 0;
2807
	/* error unwind: release resources in reverse acquisition order */
2808out_put_clk:
2809 clk_put(priv->mac_clk);
2810
2811out_unmap:
2812 iounmap(priv->base);
2813
2814out_release_mem:
2815 release_mem_region(res_mem->start, resource_size(res_mem));
2816out:
2817 free_netdev(dev);
2818 return ret;
2819}
2820
2821
2822/* exit func, stops hardware and unregisters netdevice */
2823static int bcm_enetsw_remove(struct platform_device *pdev)
2824{
2825 struct bcm_enet_priv *priv;
2826 struct net_device *dev;
2827 struct resource *res;
2828
2829 /* stop netdevice */
2830 dev = platform_get_drvdata(pdev);
2831 priv = netdev_priv(dev);
2832 unregister_netdev(dev);
2833
2834 /* release device resources */
2835 iounmap(priv->base);
2836 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2837 release_mem_region(res->start, resource_size(res));
2838
2839 platform_set_drvdata(pdev, NULL);
2840 free_netdev(dev);
2841 return 0;
2842}
2843
2844struct platform_driver bcm63xx_enetsw_driver = {
2845 .probe = bcm_enetsw_probe,
2846 .remove = bcm_enetsw_remove,
2847 .driver = {
2848 .name = "bcm63xx_enetsw",
2849 .owner = THIS_MODULE,
2850 },
2851};
2852
/* Map the three register ranges shared by all MACs into
 * bcm_enet_shared_base.  Returns 0 on success or the PTR_ERR from a
 * failed devm_ioremap_resource(); mappings are device-managed, so no
 * explicit unwind is needed here.
 */
2853/* reserve & remap memory space shared between all macs */
2854static int bcm_enet_shared_probe(struct platform_device *pdev)
2855{
2856 struct resource *res;
2857 void __iomem *p[3];
2858 unsigned int i;
2859
	/* clear first so a partial failure never leaves stale pointers */
2860 memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
2861
	/* the literal 3 matches p[] above; presumably it also matches
	 * the bcm_enet_shared_base array length — confirm in the header
	 */
2862 for (i = 0; i < 3; i++) {
2863 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
	/* devm_ioremap_resource() handles a NULL res by returning an
	 * ERR_PTR, so no separate missing-resource check is needed
	 */
2864 p[i] = devm_ioremap_resource(&pdev->dev, res);
2865 if (IS_ERR(p[i]))
2866 return PTR_ERR(p[i]);
2867 }
2868
	/* publish all three mappings at once, only after full success */
2869 memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
	/* NOTE(review): the closing lines below are garbled in this
	 * copy (two diff columns fused by extraction); the function
	 * simply returns 0 here.
	 */
1878 2870
1879 return 0; 2871 return 0;
1880} 2872}
@@ -1884,8 +2876,7 @@ static int bcm_enet_shared_remove(struct platform_device *pdev)
1884 return 0; 2876 return 0;
1885} 2877}
1886 2878
1887/* 2879/* this "shared" driver is needed because both macs share a single
1888 * this "shared" driver is needed because both macs share a single
1889 * address space 2880 * address space
1890 */ 2881 */
1891struct platform_driver bcm63xx_enet_shared_driver = { 2882struct platform_driver bcm63xx_enet_shared_driver = {
@@ -1897,9 +2888,7 @@ struct platform_driver bcm63xx_enet_shared_driver = {
1897 }, 2888 },
1898}; 2889};
1899 2890
1900/* 2891/* entry point */
1901 * entry point
1902 */
1903static int __init bcm_enet_init(void) 2892static int __init bcm_enet_init(void)
1904{ 2893{
1905 int ret; 2894 int ret;
@@ -1912,12 +2901,19 @@ static int __init bcm_enet_init(void)
1912 if (ret) 2901 if (ret)
1913 platform_driver_unregister(&bcm63xx_enet_shared_driver); 2902 platform_driver_unregister(&bcm63xx_enet_shared_driver);
1914 2903
2904 ret = platform_driver_register(&bcm63xx_enetsw_driver);
2905 if (ret) {
2906 platform_driver_unregister(&bcm63xx_enet_driver);
2907 platform_driver_unregister(&bcm63xx_enet_shared_driver);
2908 }
2909
1915 return ret; 2910 return ret;
1916} 2911}
1917 2912
1918static void __exit bcm_enet_exit(void) 2913static void __exit bcm_enet_exit(void)
1919{ 2914{
1920 platform_driver_unregister(&bcm63xx_enet_driver); 2915 platform_driver_unregister(&bcm63xx_enet_driver);
2916 platform_driver_unregister(&bcm63xx_enetsw_driver);
1921 platform_driver_unregister(&bcm63xx_enet_shared_driver); 2917 platform_driver_unregister(&bcm63xx_enet_shared_driver);
1922} 2918}
1923 2919
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 133d5857b9e2..f55af4310085 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -18,6 +18,7 @@
18 18
19/* maximum burst len for dma (4 bytes unit) */ 19/* maximum burst len for dma (4 bytes unit) */
20#define BCMENET_DMA_MAXBURST 16 20#define BCMENET_DMA_MAXBURST 16
21#define BCMENETSW_DMA_MAXBURST 8
21 22
22/* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value 23/* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value
23 * must be low enough so that a DMA transfer of above burst length can 24 * must be low enough so that a DMA transfer of above burst length can
@@ -84,11 +85,60 @@
84#define ETH_MIB_RX_CNTRL 54 85#define ETH_MIB_RX_CNTRL 54
85 86
86 87
88/*
89 * SW MIB Counters register definitions
90*/
91#define ETHSW_MIB_TX_ALL_OCT 0
92#define ETHSW_MIB_TX_DROP_PKTS 2
93#define ETHSW_MIB_TX_QOS_PKTS 3
94#define ETHSW_MIB_TX_BRDCAST 4
95#define ETHSW_MIB_TX_MULT 5
96#define ETHSW_MIB_TX_UNI 6
97#define ETHSW_MIB_TX_COL 7
98#define ETHSW_MIB_TX_1_COL 8
99#define ETHSW_MIB_TX_M_COL 9
100#define ETHSW_MIB_TX_DEF 10
101#define ETHSW_MIB_TX_LATE 11
102#define ETHSW_MIB_TX_EX_COL 12
103#define ETHSW_MIB_TX_PAUSE 14
104#define ETHSW_MIB_TX_QOS_OCT 15
105
106#define ETHSW_MIB_RX_ALL_OCT 17
107#define ETHSW_MIB_RX_UND 19
108#define ETHSW_MIB_RX_PAUSE 20
109#define ETHSW_MIB_RX_64 21
110#define ETHSW_MIB_RX_65_127 22
111#define ETHSW_MIB_RX_128_255 23
112#define ETHSW_MIB_RX_256_511 24
113#define ETHSW_MIB_RX_512_1023 25
114#define ETHSW_MIB_RX_1024_1522 26
115#define ETHSW_MIB_RX_OVR 27
116#define ETHSW_MIB_RX_JAB 28
117#define ETHSW_MIB_RX_ALIGN 29
118#define ETHSW_MIB_RX_CRC 30
119#define ETHSW_MIB_RX_GD_OCT 31
120#define ETHSW_MIB_RX_DROP 33
121#define ETHSW_MIB_RX_UNI 34
122#define ETHSW_MIB_RX_MULT 35
123#define ETHSW_MIB_RX_BRDCAST 36
124#define ETHSW_MIB_RX_SA_CHANGE 37
125#define ETHSW_MIB_RX_FRAG 38
126#define ETHSW_MIB_RX_OVR_DISC 39
127#define ETHSW_MIB_RX_SYM 40
128#define ETHSW_MIB_RX_QOS_PKTS 41
129#define ETHSW_MIB_RX_QOS_OCT 42
130#define ETHSW_MIB_RX_1523_2047 44
131#define ETHSW_MIB_RX_2048_4095 45
132#define ETHSW_MIB_RX_4096_8191 46
133#define ETHSW_MIB_RX_8192_9728 47
134
135
87struct bcm_enet_mib_counters { 136struct bcm_enet_mib_counters {
88 u64 tx_gd_octets; 137 u64 tx_gd_octets;
89 u32 tx_gd_pkts; 138 u32 tx_gd_pkts;
90 u32 tx_all_octets; 139 u32 tx_all_octets;
91 u32 tx_all_pkts; 140 u32 tx_all_pkts;
141 u32 tx_unicast;
92 u32 tx_brdcast; 142 u32 tx_brdcast;
93 u32 tx_mult; 143 u32 tx_mult;
94 u32 tx_64; 144 u32 tx_64;
@@ -97,7 +147,12 @@ struct bcm_enet_mib_counters {
97 u32 tx_256_511; 147 u32 tx_256_511;
98 u32 tx_512_1023; 148 u32 tx_512_1023;
99 u32 tx_1024_max; 149 u32 tx_1024_max;
150 u32 tx_1523_2047;
151 u32 tx_2048_4095;
152 u32 tx_4096_8191;
153 u32 tx_8192_9728;
100 u32 tx_jab; 154 u32 tx_jab;
155 u32 tx_drop;
101 u32 tx_ovr; 156 u32 tx_ovr;
102 u32 tx_frag; 157 u32 tx_frag;
103 u32 tx_underrun; 158 u32 tx_underrun;
@@ -114,6 +169,7 @@ struct bcm_enet_mib_counters {
114 u32 rx_all_octets; 169 u32 rx_all_octets;
115 u32 rx_all_pkts; 170 u32 rx_all_pkts;
116 u32 rx_brdcast; 171 u32 rx_brdcast;
172 u32 rx_unicast;
117 u32 rx_mult; 173 u32 rx_mult;
118 u32 rx_64; 174 u32 rx_64;
119 u32 rx_65_127; 175 u32 rx_65_127;
@@ -197,6 +253,9 @@ struct bcm_enet_priv {
197 /* number of dma desc in tx ring */ 253 /* number of dma desc in tx ring */
198 int tx_ring_size; 254 int tx_ring_size;
199 255
256 /* maximum dma burst size */
257 int dma_maxburst;
258
200 /* cpu view of rx dma ring */ 259 /* cpu view of rx dma ring */
201 struct bcm_enet_desc *tx_desc_cpu; 260 struct bcm_enet_desc *tx_desc_cpu;
202 261
@@ -269,6 +328,33 @@ struct bcm_enet_priv {
269 328
270 /* maximum hardware transmit/receive size */ 329 /* maximum hardware transmit/receive size */
271 unsigned int hw_mtu; 330 unsigned int hw_mtu;
331
332 bool enet_is_sw;
333
334 /* port mapping for switch devices */
335 int num_ports;
336 struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT];
337 int sw_port_link[ENETSW_MAX_PORT];
338
339 /* used to poll switch port state */
340 struct timer_list swphy_poll;
341 spinlock_t enetsw_mdio_lock;
342
343 /* dma channel enable mask */
344 u32 dma_chan_en_mask;
345
346 /* dma channel interrupt mask */
347 u32 dma_chan_int_mask;
348
349 /* DMA engine has internal SRAM */
350 bool dma_has_sram;
351
352 /* dma channel width */
353 unsigned int dma_chan_width;
354
355 /* dma descriptor shift value */
356 unsigned int dma_desc_shift;
272}; 357};
273 358
359
274#endif /* ! BCM63XX_ENET_H_ */ 360#endif /* ! BCM63XX_ENET_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 5d204492c603..6a2de1d79ff6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8104,7 +8104,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8104 8104
8105 pci_set_master(pdev); 8105 pci_set_master(pdev);
8106 8106
8107 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 8107 bp->pm_cap = pdev->pm_cap;
8108 if (bp->pm_cap == 0) { 8108 if (bp->pm_cap == 0) {
8109 dev_err(&pdev->dev, 8109 dev_err(&pdev->dev,
8110 "Cannot find power management capability, aborting\n"); 8110 "Cannot find power management capability, aborting\n");
@@ -8764,18 +8764,4 @@ static struct pci_driver bnx2_pci_driver = {
8764 .err_handler = &bnx2_err_handler, 8764 .err_handler = &bnx2_err_handler,
8765}; 8765};
8766 8766
8767static int __init bnx2_init(void) 8767module_pci_driver(bnx2_pci_driver);
8768{
8769 return pci_register_driver(&bnx2_pci_driver);
8770}
8771
8772static void __exit bnx2_cleanup(void)
8773{
8774 pci_unregister_driver(&bnx2_pci_driver);
8775}
8776
8777module_init(bnx2_init);
8778module_exit(bnx2_cleanup);
8779
8780
8781
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 3dba2a70a00e..dedbd76c033e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -34,12 +34,10 @@
34#define BCM_DCBNL 34#define BCM_DCBNL
35#endif 35#endif
36 36
37
38#include "bnx2x_hsi.h" 37#include "bnx2x_hsi.h"
39 38
40#include "../cnic_if.h" 39#include "../cnic_if.h"
41 40
42
43#define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt) 41#define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt)
44 42
45#include <linux/mdio.h> 43#include <linux/mdio.h>
@@ -114,7 +112,6 @@ do { \
114#define BNX2X_ERROR(fmt, ...) \ 112#define BNX2X_ERROR(fmt, ...) \
115 pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__) 113 pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
116 114
117
118/* before we have a dev->name use dev_info() */ 115/* before we have a dev->name use dev_info() */
119#define BNX2X_DEV_INFO(fmt, ...) \ 116#define BNX2X_DEV_INFO(fmt, ...) \
120do { \ 117do { \
@@ -147,7 +144,6 @@ do { \
147#define U64_HI(x) ((u32)(((u64)(x)) >> 32)) 144#define U64_HI(x) ((u32)(((u64)(x)) >> 32))
148#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) 145#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
149 146
150
151#define REG_ADDR(bp, offset) ((bp->regview) + (offset)) 147#define REG_ADDR(bp, offset) ((bp->regview) + (offset))
152 148
153#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) 149#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
@@ -366,7 +362,7 @@ union db_prod {
366/* 362/*
367 * Number of required SGEs is the sum of two: 363 * Number of required SGEs is the sum of two:
368 * 1. Number of possible opened aggregations (next packet for 364 * 1. Number of possible opened aggregations (next packet for
369 * these aggregations will probably consume SGE immidiatelly) 365 * these aggregations will probably consume SGE immediately)
370 * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only 366 * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
371 * after placement on BD for new TPA aggregation) 367 * after placement on BD for new TPA aggregation)
372 * 368 *
@@ -387,7 +383,6 @@ union db_prod {
387#define BIT_VEC64_ELEM_SHIFT 6 383#define BIT_VEC64_ELEM_SHIFT 6
388#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) 384#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1)
389 385
390
391#define __BIT_VEC64_SET_BIT(el, bit) \ 386#define __BIT_VEC64_SET_BIT(el, bit) \
392 do { \ 387 do { \
393 el = ((el) | ((u64)0x1 << (bit))); \ 388 el = ((el) | ((u64)0x1 << (bit))); \
@@ -398,7 +393,6 @@ union db_prod {
398 el = ((el) & (~((u64)0x1 << (bit)))); \ 393 el = ((el) & (~((u64)0x1 << (bit)))); \
399 } while (0) 394 } while (0)
400 395
401
402#define BIT_VEC64_SET_BIT(vec64, idx) \ 396#define BIT_VEC64_SET_BIT(vec64, idx) \
403 __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ 397 __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
404 (idx) & BIT_VEC64_ELEM_MASK) 398 (idx) & BIT_VEC64_ELEM_MASK)
@@ -419,8 +413,6 @@ union db_prod {
419 413
420/*******************************************************/ 414/*******************************************************/
421 415
422
423
424/* Number of u64 elements in SGE mask array */ 416/* Number of u64 elements in SGE mask array */
425#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ) 417#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
426#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) 418#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
@@ -493,11 +485,26 @@ struct bnx2x_fastpath {
493 struct bnx2x *bp; /* parent */ 485 struct bnx2x *bp; /* parent */
494 486
495 struct napi_struct napi; 487 struct napi_struct napi;
488
489#ifdef CONFIG_NET_LL_RX_POLL
490 unsigned int state;
491#define BNX2X_FP_STATE_IDLE 0
492#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
493#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
494#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */
495#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */
496#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
497#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
498#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
499 /* protect state */
500 spinlock_t lock;
501#endif /* CONFIG_NET_LL_RX_POLL */
502
496 union host_hc_status_block status_blk; 503 union host_hc_status_block status_blk;
497 /* chip independed shortcuts into sb structure */ 504 /* chip independent shortcuts into sb structure */
498 __le16 *sb_index_values; 505 __le16 *sb_index_values;
499 __le16 *sb_running_index; 506 __le16 *sb_running_index;
500 /* chip independed shortcut into rx_prods_offset memory */ 507 /* chip independent shortcut into rx_prods_offset memory */
501 u32 ustorm_rx_prods_offset; 508 u32 ustorm_rx_prods_offset;
502 509
503 u32 rx_buf_size; 510 u32 rx_buf_size;
@@ -565,6 +572,116 @@ struct bnx2x_fastpath {
565#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) 572#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
566#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) 573#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
567 574
575#ifdef CONFIG_NET_LL_RX_POLL
576static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
577{
578 spin_lock_init(&fp->lock);
579 fp->state = BNX2X_FP_STATE_IDLE;
580}
581
582/* called from the device poll routine to get ownership of a FP */
583static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
584{
585 bool rc = true;
586
587 spin_lock(&fp->lock);
588 if (fp->state & BNX2X_FP_LOCKED) {
589 WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
590 fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
591 rc = false;
592 } else {
593 /* we don't care if someone yielded */
594 fp->state = BNX2X_FP_STATE_NAPI;
595 }
596 spin_unlock(&fp->lock);
597 return rc;
598}
599
600/* returns true is someone tried to get the FP while napi had it */
601static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
602{
603 bool rc = false;
604
605 spin_lock(&fp->lock);
606 WARN_ON(fp->state &
607 (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
608
609 if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
610 rc = true;
611 fp->state = BNX2X_FP_STATE_IDLE;
612 spin_unlock(&fp->lock);
613 return rc;
614}
615
616/* called from bnx2x_low_latency_poll() */
617static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
618{
619 bool rc = true;
620
621 spin_lock_bh(&fp->lock);
622 if ((fp->state & BNX2X_FP_LOCKED)) {
623 fp->state |= BNX2X_FP_STATE_POLL_YIELD;
624 rc = false;
625 } else {
626 /* preserve yield marks */
627 fp->state |= BNX2X_FP_STATE_POLL;
628 }
629 spin_unlock_bh(&fp->lock);
630 return rc;
631}
632
633/* returns true if someone tried to get the FP while it was locked */
634static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
635{
636 bool rc = false;
637
638 spin_lock_bh(&fp->lock);
639 WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
640
641 if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
642 rc = true;
643 fp->state = BNX2X_FP_STATE_IDLE;
644 spin_unlock_bh(&fp->lock);
645 return rc;
646}
647
648/* true if a socket is polling, even if it did not get the lock */
649static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
650{
651 WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
652 return fp->state & BNX2X_FP_USER_PEND;
653}
654#else
655static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
656{
657}
658
659static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
660{
661 return true;
662}
663
664static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
665{
666 return false;
667}
668
669static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
670{
671 return false;
672}
673
674static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
675{
676 return false;
677}
678
679static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
680{
681 return false;
682}
683#endif /* CONFIG_NET_LL_RX_POLL */
684
568/* Use 2500 as a mini-jumbo MTU for FCoE */ 685/* Use 2500 as a mini-jumbo MTU for FCoE */
569#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 686#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
570 687
@@ -580,12 +697,10 @@ struct bnx2x_fastpath {
580 txdata_ptr[FIRST_TX_COS_INDEX] \ 697 txdata_ptr[FIRST_TX_COS_INDEX] \
581 ->var) 698 ->var)
582 699
583
584#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp)) 700#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
585#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp)) 701#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp))
586#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp)) 702#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
587 703
588
589/* MC hsi */ 704/* MC hsi */
590#define MAX_FETCH_BD 13 /* HW max BDs per packet */ 705#define MAX_FETCH_BD 13 /* HW max BDs per packet */
591#define RX_COPY_THRESH 92 706#define RX_COPY_THRESH 92
@@ -613,7 +728,7 @@ struct bnx2x_fastpath {
613 * START_BD(splitted) - includes unpaged data segment for GSO 728 * START_BD(splitted) - includes unpaged data segment for GSO
614 * PARSING_BD - for TSO and CSUM data 729 * PARSING_BD - for TSO and CSUM data
615 * PARSING_BD2 - for encapsulation data 730 * PARSING_BD2 - for encapsulation data
616 * Frag BDs - decribes pages for frags 731 * Frag BDs - describes pages for frags
617 */ 732 */
618#define BDS_PER_TX_PKT 4 733#define BDS_PER_TX_PKT 4
619#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT) 734#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
@@ -693,12 +808,10 @@ struct bnx2x_fastpath {
693 FW_DROP_LEVEL(bp)) 808 FW_DROP_LEVEL(bp))
694#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) 809#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
695 810
696
697/* This is needed for determining of last_max */ 811/* This is needed for determining of last_max */
698#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) 812#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
699#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) 813#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b))
700 814
701
702#define BNX2X_SWCID_SHIFT 17 815#define BNX2X_SWCID_SHIFT 17
703#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) 816#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1)
704 817
@@ -723,7 +836,6 @@ struct bnx2x_fastpath {
723 DPM_TRIGER_TYPE); \ 836 DPM_TRIGER_TYPE); \
724 } while (0) 837 } while (0)
725 838
726
727/* TX CSUM helpers */ 839/* TX CSUM helpers */
728#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ 840#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
729 skb->csum_offset) 841 skb->csum_offset)
@@ -766,7 +878,6 @@ struct bnx2x_fastpath {
766#define BNX2X_RX_SUM_FIX(cqe) \ 878#define BNX2X_RX_SUM_FIX(cqe) \
767 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) 879 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
768 880
769
770#define FP_USB_FUNC_OFF \ 881#define FP_USB_FUNC_OFF \
771 offsetof(struct cstorm_status_block_u, func) 882 offsetof(struct cstorm_status_block_u, func)
772#define FP_CSB_FUNC_OFF \ 883#define FP_CSB_FUNC_OFF \
@@ -900,14 +1011,14 @@ struct bnx2x_common {
900#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ 1011#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
901 (CHIP_REV(bp) == CHIP_REV_Ax)) 1012 (CHIP_REV(bp) == CHIP_REV_Ax))
902/* This define is used in two main places: 1013/* This define is used in two main places:
903 * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher 1014 * 1. In the early stages of nic_load, to know if to configure Parser / Searcher
904 * to nic-only mode or to offload mode. Offload mode is configured if either the 1015 * to nic-only mode or to offload mode. Offload mode is configured if either the
905 * chip is E1x (where MIC_MODE register is not applicable), or if cnic already 1016 * chip is E1x (where MIC_MODE register is not applicable), or if cnic already
906 * registered for this port (which means that the user wants storage services). 1017 * registered for this port (which means that the user wants storage services).
907 * 2. During cnic-related load, to know if offload mode is already configured in 1018 * 2. During cnic-related load, to know if offload mode is already configured in
908 * the HW or needs to be configrued. 1019 * the HW or needs to be configured.
909 * Since the transition from nic-mode to offload-mode in HW causes traffic 1020 * Since the transition from nic-mode to offload-mode in HW causes traffic
910 * coruption, nic-mode is configured only in ports on which storage services 1021 * corruption, nic-mode is configured only in ports on which storage services
911 * where never requested. 1022 * where never requested.
912 */ 1023 */
913#define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp)) 1024#define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
@@ -1008,14 +1119,14 @@ extern struct workqueue_struct *bnx2x_wq;
1008 * If the maximum number of FP-SB available is X then: 1119 * If the maximum number of FP-SB available is X then:
1009 * a. If CNIC is supported it consumes 1 FP-SB thus the max number of 1120 * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
1010 * regular L2 queues is Y=X-1 1121 * regular L2 queues is Y=X-1
1011 * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor) 1122 * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
1012 * c. If the FCoE L2 queue is supported the actual number of L2 queues 1123 * c. If the FCoE L2 queue is supported the actual number of L2 queues
1013 * is Y+1 1124 * is Y+1
1014 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for 1125 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
1015 * slow-path interrupts) or Y+2 if CNIC is supported (one additional 1126 * slow-path interrupts) or Y+2 if CNIC is supported (one additional
1016 * FP interrupt context for the CNIC). 1127 * FP interrupt context for the CNIC).
1017 * e. The number of HW context (CID count) is always X or X+1 if FCoE 1128 * e. The number of HW context (CID count) is always X or X+1 if FCoE
1018 * L2 queue is supported. the cid for the FCoE L2 queue is always X. 1129 * L2 queue is supported. The cid for the FCoE L2 queue is always X.
1019 */ 1130 */
1020 1131
1021/* fast-path interrupt contexts E1x */ 1132/* fast-path interrupt contexts E1x */
@@ -1068,7 +1179,6 @@ struct bnx2x_slowpath {
1068 struct eth_classify_rules_ramrod_data e2; 1179 struct eth_classify_rules_ramrod_data e2;
1069 } mac_rdata; 1180 } mac_rdata;
1070 1181
1071
1072 union { 1182 union {
1073 struct tstorm_eth_mac_filter_config e1x; 1183 struct tstorm_eth_mac_filter_config e1x;
1074 struct eth_filter_rules_ramrod_data e2; 1184 struct eth_filter_rules_ramrod_data e2;
@@ -1119,7 +1229,6 @@ struct bnx2x_slowpath {
1119#define bnx2x_sp_mapping(bp, var) \ 1229#define bnx2x_sp_mapping(bp, var) \
1120 (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) 1230 (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
1121 1231
1122
1123/* attn group wiring */ 1232/* attn group wiring */
1124#define MAX_DYNAMIC_ATTN_GRPS 8 1233#define MAX_DYNAMIC_ATTN_GRPS 8
1125 1234
@@ -1221,11 +1330,11 @@ enum {
1221 BNX2X_SP_RTNL_AFEX_F_UPDATE, 1330 BNX2X_SP_RTNL_AFEX_F_UPDATE,
1222 BNX2X_SP_RTNL_ENABLE_SRIOV, 1331 BNX2X_SP_RTNL_ENABLE_SRIOV,
1223 BNX2X_SP_RTNL_VFPF_MCAST, 1332 BNX2X_SP_RTNL_VFPF_MCAST,
1333 BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
1224 BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, 1334 BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
1225 BNX2X_SP_RTNL_HYPERVISOR_VLAN, 1335 BNX2X_SP_RTNL_HYPERVISOR_VLAN,
1226}; 1336};
1227 1337
1228
1229struct bnx2x_prev_path_list { 1338struct bnx2x_prev_path_list {
1230 struct list_head list; 1339 struct list_head list;
1231 u8 bus; 1340 u8 bus;
@@ -1392,6 +1501,7 @@ struct bnx2x {
1392#define USING_SINGLE_MSIX_FLAG (1 << 20) 1501#define USING_SINGLE_MSIX_FLAG (1 << 20)
1393#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) 1502#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
1394#define IS_VF_FLAG (1 << 22) 1503#define IS_VF_FLAG (1 << 22)
1504#define INTERRUPTS_ENABLED_FLAG (1 << 23)
1395 1505
1396#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) 1506#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
1397 1507
@@ -1585,7 +1695,7 @@ struct bnx2x {
1585 struct mutex cnic_mutex; 1695 struct mutex cnic_mutex;
1586 struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; 1696 struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
1587 1697
1588 /* Start index of the "special" (CNIC related) L2 cleints */ 1698 /* Start index of the "special" (CNIC related) L2 clients */
1589 u8 cnic_base_cl_id; 1699 u8 cnic_base_cl_id;
1590 1700
1591 int dmae_ready; 1701 int dmae_ready;
@@ -1699,7 +1809,7 @@ struct bnx2x {
1699 /* operation indication for the sp_rtnl task */ 1809 /* operation indication for the sp_rtnl task */
1700 unsigned long sp_rtnl_state; 1810 unsigned long sp_rtnl_state;
1701 1811
1702 /* DCBX Negotation results */ 1812 /* DCBX Negotiation results */
1703 struct dcbx_features dcbx_local_feat; 1813 struct dcbx_features dcbx_local_feat;
1704 u32 dcbx_error; 1814 u32 dcbx_error;
1705 1815
@@ -1755,7 +1865,6 @@ extern int num_queues;
1755#define FUNC_FLG_SPQ 0x0010 1865#define FUNC_FLG_SPQ 0x0010
1756#define FUNC_FLG_LEADING 0x0020 /* PF only */ 1866#define FUNC_FLG_LEADING 0x0020 /* PF only */
1757 1867
1758
1759struct bnx2x_func_init_params { 1868struct bnx2x_func_init_params {
1760 /* dma */ 1869 /* dma */
1761 dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ 1870 dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
@@ -1853,9 +1962,6 @@ struct bnx2x_func_init_params {
1853 1962
1854#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) 1963#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
1855 1964
1856
1857
1858
1859/** 1965/**
1860 * bnx2x_set_mac_one - configure a single MAC address 1966 * bnx2x_set_mac_one - configure a single MAC address
1861 * 1967 *
@@ -1921,7 +2027,6 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
1921void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 2027void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
1922 u8 src_type, u8 dst_type); 2028 u8 src_type, u8 dst_type);
1923int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); 2029int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
1924void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl);
1925 2030
1926/* FLR related routines */ 2031/* FLR related routines */
1927u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); 2032u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -1937,6 +2042,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1937void bnx2x_update_coalesce(struct bnx2x *bp); 2042void bnx2x_update_coalesce(struct bnx2x *bp);
1938int bnx2x_get_cur_phy_idx(struct bnx2x *bp); 2043int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
1939 2044
2045bool bnx2x_port_after_undi(struct bnx2x *bp);
2046
1940static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, 2047static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1941 int wait) 2048 int wait)
1942{ 2049{
@@ -1998,7 +2105,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
1998#define UNLOAD_CLOSE 1 2105#define UNLOAD_CLOSE 1
1999#define UNLOAD_RECOVERY 2 2106#define UNLOAD_RECOVERY 2
2000 2107
2001
2002/* DMAE command defines */ 2108/* DMAE command defines */
2003#define DMAE_TIMEOUT -1 2109#define DMAE_TIMEOUT -1
2004#define DMAE_PCI_ERROR -2 /* E2 and onward */ 2110#define DMAE_PCI_ERROR -2 /* E2 and onward */
@@ -2062,7 +2168,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2062#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) 2168#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
2063 2169
2064#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit 2170#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
2065 indicates eror */ 2171 * indicates error
2172 */
2066 2173
2067#define MAX_DMAE_C_PER_PORT 8 2174#define MAX_DMAE_C_PER_PORT 8
2068#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 2175#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
@@ -2100,7 +2207,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2100#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) 2207#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
2101#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) 2208#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
2102 2209
2103
2104#define BNX2X_BTR 4 2210#define BNX2X_BTR 4
2105#define MAX_SPQ_PENDING 8 2211#define MAX_SPQ_PENDING 8
2106 2212
@@ -2137,6 +2243,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2137#define ATTN_HARD_WIRED_MASK 0xff00 2243#define ATTN_HARD_WIRED_MASK 0xff00
2138#define ATTENTION_ID 4 2244#define ATTENTION_ID 4
2139 2245
2246#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \
2247 IS_MF_FCOE_AFEX(bp))
2140 2248
2141/* stuff added to make the code fit 80Col */ 2249/* stuff added to make the code fit 80Col */
2142 2250
@@ -2338,4 +2446,9 @@ enum {
2338 2446
2339#define NUM_MACS 8 2447#define NUM_MACS 8
2340 2448
2449enum bnx2x_pci_bus_speed {
2450 BNX2X_PCI_LINK_SPEED_2500 = 2500,
2451 BNX2X_PCI_LINK_SPEED_5000 = 5000,
2452 BNX2X_PCI_LINK_SPEED_8000 = 8000
2453};
2341#endif /* bnx2x.h */ 2454#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 638e55435b04..ec3aa1d451e8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -24,6 +24,7 @@
24#include <net/tcp.h> 24#include <net/tcp.h>
25#include <net/ipv6.h> 25#include <net/ipv6.h>
26#include <net/ip6_checksum.h> 26#include <net/ip6_checksum.h>
27#include <net/ll_poll.h>
27#include <linux/prefetch.h> 28#include <linux/prefetch.h>
28#include "bnx2x_cmn.h" 29#include "bnx2x_cmn.h"
29#include "bnx2x_init.h" 30#include "bnx2x_init.h"
@@ -124,7 +125,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
124 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); 125 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
125 126
126 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer 127 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
127 * backward along the array could cause memory to be overriden 128 * backward along the array could cause memory to be overridden
128 */ 129 */
129 for (cos = 1; cos < bp->max_cos; cos++) { 130 for (cos = 1; cos < bp->max_cos; cos++) {
130 for (i = 0; i < old_eth_num - delta; i++) { 131 for (i = 0; i < old_eth_num - delta; i++) {
@@ -165,7 +166,6 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
165 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), 166 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
166 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); 167 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
167 168
168
169 nbd = le16_to_cpu(tx_start_bd->nbd) - 1; 169 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170#ifdef BNX2X_STOP_ON_ERROR 170#ifdef BNX2X_STOP_ON_ERROR
171 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { 171 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
@@ -259,7 +259,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
259 smp_mb(); 259 smp_mb();
260 260
261 if (unlikely(netif_tx_queue_stopped(txq))) { 261 if (unlikely(netif_tx_queue_stopped(txq))) {
262 /* Taking tx_lock() is needed to prevent reenabling the queue 262 /* Taking tx_lock() is needed to prevent re-enabling the queue
263 * while it's empty. This could have happen if rx_action() gets 263 * while it's empty. This could have happen if rx_action() gets
264 * suspended in bnx2x_tx_int() after the condition before 264 * suspended in bnx2x_tx_int() after the condition before
265 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): 265 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
@@ -572,7 +572,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
572 return err; 572 return err;
573 } 573 }
574 574
575 /* Unmap the page as we r going to pass it to the stack */ 575 /* Unmap the page as we're going to pass it to the stack */
576 dma_unmap_page(&bp->pdev->dev, 576 dma_unmap_page(&bp->pdev->dev,
577 dma_unmap_addr(&old_rx_pg, mapping), 577 dma_unmap_addr(&old_rx_pg, mapping),
578 SGE_PAGES, DMA_FROM_DEVICE); 578 SGE_PAGES, DMA_FROM_DEVICE);
@@ -733,7 +733,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
733 dev_kfree_skb_any(skb); 733 dev_kfree_skb_any(skb);
734 } 734 }
735 735
736
737 /* put new data in bin */ 736 /* put new data in bin */
738 rx_buf->data = new_data; 737 rx_buf->data = new_data;
739 738
@@ -805,40 +804,32 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
805{ 804{
806 struct bnx2x *bp = fp->bp; 805 struct bnx2x *bp = fp->bp;
807 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 806 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
808 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; 807 u16 sw_comp_cons, sw_comp_prod;
809 int rx_pkt = 0; 808 int rx_pkt = 0;
809 union eth_rx_cqe *cqe;
810 struct eth_fast_path_rx_cqe *cqe_fp;
810 811
811#ifdef BNX2X_STOP_ON_ERROR 812#ifdef BNX2X_STOP_ON_ERROR
812 if (unlikely(bp->panic)) 813 if (unlikely(bp->panic))
813 return 0; 814 return 0;
814#endif 815#endif
815 816
816 /* CQ "next element" is of the size of the regular element,
817 that's why it's ok here */
818 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
819 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
820 hw_comp_cons++;
821
822 bd_cons = fp->rx_bd_cons; 817 bd_cons = fp->rx_bd_cons;
823 bd_prod = fp->rx_bd_prod; 818 bd_prod = fp->rx_bd_prod;
824 bd_prod_fw = bd_prod; 819 bd_prod_fw = bd_prod;
825 sw_comp_cons = fp->rx_comp_cons; 820 sw_comp_cons = fp->rx_comp_cons;
826 sw_comp_prod = fp->rx_comp_prod; 821 sw_comp_prod = fp->rx_comp_prod;
827 822
828 /* Memory barrier necessary as speculative reads of the rx 823 comp_ring_cons = RCQ_BD(sw_comp_cons);
829 * buffer can be ahead of the index in the status block 824 cqe = &fp->rx_comp_ring[comp_ring_cons];
830 */ 825 cqe_fp = &cqe->fast_path_cqe;
831 rmb();
832 826
833 DP(NETIF_MSG_RX_STATUS, 827 DP(NETIF_MSG_RX_STATUS,
834 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", 828 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
835 fp->index, hw_comp_cons, sw_comp_cons);
836 829
837 while (sw_comp_cons != hw_comp_cons) { 830 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
838 struct sw_rx_bd *rx_buf = NULL; 831 struct sw_rx_bd *rx_buf = NULL;
839 struct sk_buff *skb; 832 struct sk_buff *skb;
840 union eth_rx_cqe *cqe;
841 struct eth_fast_path_rx_cqe *cqe_fp;
842 u8 cqe_fp_flags; 833 u8 cqe_fp_flags;
843 enum eth_rx_cqe_type cqe_fp_type; 834 enum eth_rx_cqe_type cqe_fp_type;
844 u16 len, pad, queue; 835 u16 len, pad, queue;
@@ -850,12 +841,9 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
850 return 0; 841 return 0;
851#endif 842#endif
852 843
853 comp_ring_cons = RCQ_BD(sw_comp_cons);
854 bd_prod = RX_BD(bd_prod); 844 bd_prod = RX_BD(bd_prod);
855 bd_cons = RX_BD(bd_cons); 845 bd_cons = RX_BD(bd_cons);
856 846
857 cqe = &fp->rx_comp_ring[comp_ring_cons];
858 cqe_fp = &cqe->fast_path_cqe;
859 cqe_fp_flags = cqe_fp->type_error_flags; 847 cqe_fp_flags = cqe_fp->type_error_flags;
860 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 848 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
861 849
@@ -899,7 +887,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
899 cqe_fp); 887 cqe_fp);
900 888
901 goto next_rx; 889 goto next_rx;
902
903 } 890 }
904 queue = cqe->end_agg_cqe.queue_index; 891 queue = cqe->end_agg_cqe.queue_index;
905 tpa_info = &fp->tpa_info[queue]; 892 tpa_info = &fp->tpa_info[queue];
@@ -1002,9 +989,13 @@ reuse_rx:
1002 PARSING_FLAGS_VLAN) 989 PARSING_FLAGS_VLAN)
1003 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 990 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1004 le16_to_cpu(cqe_fp->vlan_tag)); 991 le16_to_cpu(cqe_fp->vlan_tag));
1005 napi_gro_receive(&fp->napi, skb);
1006 992
993 skb_mark_ll(skb, &fp->napi);
1007 994
995 if (bnx2x_fp_ll_polling(fp))
996 netif_receive_skb(skb);
997 else
998 napi_gro_receive(&fp->napi, skb);
1008next_rx: 999next_rx:
1009 rx_buf->data = NULL; 1000 rx_buf->data = NULL;
1010 1001
@@ -1016,8 +1007,15 @@ next_cqe:
1016 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); 1007 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1017 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); 1008 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1018 1009
1010 /* mark CQE as free */
1011 BNX2X_SEED_CQE(cqe_fp);
1012
1019 if (rx_pkt == budget) 1013 if (rx_pkt == budget)
1020 break; 1014 break;
1015
1016 comp_ring_cons = RCQ_BD(sw_comp_cons);
1017 cqe = &fp->rx_comp_ring[comp_ring_cons];
1018 cqe_fp = &cqe->fast_path_cqe;
1021 } /* while */ 1019 } /* while */
1022 1020
1023 fp->rx_bd_cons = bd_cons; 1021 fp->rx_bd_cons = bd_cons;
@@ -1053,8 +1051,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1053#endif 1051#endif
1054 1052
1055 /* Handle Rx and Tx according to MSI-X vector */ 1053 /* Handle Rx and Tx according to MSI-X vector */
1056 prefetch(fp->rx_cons_sb);
1057
1058 for_each_cos_in_tx_queue(fp, cos) 1054 for_each_cos_in_tx_queue(fp, cos)
1059 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); 1055 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1060 1056
@@ -1118,7 +1114,7 @@ static void bnx2x_fill_report_data(struct bnx2x *bp,
1118 1114
1119 memset(data, 0, sizeof(*data)); 1115 memset(data, 0, sizeof(*data));
1120 1116
1121 /* Fill the report data: efective line speed */ 1117 /* Fill the report data: effective line speed */
1122 data->line_speed = line_speed; 1118 data->line_speed = line_speed;
1123 1119
1124 /* Link is down */ 1120 /* Link is down */
@@ -1161,7 +1157,7 @@ void bnx2x_link_report(struct bnx2x *bp)
1161 * 1157 *
1162 * @bp: driver handle 1158 * @bp: driver handle
1163 * 1159 *
1164 * None atomic inmlementation. 1160 * None atomic implementation.
1165 * Should be called under the phy_lock. 1161 * Should be called under the phy_lock.
1166 */ 1162 */
1167void __bnx2x_link_report(struct bnx2x *bp) 1163void __bnx2x_link_report(struct bnx2x *bp)
@@ -1304,7 +1300,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1304 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); 1300 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1305 1301
1306 if (!fp->disable_tpa) { 1302 if (!fp->disable_tpa) {
1307 /* Fill the per-aggregtion pool */ 1303 /* Fill the per-aggregation pool */
1308 for (i = 0; i < MAX_AGG_QS(bp); i++) { 1304 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1309 struct bnx2x_agg_info *tpa_info = 1305 struct bnx2x_agg_info *tpa_info =
1310 &fp->tpa_info[i]; 1306 &fp->tpa_info[i];
@@ -1726,7 +1722,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
1726 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); 1722 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1727} 1723}
1728 1724
1729int bnx2x_setup_irqs(struct bnx2x *bp) 1725static int bnx2x_setup_irqs(struct bnx2x *bp)
1730{ 1726{
1731 int rc = 0; 1727 int rc = 0;
1732 if (bp->flags & USING_MSIX_FLAG && 1728 if (bp->flags & USING_MSIX_FLAG &&
@@ -1759,32 +1755,46 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1759{ 1755{
1760 int i; 1756 int i;
1761 1757
1762 for_each_rx_queue_cnic(bp, i) 1758 for_each_rx_queue_cnic(bp, i) {
1759 bnx2x_fp_init_lock(&bp->fp[i]);
1763 napi_enable(&bnx2x_fp(bp, i, napi)); 1760 napi_enable(&bnx2x_fp(bp, i, napi));
1761 }
1764} 1762}
1765 1763
1766static void bnx2x_napi_enable(struct bnx2x *bp) 1764static void bnx2x_napi_enable(struct bnx2x *bp)
1767{ 1765{
1768 int i; 1766 int i;
1769 1767
1770 for_each_eth_queue(bp, i) 1768 for_each_eth_queue(bp, i) {
1769 bnx2x_fp_init_lock(&bp->fp[i]);
1771 napi_enable(&bnx2x_fp(bp, i, napi)); 1770 napi_enable(&bnx2x_fp(bp, i, napi));
1771 }
1772} 1772}
1773 1773
1774static void bnx2x_napi_disable_cnic(struct bnx2x *bp) 1774static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1775{ 1775{
1776 int i; 1776 int i;
1777 1777
1778 for_each_rx_queue_cnic(bp, i) 1778 local_bh_disable();
1779 for_each_rx_queue_cnic(bp, i) {
1779 napi_disable(&bnx2x_fp(bp, i, napi)); 1780 napi_disable(&bnx2x_fp(bp, i, napi));
1781 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1782 mdelay(1);
1783 }
1784 local_bh_enable();
1780} 1785}
1781 1786
1782static void bnx2x_napi_disable(struct bnx2x *bp) 1787static void bnx2x_napi_disable(struct bnx2x *bp)
1783{ 1788{
1784 int i; 1789 int i;
1785 1790
1786 for_each_eth_queue(bp, i) 1791 local_bh_disable();
1792 for_each_eth_queue(bp, i) {
1787 napi_disable(&bnx2x_fp(bp, i, napi)); 1793 napi_disable(&bnx2x_fp(bp, i, napi));
1794 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1795 mdelay(1);
1796 }
1797 local_bh_enable();
1788} 1798}
1789 1799
1790void bnx2x_netif_start(struct bnx2x *bp) 1800void bnx2x_netif_start(struct bnx2x *bp)
@@ -1829,7 +1839,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1829 } 1839 }
1830 1840
1831 /* select a non-FCoE queue */ 1841 /* select a non-FCoE queue */
1832 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); 1842 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1833} 1843}
1834 1844
1835void bnx2x_set_num_queues(struct bnx2x *bp) 1845void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -1862,7 +1872,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1862 * 1872 *
1863 * If the actual number of Tx queues (for each CoS) is less than 16 then there 1873 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1864 * will be the holes at the end of each group of 16 ETh L2 indices (0..15, 1874 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1865 * 16..31,...) with indicies that are not coupled with any real Tx queue. 1875 * 16..31,...) with indices that are not coupled with any real Tx queue.
1866 * 1876 *
1867 * The proper configuration of skb->queue_mapping is handled by 1877 * The proper configuration of skb->queue_mapping is handled by
1868 * bnx2x_select_queue() and __skb_tx_hash(). 1878 * bnx2x_select_queue() and __skb_tx_hash().
@@ -1924,7 +1934,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1924 ETH_OVREHEAD + 1934 ETH_OVREHEAD +
1925 mtu + 1935 mtu +
1926 BNX2X_FW_RX_ALIGN_END; 1936 BNX2X_FW_RX_ALIGN_END;
1927 /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */ 1937 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
1928 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) 1938 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1929 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; 1939 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1930 else 1940 else
@@ -1937,7 +1947,7 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
1937 int i; 1947 int i;
1938 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); 1948 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1939 1949
1940 /* Prepare the initial contents fo the indirection table if RSS is 1950 /* Prepare the initial contents for the indirection table if RSS is
1941 * enabled 1951 * enabled
1942 */ 1952 */
1943 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) 1953 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
@@ -2015,7 +2025,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2015 2025
2016/* 2026/*
2017 * Cleans the object that have internal lists without sending 2027 * Cleans the object that have internal lists without sending
2018 * ramrods. Should be run when interrutps are disabled. 2028 * ramrods. Should be run when interrupts are disabled.
2019 */ 2029 */
2020void bnx2x_squeeze_objects(struct bnx2x *bp) 2030void bnx2x_squeeze_objects(struct bnx2x *bp)
2021{ 2031{
@@ -2166,10 +2176,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2166 bp->fw_stats_data_mapping = bp->fw_stats_mapping + 2176 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2167 bp->fw_stats_req_sz; 2177 bp->fw_stats_req_sz;
2168 2178
2169 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x", 2179 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2170 U64_HI(bp->fw_stats_req_mapping), 2180 U64_HI(bp->fw_stats_req_mapping),
2171 U64_LO(bp->fw_stats_req_mapping)); 2181 U64_LO(bp->fw_stats_req_mapping));
2172 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x", 2182 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2173 U64_HI(bp->fw_stats_data_mapping), 2183 U64_HI(bp->fw_stats_data_mapping),
2174 U64_LO(bp->fw_stats_data_mapping)); 2184 U64_LO(bp->fw_stats_data_mapping));
2175 return 0; 2185 return 0;
@@ -2183,6 +2193,8 @@ alloc_mem_err:
2183/* send load request to mcp and analyze response */ 2193/* send load request to mcp and analyze response */
2184static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) 2194static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2185{ 2195{
2196 u32 param;
2197
2186 /* init fw_seq */ 2198 /* init fw_seq */
2187 bp->fw_seq = 2199 bp->fw_seq =
2188 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 2200 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
@@ -2195,9 +2207,13 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2195 DRV_PULSE_SEQ_MASK); 2207 DRV_PULSE_SEQ_MASK);
2196 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); 2208 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2197 2209
2210 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2211
2212 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2213 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2214
2198 /* load request */ 2215 /* load request */
2199 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 2216 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2200 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2201 2217
2202 /* if mcp fails to respond we must abort */ 2218 /* if mcp fails to respond we must abort */
2203 if (!(*load_code)) { 2219 if (!(*load_code)) {
@@ -2238,7 +2254,7 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2238 2254
2239 /* abort nic load if version mismatch */ 2255 /* abort nic load if version mismatch */
2240 if (my_fw != loaded_fw) { 2256 if (my_fw != loaded_fw) {
2241 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n", 2257 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2242 loaded_fw, my_fw); 2258 loaded_fw, my_fw);
2243 return -EBUSY; 2259 return -EBUSY;
2244 } 2260 }
@@ -2316,10 +2332,10 @@ static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2316static void bnx2x_bz_fp(struct bnx2x *bp, int index) 2332static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2317{ 2333{
2318 struct bnx2x_fastpath *fp = &bp->fp[index]; 2334 struct bnx2x_fastpath *fp = &bp->fp[index];
2319
2320 int cos; 2335 int cos;
2321 struct napi_struct orig_napi = fp->napi; 2336 struct napi_struct orig_napi = fp->napi;
2322 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; 2337 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2338
2323 /* bzero bnx2x_fastpath contents */ 2339 /* bzero bnx2x_fastpath contents */
2324 if (fp->tpa_info) 2340 if (fp->tpa_info)
2325 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * 2341 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
@@ -2345,8 +2361,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2345 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * 2361 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2346 BNX2X_NUM_ETH_QUEUES(bp) + index]; 2362 BNX2X_NUM_ETH_QUEUES(bp) + index];
2347 2363
2348 /* 2364 /* set the tpa flag for each queue. The tpa flag determines the queue
2349 * set the tpa flag for each queue. The tpa flag determines the queue
2350 * minimal size so it must be set prior to queue memory allocation 2365 * minimal size so it must be set prior to queue memory allocation
2351 */ 2366 */
2352 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || 2367 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
@@ -2429,7 +2444,6 @@ int bnx2x_load_cnic(struct bnx2x *bp)
2429 if (bp->state == BNX2X_STATE_OPEN) 2444 if (bp->state == BNX2X_STATE_OPEN)
2430 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 2445 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2431 2446
2432
2433 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n"); 2447 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2434 2448
2435 return 0; 2449 return 0;
@@ -2472,6 +2486,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2472 2486
2473 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 2487 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2474 2488
2489 /* zero the structure w/o any lock, before SP handler is initialized */
2475 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); 2490 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2476 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, 2491 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2477 &bp->last_reported_link.link_report_flags); 2492 &bp->last_reported_link.link_report_flags);
@@ -2536,8 +2551,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2536 } 2551 }
2537 2552
2538 /* configure multi cos mappings in kernel. 2553 /* configure multi cos mappings in kernel.
2539 * this configuration may be overriden by a multi class queue discipline 2554 * this configuration may be overridden by a multi class queue
2540 * or by a dcbx negotiation result. 2555 * discipline or by a dcbx negotiation result.
2541 */ 2556 */
2542 bnx2x_setup_tc(bp->dev, bp->max_cos); 2557 bnx2x_setup_tc(bp->dev, bp->max_cos);
2543 2558
@@ -2696,7 +2711,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2696 /* Start the Tx */ 2711 /* Start the Tx */
2697 switch (load_mode) { 2712 switch (load_mode) {
2698 case LOAD_NORMAL: 2713 case LOAD_NORMAL:
2699 /* Tx queue should be only reenabled */ 2714 /* Tx queue should be only re-enabled */
2700 netif_tx_wake_all_queues(bp->dev); 2715 netif_tx_wake_all_queues(bp->dev);
2701 break; 2716 break;
2702 2717
@@ -2841,7 +2856,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2841 } 2856 }
2842 2857
2843 /* Nothing to do during unload if previous bnx2x_nic_load() 2858 /* Nothing to do during unload if previous bnx2x_nic_load()
2844 * have not completed succesfully - all resourses are released. 2859 * have not completed successfully - all resources are released.
2845 * 2860 *
2846 * we can get here only after unsuccessful ndo_* callback, during which 2861 * we can get here only after unsuccessful ndo_* callback, during which
2847 * dev->IFF_UP flag is still on. 2862 * dev->IFF_UP flag is still on.
@@ -2856,6 +2871,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2856 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 2871 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2857 smp_mb(); 2872 smp_mb();
2858 2873
2874 /* indicate to VFs that the PF is going down */
2875 bnx2x_iov_channel_down(bp);
2876
2859 if (CNIC_LOADED(bp)) 2877 if (CNIC_LOADED(bp))
2860 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 2878 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2861 2879
@@ -2890,10 +2908,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2890 /* Send the UNLOAD_REQUEST to the MCP */ 2908 /* Send the UNLOAD_REQUEST to the MCP */
2891 bnx2x_send_unload_req(bp, unload_mode); 2909 bnx2x_send_unload_req(bp, unload_mode);
2892 2910
2893 /* 2911 /* Prevent transactions to host from the functions on the
2894 * Prevent transactions to host from the functions on the
2895 * engine that doesn't reset global blocks in case of global 2912 * engine that doesn't reset global blocks in case of global
2896 * attention once gloabl blocks are reset and gates are opened 2913 * attention once global blocks are reset and gates are opened
2897 * (the engine which leader will perform the recovery 2914 * (the engine which leader will perform the recovery
2898 * last). 2915 * last).
2899 */ 2916 */
@@ -2914,7 +2931,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2914 } 2931 }
2915 2932
2916 /* 2933 /*
2917 * At this stage no more interrupts will arrive so we may safly clean 2934 * At this stage no more interrupts will arrive so we may safely clean
2918 * the queueable objects here in case they failed to get cleaned so far. 2935 * the queueable objects here in case they failed to get cleaned so far.
2919 */ 2936 */
2920 if (IS_PF(bp)) 2937 if (IS_PF(bp))
@@ -2955,7 +2972,6 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2955 bnx2x_set_reset_global(bp); 2972 bnx2x_set_reset_global(bp);
2956 } 2973 }
2957 2974
2958
2959 /* The last driver must disable a "close the gate" if there is no 2975 /* The last driver must disable a "close the gate" if there is no
2960 * parity attention or "process kill" pending. 2976 * parity attention or "process kill" pending.
2961 */ 2977 */
@@ -3040,6 +3056,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
3040 return 0; 3056 return 0;
3041 } 3057 }
3042#endif 3058#endif
3059 if (!bnx2x_fp_lock_napi(fp))
3060 return work_done;
3043 3061
3044 for_each_cos_in_tx_queue(fp, cos) 3062 for_each_cos_in_tx_queue(fp, cos)
3045 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) 3063 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
@@ -3049,12 +3067,15 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
3049 work_done += bnx2x_rx_int(fp, budget - work_done); 3067 work_done += bnx2x_rx_int(fp, budget - work_done);
3050 3068
3051 /* must not complete if we consumed full budget */ 3069 /* must not complete if we consumed full budget */
3052 if (work_done >= budget) 3070 if (work_done >= budget) {
3071 bnx2x_fp_unlock_napi(fp);
3053 break; 3072 break;
3073 }
3054 } 3074 }
3055 3075
3056 /* Fall out from the NAPI loop if needed */ 3076 /* Fall out from the NAPI loop if needed */
3057 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 3077 if (!bnx2x_fp_unlock_napi(fp) &&
3078 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3058 3079
3059 /* No need to update SB for FCoE L2 ring as long as 3080 /* No need to update SB for FCoE L2 ring as long as
3060 * it's connected to the default SB and the SB 3081 * it's connected to the default SB and the SB
@@ -3096,6 +3117,32 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
3096 return work_done; 3117 return work_done;
3097} 3118}
3098 3119
3120#ifdef CONFIG_NET_LL_RX_POLL
3121/* must be called with local_bh_disable()d */
3122int bnx2x_low_latency_recv(struct napi_struct *napi)
3123{
3124 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3125 napi);
3126 struct bnx2x *bp = fp->bp;
3127 int found = 0;
3128
3129 if ((bp->state == BNX2X_STATE_CLOSED) ||
3130 (bp->state == BNX2X_STATE_ERROR) ||
3131 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3132 return LL_FLUSH_FAILED;
3133
3134 if (!bnx2x_fp_lock_poll(fp))
3135 return LL_FLUSH_BUSY;
3136
3137 if (bnx2x_has_rx_work(fp))
3138 found = bnx2x_rx_int(fp, 4);
3139
3140 bnx2x_fp_unlock_poll(fp);
3141
3142 return found;
3143}
3144#endif
3145
3099/* we split the first BD into headers and data BDs 3146/* we split the first BD into headers and data BDs
3100 * to ease the pain of our fellow microcode engineers 3147 * to ease the pain of our fellow microcode engineers
3101 * we use one mapping for both BDs 3148 * we use one mapping for both BDs
@@ -3496,9 +3543,12 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3496 /* outer IP header info */ 3543 /* outer IP header info */
3497 if (xmit_type & XMIT_CSUM_V4) { 3544 if (xmit_type & XMIT_CSUM_V4) {
3498 struct iphdr *iph = ip_hdr(skb); 3545 struct iphdr *iph = ip_hdr(skb);
3546 u16 csum = (__force u16)(~iph->check) -
3547 (__force u16)iph->tot_len -
3548 (__force u16)iph->frag_off;
3549
3499 pbd2->fw_ip_csum_wo_len_flags_frag = 3550 pbd2->fw_ip_csum_wo_len_flags_frag =
3500 bswab16(csum_fold((~iph->check) - 3551 bswab16(csum_fold((__force __wsum)csum));
3501 iph->tot_len - iph->frag_off));
3502 } else { 3552 } else {
3503 pbd2->fw_ip_hdr_to_payload_w = 3553 pbd2->fw_ip_hdr_to_payload_w =
3504 hlen_w - ((sizeof(struct ipv6hdr)) >> 1); 3554 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
@@ -3586,7 +3636,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3586 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", 3636 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3587 txq_index, fp_index, txdata_index); */ 3637 txq_index, fp_index, txdata_index); */
3588 3638
3589 /* enable this debug print to view the tranmission details 3639 /* enable this debug print to view the transmission details
3590 DP(NETIF_MSG_TX_QUEUED, 3640 DP(NETIF_MSG_TX_QUEUED,
3591 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", 3641 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3592 txdata->cid, fp_index, txdata_index, txdata, fp); */ 3642 txdata->cid, fp_index, txdata_index, txdata, fp); */
@@ -3968,7 +4018,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3968 /* setup tc must be called under rtnl lock */ 4018 /* setup tc must be called under rtnl lock */
3969 ASSERT_RTNL(); 4019 ASSERT_RTNL();
3970 4020
3971 /* no traffic classes requested. aborting */ 4021 /* no traffic classes requested. Aborting */
3972 if (!num_tc) { 4022 if (!num_tc) {
3973 netdev_reset_tc(dev); 4023 netdev_reset_tc(dev);
3974 return 0; 4024 return 0;
@@ -3976,7 +4026,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3976 4026
3977 /* requested to support too many traffic classes */ 4027 /* requested to support too many traffic classes */
3978 if (num_tc > bp->max_cos) { 4028 if (num_tc > bp->max_cos) {
3979 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n", 4029 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
3980 num_tc, bp->max_cos); 4030 num_tc, bp->max_cos);
3981 return -EINVAL; 4031 return -EINVAL;
3982 } 4032 }
@@ -3995,8 +4045,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3995 prio, bp->prio_to_cos[prio]); 4045 prio, bp->prio_to_cos[prio]);
3996 } 4046 }
3997 4047
3998 4048 /* Use this configuration to differentiate tc0 from other COSes
3999 /* Use this configuration to diffrentiate tc0 from other COSes
4000 This can be used for ets or pfc, and save the effort of setting 4049 This can be used for ets or pfc, and save the effort of setting
4001 up a multio class queue disc or negotiating DCBX with a switch 4050 up a multio class queue disc or negotiating DCBX with a switch
4002 netdev_set_prio_tc_map(dev, 0, 0); 4051 netdev_set_prio_tc_map(dev, 0, 0);
@@ -4288,10 +4337,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4288 &bnx2x_fp(bp, index, rx_desc_mapping), 4337 &bnx2x_fp(bp, index, rx_desc_mapping),
4289 sizeof(struct eth_rx_bd) * NUM_RX_BD); 4338 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4290 4339
4291 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring), 4340 /* Seed all CQEs by 1s */
4292 &bnx2x_fp(bp, index, rx_comp_mapping), 4341 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4293 sizeof(struct eth_fast_path_rx_cqe) * 4342 &bnx2x_fp(bp, index, rx_comp_mapping),
4294 NUM_RCQ_BD); 4343 sizeof(struct eth_fast_path_rx_cqe) *
4344 NUM_RCQ_BD);
4295 4345
4296 /* SGE ring */ 4346 /* SGE ring */
4297 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), 4347 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
@@ -4472,7 +4522,6 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4472alloc_err: 4522alloc_err:
4473 bnx2x_free_mem_bp(bp); 4523 bnx2x_free_mem_bp(bp);
4474 return -ENOMEM; 4524 return -ENOMEM;
4475
4476} 4525}
4477 4526
4478int bnx2x_reload_if_running(struct net_device *dev) 4527int bnx2x_reload_if_running(struct net_device *dev)
@@ -4514,7 +4563,6 @@ int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4514 } 4563 }
4515 4564
4516 return sel_phy_idx; 4565 return sel_phy_idx;
4517
4518} 4566}
4519int bnx2x_get_link_cfg_idx(struct bnx2x *bp) 4567int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4520{ 4568{
@@ -4602,6 +4650,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4602{ 4650{
4603 struct bnx2x *bp = netdev_priv(dev); 4651 struct bnx2x *bp = netdev_priv(dev);
4604 u32 flags = bp->flags; 4652 u32 flags = bp->flags;
4653 u32 changes;
4605 bool bnx2x_reload = false; 4654 bool bnx2x_reload = false;
4606 4655
4607 if (features & NETIF_F_LRO) 4656 if (features & NETIF_F_LRO)
@@ -4626,10 +4675,16 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4626 } 4675 }
4627 } 4676 }
4628 4677
4629 if (flags ^ bp->flags) { 4678 changes = flags ^ bp->flags;
4630 bp->flags = flags; 4679
4680 /* if GRO is changed while LRO is enabled, don't force a reload */
4681 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4682 changes &= ~GRO_ENABLE_FLAG;
4683
4684 if (changes)
4631 bnx2x_reload = true; 4685 bnx2x_reload = true;
4632 } 4686
4687 bp->flags = flags;
4633 4688
4634 if (bnx2x_reload) { 4689 if (bnx2x_reload) {
4635 if (bp->recovery_state == BNX2X_RECOVERY_DONE) 4690 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
@@ -4724,7 +4779,6 @@ int bnx2x_resume(struct pci_dev *pdev)
4724 return rc; 4779 return rc;
4725} 4780}
4726 4781
4727
4728void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, 4782void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4729 u32 cid) 4783 u32 cid)
4730{ 4784{
@@ -4742,7 +4796,6 @@ static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4742 u8 fw_sb_id, u8 sb_index, 4796 u8 fw_sb_id, u8 sb_index,
4743 u8 ticks) 4797 u8 ticks)
4744{ 4798{
4745
4746 u32 addr = BAR_CSTRORM_INTMEM + 4799 u32 addr = BAR_CSTRORM_INTMEM +
4747 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); 4800 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4748 REG_WR8(bp, addr, ticks); 4801 REG_WR8(bp, addr, ticks);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 151675d66b0d..c07a6d054cfe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -22,7 +22,6 @@
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/etherdevice.h> 23#include <linux/etherdevice.h>
24 24
25
26#include "bnx2x.h" 25#include "bnx2x.h"
27#include "bnx2x_sriov.h" 26#include "bnx2x_sriov.h"
28 27
@@ -50,13 +49,25 @@ extern int int_mode;
50 } \ 49 } \
51 } while (0) 50 } while (0)
52 51
53#define BNX2X_PCI_ALLOC(x, y, size) \ 52#define BNX2X_PCI_ALLOC(x, y, size) \
54do { \ 53 do { \
55 x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ 54 x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
56 GFP_KERNEL | __GFP_ZERO); \ 55 GFP_KERNEL | __GFP_ZERO); \
57 if (x == NULL) \ 56 if (x == NULL) \
58 goto alloc_mem_err; \ 57 goto alloc_mem_err; \
59} while (0) 58 DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
59 (unsigned long long)(*y), x); \
60 } while (0)
61
62#define BNX2X_PCI_FALLOC(x, y, size) \
63 do { \
64 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
65 if (x == NULL) \
66 goto alloc_mem_err; \
67 memset((void *)x, 0xFFFFFFFF, size); \
68 DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\
69 (unsigned long long)(*y), x); \
70 } while (0)
60 71
61#define BNX2X_ALLOC(x, size) \ 72#define BNX2X_ALLOC(x, size) \
62 do { \ 73 do { \
@@ -494,9 +505,6 @@ void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
494/* Error handling */ 505/* Error handling */
495void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); 506void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
496 507
497/* validate currect fw is loaded */
498bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
499
500/* dev_close main block */ 508/* dev_close main block */
501int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link); 509int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
502 510
@@ -607,6 +615,13 @@ int bnx2x_enable_msi(struct bnx2x *bp);
607int bnx2x_poll(struct napi_struct *napi, int budget); 615int bnx2x_poll(struct napi_struct *napi, int budget);
608 616
609/** 617/**
618 * bnx2x_low_latency_recv - LL callback
619 *
620 * @napi: napi structure
621 */
622int bnx2x_low_latency_recv(struct napi_struct *napi);
623
624/**
610 * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure 625 * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure
611 * 626 *
612 * @bp: driver handle 627 * @bp: driver handle
@@ -800,16 +815,18 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
800 return false; 815 return false;
801} 816}
802 817
818#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0)
819#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF)
803static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) 820static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
804{ 821{
805 u16 rx_cons_sb; 822 u16 cons;
823 union eth_rx_cqe *cqe;
824 struct eth_fast_path_rx_cqe *cqe_fp;
806 825
807 /* Tell compiler that status block fields can change */ 826 cons = RCQ_BD(fp->rx_comp_cons);
808 barrier(); 827 cqe = &fp->rx_comp_ring[cons];
809 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); 828 cqe_fp = &cqe->fast_path_cqe;
810 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) 829 return BNX2X_IS_CQE_COMPLETED(cqe_fp);
811 rx_cons_sb++;
812 return (fp->rx_comp_cons != rx_cons_sb);
813} 830}
814 831
815/** 832/**
@@ -848,9 +865,11 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
848 int i; 865 int i;
849 866
850 /* Add NAPI objects */ 867 /* Add NAPI objects */
851 for_each_rx_queue_cnic(bp, i) 868 for_each_rx_queue_cnic(bp, i) {
852 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 869 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
853 bnx2x_poll, NAPI_POLL_WEIGHT); 870 bnx2x_poll, NAPI_POLL_WEIGHT);
871 napi_hash_add(&bnx2x_fp(bp, i, napi));
872 }
854} 873}
855 874
856static inline void bnx2x_add_all_napi(struct bnx2x *bp) 875static inline void bnx2x_add_all_napi(struct bnx2x *bp)
@@ -858,25 +877,31 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
858 int i; 877 int i;
859 878
860 /* Add NAPI objects */ 879 /* Add NAPI objects */
861 for_each_eth_queue(bp, i) 880 for_each_eth_queue(bp, i) {
862 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 881 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
863 bnx2x_poll, NAPI_POLL_WEIGHT); 882 bnx2x_poll, NAPI_POLL_WEIGHT);
883 napi_hash_add(&bnx2x_fp(bp, i, napi));
884 }
864} 885}
865 886
866static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) 887static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
867{ 888{
868 int i; 889 int i;
869 890
870 for_each_rx_queue_cnic(bp, i) 891 for_each_rx_queue_cnic(bp, i) {
892 napi_hash_del(&bnx2x_fp(bp, i, napi));
871 netif_napi_del(&bnx2x_fp(bp, i, napi)); 893 netif_napi_del(&bnx2x_fp(bp, i, napi));
894 }
872} 895}
873 896
874static inline void bnx2x_del_all_napi(struct bnx2x *bp) 897static inline void bnx2x_del_all_napi(struct bnx2x *bp)
875{ 898{
876 int i; 899 int i;
877 900
878 for_each_eth_queue(bp, i) 901 for_each_eth_queue(bp, i) {
902 napi_hash_del(&bnx2x_fp(bp, i, napi));
879 netif_napi_del(&bnx2x_fp(bp, i, napi)); 903 netif_napi_del(&bnx2x_fp(bp, i, napi));
904 }
880} 905}
881 906
882int bnx2x_set_int_mode(struct bnx2x *bp); 907int bnx2x_set_int_mode(struct bnx2x *bp);
@@ -1171,7 +1196,6 @@ static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
1171 1196
1172static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) 1197static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
1173{ 1198{
1174
1175 /* the 'first' id is allocated for the cnic */ 1199 /* the 'first' id is allocated for the cnic */
1176 return bp->base_fw_ndsb; 1200 return bp->base_fw_ndsb;
1177} 1201}
@@ -1181,7 +1205,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
1181 return bp->igu_base_sb; 1205 return bp->igu_base_sb;
1182} 1206}
1183 1207
1184
1185static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) 1208static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1186{ 1209{
1187 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 1210 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
@@ -1334,8 +1357,8 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
1334 int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE); 1357 int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);
1335 1358
1336 /* 1359 /*
1337 * 1. number of frags should not grow above MAX_SKB_FRAGS 1360 * 1. Number of frags should not grow above MAX_SKB_FRAGS
1338 * 2. frag must fit the page 1361 * 2. Frag must fit the page
1339 */ 1362 */
1340 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; 1363 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
1341} 1364}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 4b077a7f16af..0c94df47e0e8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -253,7 +253,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
253 253
254 memset(&pg_help_data, 0, sizeof(struct pg_help_data)); 254 memset(&pg_help_data, 0, sizeof(struct pg_help_data));
255 255
256
257 if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR)) 256 if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
258 DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n"); 257 DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n");
259 258
@@ -298,7 +297,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
298static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, 297static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
299 struct dcbx_pfc_feature *pfc, u32 error) 298 struct dcbx_pfc_feature *pfc, u32 error)
300{ 299{
301
302 if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR)) 300 if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
303 DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n"); 301 DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n");
304 302
@@ -367,7 +365,6 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
367 struct lldp_remote_mib *remote_mib ; 365 struct lldp_remote_mib *remote_mib ;
368 struct lldp_local_mib *local_mib; 366 struct lldp_local_mib *local_mib;
369 367
370
371 switch (read_mib_type) { 368 switch (read_mib_type) {
372 case DCBX_READ_LOCAL_MIB: 369 case DCBX_READ_LOCAL_MIB:
373 mib_size = sizeof(struct lldp_local_mib); 370 mib_size = sizeof(struct lldp_local_mib);
@@ -629,7 +626,6 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
629 return 0; 626 return 0;
630} 627}
631 628
632
633#ifdef BCM_DCBNL 629#ifdef BCM_DCBNL
634static inline 630static inline
635u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent) 631u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
@@ -691,7 +687,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
691 } 687 }
692 688
693 /* setup tc must be called under rtnl lock, but we can't take it here 689 /* setup tc must be called under rtnl lock, but we can't take it here
694 * as we are handling an attetntion on a work queue which must be 690 * as we are handling an attention on a work queue which must be
695 * flushed at some rtnl-locked contexts (e.g. if down) 691 * flushed at some rtnl-locked contexts (e.g. if down)
696 */ 692 */
697 if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) 693 if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
@@ -711,7 +707,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
711 */ 707 */
712 bnx2x_dcbnl_update_applist(bp, true); 708 bnx2x_dcbnl_update_applist(bp, true);
713 709
714 /* Read rmeote mib if dcbx is in the FW */ 710 /* Read remote mib if dcbx is in the FW */
715 if (bnx2x_dcbx_read_shmem_remote_mib(bp)) 711 if (bnx2x_dcbx_read_shmem_remote_mib(bp))
716 return; 712 return;
717#endif 713#endif
@@ -742,7 +738,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
742 bnx2x_dcbx_update_tc_mapping(bp); 738 bnx2x_dcbx_update_tc_mapping(bp);
743 739
744 /* 740 /*
745 * allow other funtions to update their netdevices 741 * allow other functions to update their netdevices
746 * accordingly 742 * accordingly
747 */ 743 */
748 if (IS_MF(bp)) 744 if (IS_MF(bp))
@@ -864,7 +860,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
864 i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); 860 i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
865 } 861 }
866 862
867 /*For IEEE admin_recommendation_bw_precentage 863 /*For IEEE admin_recommendation_bw_percentage
868 *For IEEE admin_recommendation_ets_pg */ 864 *For IEEE admin_recommendation_ets_pg */
869 af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; 865 af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
870 for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { 866 for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
@@ -896,13 +892,11 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
896 } 892 }
897 893
898 af->app.default_pri = (u8)dp->admin_default_priority; 894 af->app.default_pri = (u8)dp->admin_default_priority;
899
900 } 895 }
901 896
902 /* Write the data. */ 897 /* Write the data. */
903 bnx2x_write_data(bp, (u32 *)&admin_mib, offset, 898 bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
904 sizeof(struct lldp_admin_mib)); 899 sizeof(struct lldp_admin_mib));
905
906} 900}
907 901
908void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) 902void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
@@ -1076,7 +1070,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
1076 bool pg_found = false; 1070 bool pg_found = false;
1077 u32 i, traf_type, add_traf_type, add_pg; 1071 u32 i, traf_type, add_traf_type, add_pg;
1078 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; 1072 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
1079 struct pg_entry_help_data *data = help_data->data; /*shotcut*/ 1073 struct pg_entry_help_data *data = help_data->data; /*shortcut*/
1080 1074
1081 /* Set to invalid */ 1075 /* Set to invalid */
1082 for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) 1076 for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
@@ -1172,7 +1166,8 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
1172 DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); 1166 DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
1173 else 1167 else
1174 /* If we join a group and one is strict 1168 /* If we join a group and one is strict
1175 * than the bw rulls */ 1169 * than the bw rules
1170 */
1176 cos_data->data[entry].strict = 1171 cos_data->data[entry].strict =
1177 BNX2X_DCBX_STRICT_COS_HIGHEST; 1172 BNX2X_DCBX_STRICT_COS_HIGHEST;
1178 } 1173 }
@@ -1181,7 +1176,6 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
1181 BNX2X_ERR("dcbx error: Both groups must have priorities\n"); 1176 BNX2X_ERR("dcbx error: Both groups must have priorities\n");
1182} 1177}
1183 1178
1184
1185#ifndef POWER_OF_2 1179#ifndef POWER_OF_2
1186#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) 1180#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1))))
1187#endif 1181#endif
@@ -1284,7 +1278,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
1284 } else { 1278 } else {
1285 /* If there are only pauseable priorities or 1279 /* If there are only pauseable priorities or
1286 * only non-pauseable,* the lower priorities go 1280 * only non-pauseable,* the lower priorities go
1287 * to the first queue and the higherpriorities go 1281 * to the first queue and the higher priorities go
1288 * to the second queue. 1282 * to the second queue.
1289 */ 1283 */
1290 cos_data->data[0].pausable = 1284 cos_data->data[0].pausable =
@@ -1484,7 +1478,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
1484 * queue and one priority goes to the second queue. 1478 * queue and one priority goes to the second queue.
1485 * 1479 *
1486 * We will join this two cases: 1480 * We will join this two cases:
1487 * if one is BW limited it will go to the secoend queue 1481 * if one is BW limited it will go to the second queue
1488 * otherwise the last priority will get it 1482 * otherwise the last priority will get it
1489 */ 1483 */
1490 1484
@@ -1504,7 +1498,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
1504 false == b_found_strict) 1498 false == b_found_strict)
1505 /* last entry will be handled separately 1499 /* last entry will be handled separately
1506 * If no priority is strict than last 1500 * If no priority is strict than last
1507 * enty goes to last queue.*/ 1501 * entry goes to last queue.
1502 */
1508 entry = 1; 1503 entry = 1;
1509 cos_data->data[entry].pri_join_mask |= 1504 cos_data->data[entry].pri_join_mask |=
1510 pri_tested; 1505 pri_tested;
@@ -1516,7 +1511,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
1516 b_found_strict = true; 1511 b_found_strict = true;
1517 cos_data->data[1].pri_join_mask |= pri_tested; 1512 cos_data->data[1].pri_join_mask |= pri_tested;
1518 /* If we join a group and one is strict 1513 /* If we join a group and one is strict
1519 * than the bw rulls */ 1514 * than the bw rules
1515 */
1520 cos_data->data[1].strict = 1516 cos_data->data[1].strict =
1521 BNX2X_DCBX_STRICT_COS_HIGHEST; 1517 BNX2X_DCBX_STRICT_COS_HIGHEST;
1522 } 1518 }
@@ -1524,7 +1520,6 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
1524 } 1520 }
1525} 1521}
1526 1522
1527
1528static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, 1523static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
1529 struct pg_help_data *help_data, 1524 struct pg_help_data *help_data,
1530 struct dcbx_ets_feature *ets, 1525 struct dcbx_ets_feature *ets,
@@ -1533,7 +1528,6 @@ static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
1533 u32 pri_join_mask, 1528 u32 pri_join_mask,
1534 u8 num_of_dif_pri) 1529 u8 num_of_dif_pri)
1535{ 1530{
1536
1537 /* default E2 settings */ 1531 /* default E2 settings */
1538 cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2; 1532 cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
1539 1533
@@ -1629,7 +1623,6 @@ static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
1629 u8 num_spread_of_entries, 1623 u8 num_spread_of_entries,
1630 u8 strict_app_pris) 1624 u8 strict_app_pris)
1631{ 1625{
1632
1633 if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry, 1626 if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
1634 num_spread_of_entries, 1627 num_spread_of_entries,
1635 strict_app_pris)) { 1628 strict_app_pris)) {
@@ -1848,7 +1841,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
1848 1841
1849void bnx2x_dcbx_pmf_update(struct bnx2x *bp) 1842void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
1850{ 1843{
1851 /* if we need to syncronize DCBX result from prev PMF 1844 /* if we need to synchronize DCBX result from prev PMF
1852 * read it from shmem and update bp and netdev accordingly 1845 * read it from shmem and update bp and netdev accordingly
1853 */ 1846 */
1854 if (SHMEM2_HAS(bp, drv_flags) && 1847 if (SHMEM2_HAS(bp, drv_flags) &&
@@ -1876,7 +1869,6 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
1876 * dcbx negotiation. 1869 * dcbx negotiation.
1877 */ 1870 */
1878 bnx2x_dcbx_update_tc_mapping(bp); 1871 bnx2x_dcbx_update_tc_mapping(bp);
1879
1880 } 1872 }
1881} 1873}
1882 1874
@@ -1943,14 +1935,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
1943 return; 1935 return;
1944 1936
1945 /** 1937 /**
1946 * bw_pct ingnored - band-width percentage devision between user 1938 * bw_pct ignored - band-width percentage devision between user
1947 * priorities within the same group is not 1939 * priorities within the same group is not
1948 * standard and hence not supported 1940 * standard and hence not supported
1949 * 1941 *
1950 * prio_type igonred - priority levels within the same group are not 1942 * prio_type ignored - priority levels within the same group are not
1951 * standard and hence are not supported. According 1943 * standard and hence are not supported. According
1952 * to the standard pgid 15 is dedicated to strict 1944 * to the standard pgid 15 is dedicated to strict
1953 * prioirty traffic (on the port level). 1945 * priority traffic (on the port level).
1954 * 1946 *
1955 * up_map ignored 1947 * up_map ignored
1956 */ 1948 */
@@ -1995,14 +1987,14 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
1995 DP(BNX2X_MSG_DCB, "prio = %d\n", prio); 1987 DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
1996 1988
1997 /** 1989 /**
1998 * bw_pct ingnored - band-width percentage devision between user 1990 * bw_pct ignored - band-width percentage devision between user
1999 * priorities within the same group is not 1991 * priorities within the same group is not
2000 * standard and hence not supported 1992 * standard and hence not supported
2001 * 1993 *
2002 * prio_type igonred - priority levels within the same group are not 1994 * prio_type ignored - priority levels within the same group are not
2003 * standard and hence are not supported. According 1995 * standard and hence are not supported. According
2004 * to the standard pgid 15 is dedicated to strict 1996 * to the standard pgid 15 is dedicated to strict
2005 * prioirty traffic (on the port level). 1997 * priority traffic (on the port level).
2006 * 1998 *
2007 * up_map ignored 1999 * up_map ignored
2008 */ 2000 */
@@ -2389,7 +2381,7 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
2389 *flags |= DCB_FEATCFG_ERROR; 2381 *flags |= DCB_FEATCFG_ERROR;
2390 break; 2382 break;
2391 default: 2383 default:
2392 BNX2X_ERR("Non valid featrue-ID\n"); 2384 BNX2X_ERR("Non valid feature-ID\n");
2393 rval = 1; 2385 rval = 1;
2394 break; 2386 break;
2395 } 2387 }
@@ -2430,7 +2422,7 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
2430 flags & DCB_FEATCFG_WILLING ? 1 : 0; 2422 flags & DCB_FEATCFG_WILLING ? 1 : 0;
2431 break; 2423 break;
2432 default: 2424 default:
2433 BNX2X_ERR("Non valid featrue-ID\n"); 2425 BNX2X_ERR("Non valid feature-ID\n");
2434 rval = 1; 2426 rval = 1;
2435 break; 2427 break;
2436 } 2428 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index d153f44cf8f9..125bd1b6586f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -134,8 +134,6 @@ enum {
134#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130 134#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
135#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170 135#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170
136 136
137
138
139struct cos_entry_help_data { 137struct cos_entry_help_data {
140 u32 pri_join_mask; 138 u32 pri_join_mask;
141 u32 cos_bw; 139 u32 cos_bw;
@@ -170,7 +168,6 @@ struct cos_help_data {
170 (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \ 168 (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
171 IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri)))) 169 IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
172 170
173
174struct pg_entry_help_data { 171struct pg_entry_help_data {
175 u8 num_of_dif_pri; 172 u8 num_of_dif_pri;
176 u8 pg; 173 u8 pg;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index bff5e33eaa14..12eb4baee9f6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -13,12 +13,6 @@
13 * consent. 13 * consent.
14 */ 14 */
15 15
16
17/* This struct holds a signature to ensure the dump returned from the driver
18 * match the meta data file inserted to grc_dump.tcl
19 * The signature is time stamp, diag version and grc_dump version
20 */
21
22#ifndef BNX2X_DUMP_H 16#ifndef BNX2X_DUMP_H
23#define BNX2X_DUMP_H 17#define BNX2X_DUMP_H
24 18
@@ -28,7 +22,6 @@
28#define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80 22#define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80
29#define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80 23#define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80
30 24
31
32/* Possible Chips */ 25/* Possible Chips */
33#define DUMP_CHIP_E1 1 26#define DUMP_CHIP_E1 1
34#define DUMP_CHIP_E1H 2 27#define DUMP_CHIP_E1H 2
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ce1a91618677..c5f225101684 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -320,7 +320,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
320 320
321 speed = ethtool_cmd_speed(cmd); 321 speed = ethtool_cmd_speed(cmd);
322 322
323 /* If recieved a request for an unknown duplex, assume full*/ 323 /* If received a request for an unknown duplex, assume full*/
324 if (cmd->duplex == DUPLEX_UNKNOWN) 324 if (cmd->duplex == DUPLEX_UNKNOWN)
325 cmd->duplex = DUPLEX_FULL; 325 cmd->duplex = DUPLEX_FULL;
326 326
@@ -733,7 +733,6 @@ static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
733 return false; 733 return false;
734} 734}
735 735
736
737static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp, 736static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
738 const struct wreg_addr *wreg_info) 737 const struct wreg_addr *wreg_info)
739{ 738{
@@ -850,7 +849,7 @@ static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
850 849
851 /* Paged registers are supported in E2 & E3 only */ 850 /* Paged registers are supported in E2 & E3 only */
852 if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) { 851 if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
853 /* Read "paged" registes */ 852 /* Read "paged" registers */
854 bnx2x_read_pages_regs(bp, p, preset); 853 bnx2x_read_pages_regs(bp, p, preset);
855 } 854 }
856 855
@@ -960,6 +959,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
960 struct bnx2x *bp = netdev_priv(dev); 959 struct bnx2x *bp = netdev_priv(dev);
961 960
962 /* Use the ethtool_dump "flag" field as the dump preset index */ 961 /* Use the ethtool_dump "flag" field as the dump preset index */
962 if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
963 return -EINVAL;
964
963 bp->dump_preset_idx = val->flag; 965 bp->dump_preset_idx = val->flag;
964 return 0; 966 return 0;
965} 967}
@@ -969,12 +971,12 @@ static int bnx2x_get_dump_flag(struct net_device *dev,
969{ 971{
970 struct bnx2x *bp = netdev_priv(dev); 972 struct bnx2x *bp = netdev_priv(dev);
971 973
974 dump->version = BNX2X_DUMP_VERSION;
975 dump->flag = bp->dump_preset_idx;
972 /* Calculate the requested preset idx length */ 976 /* Calculate the requested preset idx length */
973 dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx); 977 dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
974 DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n", 978 DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
975 bp->dump_preset_idx, dump->len); 979 bp->dump_preset_idx, dump->len);
976
977 dump->flag = ETHTOOL_GET_DUMP_DATA;
978 return 0; 980 return 0;
979} 981}
980 982
@@ -986,8 +988,6 @@ static int bnx2x_get_dump_data(struct net_device *dev,
986 struct bnx2x *bp = netdev_priv(dev); 988 struct bnx2x *bp = netdev_priv(dev);
987 struct dump_header dump_hdr = {0}; 989 struct dump_header dump_hdr = {0};
988 990
989 memset(p, 0, dump->len);
990
991 /* Disable parity attentions as long as following dump may 991 /* Disable parity attentions as long as following dump may
992 * cause false alarms by reading never written registers. We 992 * cause false alarms by reading never written registers. We
993 * will re-enable parity attentions right after the dump. 993 * will re-enable parity attentions right after the dump.
@@ -1155,8 +1155,8 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
1155 return bp->common.flash_size; 1155 return bp->common.flash_size;
1156} 1156}
1157 1157
1158/* Per pf misc lock must be aquired before the per port mcp lock. Otherwise, had 1158/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1159 * we done things the other way around, if two pfs from the same port would 1159 * had we done things the other way around, if two pfs from the same port would
1160 * attempt to access nvram at the same time, we could run into a scenario such 1160 * attempt to access nvram at the same time, we could run into a scenario such
1161 * as: 1161 * as:
1162 * pf A takes the port lock. 1162 * pf A takes the port lock.
@@ -1381,12 +1381,29 @@ static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
1381 return rc; 1381 return rc;
1382} 1382}
1383 1383
1384static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
1385{
1386 int rc = 1;
1387 u16 pm = 0;
1388 struct net_device *dev = pci_get_drvdata(bp->pdev);
1389
1390 if (bp->pm_cap)
1391 rc = pci_read_config_word(bp->pdev,
1392 bp->pm_cap + PCI_PM_CTRL, &pm);
1393
1394 if ((rc && !netif_running(dev)) ||
1395 (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
1396 return false;
1397
1398 return true;
1399}
1400
1384static int bnx2x_get_eeprom(struct net_device *dev, 1401static int bnx2x_get_eeprom(struct net_device *dev,
1385 struct ethtool_eeprom *eeprom, u8 *eebuf) 1402 struct ethtool_eeprom *eeprom, u8 *eebuf)
1386{ 1403{
1387 struct bnx2x *bp = netdev_priv(dev); 1404 struct bnx2x *bp = netdev_priv(dev);
1388 1405
1389 if (!netif_running(dev)) { 1406 if (!bnx2x_is_nvm_accessible(bp)) {
1390 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 1407 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1391 "cannot access eeprom when the interface is down\n"); 1408 "cannot access eeprom when the interface is down\n");
1392 return -EAGAIN; 1409 return -EAGAIN;
@@ -1411,7 +1428,7 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
1411 u8 *user_data = data; 1428 u8 *user_data = data;
1412 unsigned int start_addr = ee->offset, xfer_size = 0; 1429 unsigned int start_addr = ee->offset, xfer_size = 0;
1413 1430
1414 if (!netif_running(dev)) { 1431 if (!bnx2x_is_nvm_accessible(bp)) {
1415 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 1432 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1416 "cannot access eeprom when the interface is down\n"); 1433 "cannot access eeprom when the interface is down\n");
1417 return -EAGAIN; 1434 return -EAGAIN;
@@ -1474,7 +1491,7 @@ static int bnx2x_get_module_info(struct net_device *dev,
1474 int phy_idx, rc; 1491 int phy_idx, rc;
1475 u8 sff8472_comp, diag_type; 1492 u8 sff8472_comp, diag_type;
1476 1493
1477 if (!netif_running(dev)) { 1494 if (!bnx2x_is_nvm_accessible(bp)) {
1478 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 1495 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1479 "cannot access eeprom when the interface is down\n"); 1496 "cannot access eeprom when the interface is down\n");
1480 return -EAGAIN; 1497 return -EAGAIN;
@@ -1594,8 +1611,10 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
1594 */ 1611 */
1595 val = be32_to_cpu(val_be); 1612 val = be32_to_cpu(val_be);
1596 1613
1597 val &= ~le32_to_cpu(0xff << BYTE_OFFSET(offset)); 1614 val &= ~le32_to_cpu((__force __le32)
1598 val |= le32_to_cpu(*data_buf << BYTE_OFFSET(offset)); 1615 (0xff << BYTE_OFFSET(offset)));
1616 val |= le32_to_cpu((__force __le32)
1617 (*data_buf << BYTE_OFFSET(offset)));
1599 1618
1600 rc = bnx2x_nvram_write_dword(bp, align_offset, val, 1619 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
1601 cmd_flags); 1620 cmd_flags);
@@ -1676,7 +1695,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
1676 int port = BP_PORT(bp); 1695 int port = BP_PORT(bp);
1677 int rc = 0; 1696 int rc = 0;
1678 u32 ext_phy_config; 1697 u32 ext_phy_config;
1679 if (!netif_running(dev)) { 1698
1699 if (!bnx2x_is_nvm_accessible(bp)) {
1680 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 1700 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1681 "cannot access eeprom when the interface is down\n"); 1701 "cannot access eeprom when the interface is down\n");
1682 return -EAGAIN; 1702 return -EAGAIN;
@@ -1921,6 +1941,19 @@ static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
1921 "link_test (online) " 1941 "link_test (online) "
1922}; 1942};
1923 1943
1944enum {
1945 BNX2X_PRI_FLAG_ISCSI,
1946 BNX2X_PRI_FLAG_FCOE,
1947 BNX2X_PRI_FLAG_STORAGE,
1948 BNX2X_PRI_FLAG_LEN,
1949};
1950
1951static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
1952 "iSCSI offload support",
1953 "FCoE offload support",
1954 "Storage only interface"
1955};
1956
1924static u32 bnx2x_eee_to_adv(u32 eee_adv) 1957static u32 bnx2x_eee_to_adv(u32 eee_adv)
1925{ 1958{
1926 u32 modes = 0; 1959 u32 modes = 0;
@@ -2041,7 +2074,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
2041 EEE_MODE_OVERRIDE_NVRAM | 2074 EEE_MODE_OVERRIDE_NVRAM |
2042 EEE_MODE_OUTPUT_TIME; 2075 EEE_MODE_OUTPUT_TIME;
2043 2076
2044 /* Restart link to propogate changes */ 2077 /* Restart link to propagate changes */
2045 if (netif_running(dev)) { 2078 if (netif_running(dev)) {
2046 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2079 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2047 bnx2x_force_link_reset(bp); 2080 bnx2x_force_link_reset(bp);
@@ -2160,7 +2193,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
2160 { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } 2193 { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
2161 }; 2194 };
2162 2195
2163 if (!netif_running(bp->dev)) { 2196 if (!bnx2x_is_nvm_accessible(bp)) {
2164 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 2197 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2165 "cannot access eeprom when the interface is down\n"); 2198 "cannot access eeprom when the interface is down\n");
2166 return rc; 2199 return rc;
@@ -2264,7 +2297,7 @@ static int bnx2x_test_memory(struct bnx2x *bp)
2264 { NULL, 0xffffffff, {0, 0, 0, 0} } 2297 { NULL, 0xffffffff, {0, 0, 0, 0} }
2265 }; 2298 };
2266 2299
2267 if (!netif_running(bp->dev)) { 2300 if (!bnx2x_is_nvm_accessible(bp)) {
2268 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 2301 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2269 "cannot access eeprom when the interface is down\n"); 2302 "cannot access eeprom when the interface is down\n");
2270 return rc; 2303 return rc;
@@ -2978,32 +3011,47 @@ static int bnx2x_num_stat_queues(struct bnx2x *bp)
2978static int bnx2x_get_sset_count(struct net_device *dev, int stringset) 3011static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2979{ 3012{
2980 struct bnx2x *bp = netdev_priv(dev); 3013 struct bnx2x *bp = netdev_priv(dev);
2981 int i, num_stats; 3014 int i, num_strings = 0;
2982 3015
2983 switch (stringset) { 3016 switch (stringset) {
2984 case ETH_SS_STATS: 3017 case ETH_SS_STATS:
2985 if (is_multi(bp)) { 3018 if (is_multi(bp)) {
2986 num_stats = bnx2x_num_stat_queues(bp) * 3019 num_strings = bnx2x_num_stat_queues(bp) *
2987 BNX2X_NUM_Q_STATS; 3020 BNX2X_NUM_Q_STATS;
2988 } else 3021 } else
2989 num_stats = 0; 3022 num_strings = 0;
2990 if (IS_MF_MODE_STAT(bp)) { 3023 if (IS_MF_MODE_STAT(bp)) {
2991 for (i = 0; i < BNX2X_NUM_STATS; i++) 3024 for (i = 0; i < BNX2X_NUM_STATS; i++)
2992 if (IS_FUNC_STAT(i)) 3025 if (IS_FUNC_STAT(i))
2993 num_stats++; 3026 num_strings++;
2994 } else 3027 } else
2995 num_stats += BNX2X_NUM_STATS; 3028 num_strings += BNX2X_NUM_STATS;
2996 3029
2997 return num_stats; 3030 return num_strings;
2998 3031
2999 case ETH_SS_TEST: 3032 case ETH_SS_TEST:
3000 return BNX2X_NUM_TESTS(bp); 3033 return BNX2X_NUM_TESTS(bp);
3001 3034
3035 case ETH_SS_PRIV_FLAGS:
3036 return BNX2X_PRI_FLAG_LEN;
3037
3002 default: 3038 default:
3003 return -EINVAL; 3039 return -EINVAL;
3004 } 3040 }
3005} 3041}
3006 3042
3043static u32 bnx2x_get_private_flags(struct net_device *dev)
3044{
3045 struct bnx2x *bp = netdev_priv(dev);
3046 u32 flags = 0;
3047
3048 flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI;
3049 flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE;
3050 flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE;
3051
3052 return flags;
3053}
3054
3007static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 3055static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
3008{ 3056{
3009 struct bnx2x *bp = netdev_priv(dev); 3057 struct bnx2x *bp = netdev_priv(dev);
@@ -3026,7 +3074,6 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
3026 } 3074 }
3027 } 3075 }
3028 3076
3029
3030 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 3077 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
3031 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) 3078 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
3032 continue; 3079 continue;
@@ -3045,6 +3092,12 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
3045 start = 4; 3092 start = 4;
3046 memcpy(buf, bnx2x_tests_str_arr + start, 3093 memcpy(buf, bnx2x_tests_str_arr + start,
3047 ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp)); 3094 ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
3095 break;
3096
3097 case ETH_SS_PRIV_FLAGS:
3098 memcpy(buf, bnx2x_private_arr,
3099 ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
3100 break;
3048 } 3101 }
3049} 3102}
3050 3103
@@ -3106,17 +3159,12 @@ static int bnx2x_set_phys_id(struct net_device *dev,
3106{ 3159{
3107 struct bnx2x *bp = netdev_priv(dev); 3160 struct bnx2x *bp = netdev_priv(dev);
3108 3161
3109 if (!netif_running(dev)) { 3162 if (!bnx2x_is_nvm_accessible(bp)) {
3110 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 3163 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
3111 "cannot access eeprom when the interface is down\n"); 3164 "cannot access eeprom when the interface is down\n");
3112 return -EAGAIN; 3165 return -EAGAIN;
3113 } 3166 }
3114 3167
3115 if (!bp->port.pmf) {
3116 DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n");
3117 return -EOPNOTSUPP;
3118 }
3119
3120 switch (state) { 3168 switch (state) {
3121 case ETHTOOL_ID_ACTIVE: 3169 case ETHTOOL_ID_ACTIVE:
3122 return 1; /* cycle on/off once per second */ 3170 return 1; /* cycle on/off once per second */
@@ -3148,7 +3196,6 @@ static int bnx2x_set_phys_id(struct net_device *dev,
3148 3196
3149static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) 3197static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
3150{ 3198{
3151
3152 switch (info->flow_type) { 3199 switch (info->flow_type) {
3153 case TCP_V4_FLOW: 3200 case TCP_V4_FLOW:
3154 case TCP_V6_FLOW: 3201 case TCP_V6_FLOW:
@@ -3384,7 +3431,6 @@ static int bnx2x_set_channels(struct net_device *dev,
3384{ 3431{
3385 struct bnx2x *bp = netdev_priv(dev); 3432 struct bnx2x *bp = netdev_priv(dev);
3386 3433
3387
3388 DP(BNX2X_MSG_ETHTOOL, 3434 DP(BNX2X_MSG_ETHTOOL,
3389 "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", 3435 "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
3390 channels->rx_count, channels->tx_count, channels->other_count, 3436 channels->rx_count, channels->tx_count, channels->other_count,
@@ -3445,6 +3491,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
3445 .set_pauseparam = bnx2x_set_pauseparam, 3491 .set_pauseparam = bnx2x_set_pauseparam,
3446 .self_test = bnx2x_self_test, 3492 .self_test = bnx2x_self_test,
3447 .get_sset_count = bnx2x_get_sset_count, 3493 .get_sset_count = bnx2x_get_sset_count,
3494 .get_priv_flags = bnx2x_get_private_flags,
3448 .get_strings = bnx2x_get_strings, 3495 .get_strings = bnx2x_get_strings,
3449 .set_phys_id = bnx2x_set_phys_id, 3496 .set_phys_id = bnx2x_set_phys_id,
3450 .get_ethtool_stats = bnx2x_get_ethtool_stats, 3497 .get_ethtool_stats = bnx2x_get_ethtool_stats,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 12f00a40cdf0..5018e52ae2ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1323,6 +1323,8 @@ struct drv_func_mb {
1323 #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002 1323 #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002
1324 1324
1325 #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a 1325 #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a
1326 #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000
1327
1326 u32 fw_mb_header; 1328 u32 fw_mb_header;
1327 #define FW_MSG_CODE_MASK 0xffff0000 1329 #define FW_MSG_CODE_MASK 0xffff0000
1328 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 1330 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
@@ -3816,7 +3818,8 @@ struct eth_fast_path_rx_cqe {
3816 __le16 len_on_bd; 3818 __le16 len_on_bd;
3817 struct parsing_flags pars_flags; 3819 struct parsing_flags pars_flags;
3818 union eth_sgl_or_raw_data sgl_or_raw_data; 3820 union eth_sgl_or_raw_data sgl_or_raw_data;
3819 __le32 reserved1[8]; 3821 __le32 reserved1[7];
3822 u32 marker;
3820}; 3823};
3821 3824
3822 3825
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b4c9dea93a53..15a528bda87c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -93,7 +93,6 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
93MODULE_FIRMWARE(FW_FILE_NAME_E1H); 93MODULE_FIRMWARE(FW_FILE_NAME_E1H);
94MODULE_FIRMWARE(FW_FILE_NAME_E2); 94MODULE_FIRMWARE(FW_FILE_NAME_E2);
95 95
96
97int num_queues; 96int num_queues;
98module_param(num_queues, int, 0); 97module_param(num_queues, int, 0);
99MODULE_PARM_DESC(num_queues, 98MODULE_PARM_DESC(num_queues,
@@ -103,8 +102,6 @@ static int disable_tpa;
103module_param(disable_tpa, int, 0); 102module_param(disable_tpa, int, 0);
104MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); 103MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
105 104
106#define INT_MODE_INTx 1
107#define INT_MODE_MSI 2
108int int_mode; 105int int_mode;
109module_param(int_mode, int, 0); 106module_param(int_mode, int, 0);
110MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " 107MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
@@ -122,8 +119,6 @@ static int debug;
122module_param(debug, int, 0); 119module_param(debug, int, 0);
123MODULE_PARM_DESC(debug, " Default debug msglevel"); 120MODULE_PARM_DESC(debug, " Default debug msglevel");
124 121
125
126
127struct workqueue_struct *bnx2x_wq; 122struct workqueue_struct *bnx2x_wq;
128 123
129struct bnx2x_mac_vals { 124struct bnx2x_mac_vals {
@@ -376,9 +371,11 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
376#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" 371#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
377#define DMAE_DP_DST_NONE "dst_addr [none]" 372#define DMAE_DP_DST_NONE "dst_addr [none]"
378 373
379void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) 374static void bnx2x_dp_dmae(struct bnx2x *bp,
375 struct dmae_command *dmae, int msglvl)
380{ 376{
381 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; 377 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
378 int i;
382 379
383 switch (dmae->opcode & DMAE_COMMAND_DST) { 380 switch (dmae->opcode & DMAE_COMMAND_DST) {
384 case DMAE_CMD_DST_PCI: 381 case DMAE_CMD_DST_PCI:
@@ -434,6 +431,10 @@ void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
434 dmae->comp_val); 431 dmae->comp_val);
435 break; 432 break;
436 } 433 }
434
435 for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
436 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
437 i, *(((u32 *)dmae) + i));
437} 438}
438 439
439/* copy command into DMAE command memory and set DMAE command go */ 440/* copy command into DMAE command memory and set DMAE command go */
@@ -508,8 +509,9 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
508 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; 509 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
509 int rc = 0; 510 int rc = 0;
510 511
511 /* 512 bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
512 * Lock the dmae channel. Disable BHs to prevent a dead-lock 513
514 /* Lock the dmae channel. Disable BHs to prevent a dead-lock
513 * as long as this code is called both from syscall context and 515 * as long as this code is called both from syscall context and
514 * from ndo_set_rx_mode() flow that may be called from BH. 516 * from ndo_set_rx_mode() flow that may be called from BH.
515 */ 517 */
@@ -548,6 +550,7 @@ unlock:
548void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 550void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
549 u32 len32) 551 u32 len32)
550{ 552{
553 int rc;
551 struct dmae_command dmae; 554 struct dmae_command dmae;
552 555
553 if (!bp->dmae_ready) { 556 if (!bp->dmae_ready) {
@@ -571,11 +574,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
571 dmae.len = len32; 574 dmae.len = len32;
572 575
573 /* issue the command and wait for completion */ 576 /* issue the command and wait for completion */
574 bnx2x_issue_dmae_with_comp(bp, &dmae); 577 rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
578 if (rc) {
579 BNX2X_ERR("DMAE returned failure %d\n", rc);
580 bnx2x_panic();
581 }
575} 582}
576 583
577void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) 584void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
578{ 585{
586 int rc;
579 struct dmae_command dmae; 587 struct dmae_command dmae;
580 588
581 if (!bp->dmae_ready) { 589 if (!bp->dmae_ready) {
@@ -603,7 +611,11 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
603 dmae.len = len32; 611 dmae.len = len32;
604 612
605 /* issue the command and wait for completion */ 613 /* issue the command and wait for completion */
606 bnx2x_issue_dmae_with_comp(bp, &dmae); 614 rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
615 if (rc) {
616 BNX2X_ERR("DMAE returned failure %d\n", rc);
617 bnx2x_panic();
618 }
607} 619}
608 620
609static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 621static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
@@ -811,8 +823,8 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
811 u32 val = REG_RD(bp, addr); 823 u32 val = REG_RD(bp, addr);
812 824
813 /* in E1 we must use only PCI configuration space to disable 825 /* in E1 we must use only PCI configuration space to disable
814 * MSI/MSIX capablility 826 * MSI/MSIX capability
815 * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block 827 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
816 */ 828 */
817 if (CHIP_IS_E1(bp)) { 829 if (CHIP_IS_E1(bp)) {
818 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on 830 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
@@ -839,7 +851,7 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
839 851
840 REG_WR(bp, addr, val); 852 REG_WR(bp, addr, val);
841 if (REG_RD(bp, addr) != val) 853 if (REG_RD(bp, addr) != val)
842 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 854 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
843} 855}
844 856
845static void bnx2x_igu_int_disable(struct bnx2x *bp) 857static void bnx2x_igu_int_disable(struct bnx2x *bp)
@@ -857,7 +869,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
857 869
858 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 870 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
859 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) 871 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
860 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 872 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
861} 873}
862 874
863static void bnx2x_int_disable(struct bnx2x *bp) 875static void bnx2x_int_disable(struct bnx2x *bp)
@@ -917,7 +929,6 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
917 sp_sb_data.p_func.vf_valid, 929 sp_sb_data.p_func.vf_valid,
918 sp_sb_data.state); 930 sp_sb_data.state);
919 931
920
921 for_each_eth_queue(bp, i) { 932 for_each_eth_queue(bp, i) {
922 struct bnx2x_fastpath *fp = &bp->fp[i]; 933 struct bnx2x_fastpath *fp = &bp->fp[i];
923 int loop; 934 int loop;
@@ -1016,7 +1027,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
1016 hc_sm_p[j].timer_value); 1027 hc_sm_p[j].timer_value);
1017 } 1028 }
1018 1029
1019 /* Indecies data */ 1030 /* Indices data */
1020 for (j = 0; j < loop; j++) { 1031 for (j = 0; j < loop; j++) {
1021 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, 1032 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
1022 hc_index_p[j].flags, 1033 hc_index_p[j].flags,
@@ -1027,6 +1038,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
1027#ifdef BNX2X_STOP_ON_ERROR 1038#ifdef BNX2X_STOP_ON_ERROR
1028 1039
1029 /* event queue */ 1040 /* event queue */
1041 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1030 for (i = 0; i < NUM_EQ_DESC; i++) { 1042 for (i = 0; i < NUM_EQ_DESC; i++) {
1031 u32 *data = (u32 *)&bp->eq_ring[i].message.data; 1043 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1032 1044
@@ -1111,7 +1123,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
1111 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW 1123 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
1112 * initialization. 1124 * initialization.
1113 */ 1125 */
1114#define FLR_WAIT_USEC 10000 /* 10 miliseconds */ 1126#define FLR_WAIT_USEC 10000 /* 10 milliseconds */
1115#define FLR_WAIT_INTERVAL 50 /* usec */ 1127#define FLR_WAIT_INTERVAL 50 /* usec */
1116#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */ 1128#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
1117 1129
@@ -1290,7 +1302,6 @@ void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1290 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) 1302 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1291 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); 1303 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1292 1304
1293
1294 /* Verify the transmission buffers are flushed P0, P1, P4 */ 1305 /* Verify the transmission buffers are flushed P0, P1, P4 */
1295 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) 1306 for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1296 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); 1307 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
@@ -1305,11 +1316,9 @@ void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1305#define OP_GEN_AGG_VECT(index) \ 1316#define OP_GEN_AGG_VECT(index) \
1306 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 1317 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1307 1318
1308
1309int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) 1319int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1310{ 1320{
1311 u32 op_gen_command = 0; 1321 u32 op_gen_command = 0;
1312
1313 u32 comp_addr = BAR_CSTRORM_INTMEM + 1322 u32 comp_addr = BAR_CSTRORM_INTMEM +
1314 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); 1323 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1315 int ret = 0; 1324 int ret = 0;
@@ -1334,7 +1343,7 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1334 bnx2x_panic(); 1343 bnx2x_panic();
1335 return 1; 1344 return 1;
1336 } 1345 }
1337 /* Zero completion for nxt FLR */ 1346 /* Zero completion for next FLR */
1338 REG_WR(bp, comp_addr, 0); 1347 REG_WR(bp, comp_addr, 0);
1339 1348
1340 return ret; 1349 return ret;
@@ -1352,7 +1361,6 @@ u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1352*/ 1361*/
1353static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) 1362static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1354{ 1363{
1355
1356 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ 1364 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1357 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1365 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1358 CFC_REG_NUM_LCIDS_INSIDE_PF, 1366 CFC_REG_NUM_LCIDS_INSIDE_PF,
@@ -1360,7 +1368,6 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1360 poll_cnt)) 1368 poll_cnt))
1361 return 1; 1369 return 1;
1362 1370
1363
1364 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ 1371 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1365 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1372 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1366 DORQ_REG_PF_USAGE_CNT, 1373 DORQ_REG_PF_USAGE_CNT,
@@ -1390,7 +1397,7 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1390 /* Wait DMAE PF usage counter to zero */ 1397 /* Wait DMAE PF usage counter to zero */
1391 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1398 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1392 dmae_reg_go_c[INIT_DMAE_C(bp)], 1399 dmae_reg_go_c[INIT_DMAE_C(bp)],
1393 "DMAE dommand register timed out", 1400 "DMAE command register timed out",
1394 poll_cnt)) 1401 poll_cnt))
1395 return 1; 1402 return 1;
1396 1403
@@ -1770,7 +1777,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1770 break; 1777 break;
1771 1778
1772 case (RAMROD_CMD_ID_ETH_TERMINATE): 1779 case (RAMROD_CMD_ID_ETH_TERMINATE):
1773 DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid); 1780 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1774 drv_cmd = BNX2X_Q_CMD_TERMINATE; 1781 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1775 break; 1782 break;
1776 1783
@@ -1859,7 +1866,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1859 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); 1866 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1860 if (status & mask) { 1867 if (status & mask) {
1861 /* Handle Rx or Tx according to SB id */ 1868 /* Handle Rx or Tx according to SB id */
1862 prefetch(fp->rx_cons_sb);
1863 for_each_cos_in_tx_queue(fp, cos) 1869 for_each_cos_in_tx_queue(fp, cos)
1864 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); 1870 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1865 prefetch(&fp->sb_running_index[SM_RX_ID]); 1871 prefetch(&fp->sb_running_index[SM_RX_ID]);
@@ -1947,7 +1953,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1947 if (lock_status & resource_bit) 1953 if (lock_status & resource_bit)
1948 return 0; 1954 return 0;
1949 1955
1950 msleep(5); 1956 usleep_range(5000, 10000);
1951 } 1957 }
1952 BNX2X_ERR("Timeout\n"); 1958 BNX2X_ERR("Timeout\n");
1953 return -EAGAIN; 1959 return -EAGAIN;
@@ -1982,8 +1988,8 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1982 /* Validating that the resource is currently taken */ 1988 /* Validating that the resource is currently taken */
1983 lock_status = REG_RD(bp, hw_lock_control_reg); 1989 lock_status = REG_RD(bp, hw_lock_control_reg);
1984 if (!(lock_status & resource_bit)) { 1990 if (!(lock_status & resource_bit)) {
1985 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n", 1991 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
1986 lock_status, resource_bit); 1992 lock_status, resource_bit);
1987 return -EFAULT; 1993 return -EFAULT;
1988 } 1994 }
1989 1995
@@ -1991,7 +1997,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1991 return 0; 1997 return 0;
1992} 1998}
1993 1999
1994
1995int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) 2000int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1996{ 2001{
1997 /* The GPIO should be swapped if swap register is set and active */ 2002 /* The GPIO should be swapped if swap register is set and active */
@@ -2347,14 +2352,13 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2347 return rc; 2352 return rc;
2348} 2353}
2349 2354
2350
2351/* Calculates the sum of vn_min_rates. 2355/* Calculates the sum of vn_min_rates.
2352 It's needed for further normalizing of the min_rates. 2356 It's needed for further normalizing of the min_rates.
2353 Returns: 2357 Returns:
2354 sum of vn_min_rates. 2358 sum of vn_min_rates.
2355 or 2359 or
2356 0 - if all the min_rates are 0. 2360 0 - if all the min_rates are 0.
2357 In the later case fainess algorithm should be deactivated. 2361 In the later case fairness algorithm should be deactivated.
2358 If not all min_rates are zero then those that are zeroes will be set to 1. 2362 If not all min_rates are zero then those that are zeroes will be set to 1.
2359 */ 2363 */
2360static void bnx2x_calc_vn_min(struct bnx2x *bp, 2364static void bnx2x_calc_vn_min(struct bnx2x *bp,
@@ -2419,7 +2423,6 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2419 input->vnic_max_rate[vn] = vn_max_rate; 2423 input->vnic_max_rate[vn] = vn_max_rate;
2420} 2424}
2421 2425
2422
2423static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) 2426static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2424{ 2427{
2425 if (CHIP_REV_IS_SLOW(bp)) 2428 if (CHIP_REV_IS_SLOW(bp))
@@ -2435,7 +2438,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
2435 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); 2438 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2436 2439
2437 if (BP_NOMCP(bp)) 2440 if (BP_NOMCP(bp))
2438 return; /* what should be the default bvalue in this case */ 2441 return; /* what should be the default value in this case */
2439 2442
2440 /* For 2 port configuration the absolute function number formula 2443 /* For 2 port configuration the absolute function number formula
2441 * is: 2444 * is:
@@ -2901,7 +2904,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2901 return rc; 2904 return rc;
2902} 2905}
2903 2906
2904
2905static void storm_memset_func_cfg(struct bnx2x *bp, 2907static void storm_memset_func_cfg(struct bnx2x *bp,
2906 struct tstorm_eth_function_common_config *tcfg, 2908 struct tstorm_eth_function_common_config *tcfg,
2907 u16 abs_fid) 2909 u16 abs_fid)
@@ -2935,7 +2937,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2935} 2937}
2936 2938
2937/** 2939/**
2938 * bnx2x_get_tx_only_flags - Return common flags 2940 * bnx2x_get_common_flags - Return common flags
2939 * 2941 *
2940 * @bp device handle 2942 * @bp device handle
2941 * @fp queue handle 2943 * @fp queue handle
@@ -3006,7 +3008,6 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3006 if (IS_MF_AFEX(bp)) 3008 if (IS_MF_AFEX(bp))
3007 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); 3009 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3008 3010
3009
3010 return flags | bnx2x_get_common_flags(bp, fp, true); 3011 return flags | bnx2x_get_common_flags(bp, fp, true);
3011} 3012}
3012 3013
@@ -3082,7 +3083,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3082 * placed on the BD (not including paddings). 3083 * placed on the BD (not including paddings).
3083 */ 3084 */
3084 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - 3085 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3085 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; 3086 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3086 3087
3087 rxq_init->cl_qzone_id = fp->cl_qzone_id; 3088 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3088 rxq_init->tpa_agg_sz = tpa_agg_size; 3089 rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -3124,7 +3125,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3124 txq_init->fw_sb_id = fp->fw_sb_id; 3125 txq_init->fw_sb_id = fp->fw_sb_id;
3125 3126
3126 /* 3127 /*
3127 * set the tss leading client id for TX classfication == 3128 * set the tss leading client id for TX classification ==
3128 * leading RSS client id 3129 * leading RSS client id
3129 */ 3130 */
3130 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); 3131 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
@@ -3196,7 +3197,6 @@ static void bnx2x_pf_init(struct bnx2x *bp)
3196 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); 3197 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3197} 3198}
3198 3199
3199
3200static void bnx2x_e1h_disable(struct bnx2x *bp) 3200static void bnx2x_e1h_disable(struct bnx2x *bp)
3201{ 3201{
3202 int port = BP_PORT(bp); 3202 int port = BP_PORT(bp);
@@ -3212,7 +3212,7 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
3212 3212
3213 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 3213 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
3214 3214
3215 /* Tx queue should be only reenabled */ 3215 /* Tx queue should be only re-enabled */
3216 netif_tx_wake_all_queues(bp->dev); 3216 netif_tx_wake_all_queues(bp->dev);
3217 3217
3218 /* 3218 /*
@@ -3540,10 +3540,8 @@ static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3540 return true; 3540 return true;
3541 else 3541 else
3542 return false; 3542 return false;
3543
3544} 3543}
3545 3544
3546
3547/** 3545/**
3548 * bnx2x_sp_post - place a single command on an SP ring 3546 * bnx2x_sp_post - place a single command on an SP ring
3549 * 3547 *
@@ -3608,14 +3606,13 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3608 /* 3606 /*
3609 * It's ok if the actual decrement is issued towards the memory 3607 * It's ok if the actual decrement is issued towards the memory
3610 * somewhere between the spin_lock and spin_unlock. Thus no 3608 * somewhere between the spin_lock and spin_unlock. Thus no
3611 * more explict memory barrier is needed. 3609 * more explicit memory barrier is needed.
3612 */ 3610 */
3613 if (common) 3611 if (common)
3614 atomic_dec(&bp->eq_spq_left); 3612 atomic_dec(&bp->eq_spq_left);
3615 else 3613 else
3616 atomic_dec(&bp->cq_spq_left); 3614 atomic_dec(&bp->cq_spq_left);
3617 3615
3618
3619 DP(BNX2X_MSG_SP, 3616 DP(BNX2X_MSG_SP,
3620 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", 3617 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3621 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 3618 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
@@ -3637,15 +3634,14 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
3637 3634
3638 might_sleep(); 3635 might_sleep();
3639 for (j = 0; j < 1000; j++) { 3636 for (j = 0; j < 1000; j++) {
3640 val = (1UL << 31); 3637 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3641 REG_WR(bp, GRCBASE_MCP + 0x9c, val); 3638 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3642 val = REG_RD(bp, GRCBASE_MCP + 0x9c); 3639 if (val & MCPR_ACCESS_LOCK_LOCK)
3643 if (val & (1L << 31))
3644 break; 3640 break;
3645 3641
3646 msleep(5); 3642 usleep_range(5000, 10000);
3647 } 3643 }
3648 if (!(val & (1L << 31))) { 3644 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3649 BNX2X_ERR("Cannot acquire MCP access lock register\n"); 3645 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3650 rc = -EBUSY; 3646 rc = -EBUSY;
3651 } 3647 }
@@ -3656,7 +3652,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
3656/* release split MCP access lock register */ 3652/* release split MCP access lock register */
3657static void bnx2x_release_alr(struct bnx2x *bp) 3653static void bnx2x_release_alr(struct bnx2x *bp)
3658{ 3654{
3659 REG_WR(bp, GRCBASE_MCP + 0x9c, 0); 3655 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3660} 3656}
3661 3657
3662#define BNX2X_DEF_SB_ATT_IDX 0x0001 3658#define BNX2X_DEF_SB_ATT_IDX 0x0001
@@ -3678,7 +3674,7 @@ static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3678 rc |= BNX2X_DEF_SB_IDX; 3674 rc |= BNX2X_DEF_SB_IDX;
3679 } 3675 }
3680 3676
3681 /* Do not reorder: indecies reading should complete before handling */ 3677 /* Do not reorder: indices reading should complete before handling */
3682 barrier(); 3678 barrier();
3683 return rc; 3679 return rc;
3684} 3680}
@@ -3827,8 +3823,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
3827 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" 3823 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
3828 "Please contact OEM Support for assistance\n"); 3824 "Please contact OEM Support for assistance\n");
3829 3825
3830 /* 3826 /* Schedule device reset (unload)
3831 * Schedule device reset (unload)
3832 * This is due to some boards consuming sufficient power when driver is 3827 * This is due to some boards consuming sufficient power when driver is
3833 * up to overheat if fan fails. 3828 * up to overheat if fan fails.
3834 */ 3829 */
@@ -3836,7 +3831,6 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
3836 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); 3831 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3837 smp_mb__after_clear_bit(); 3832 smp_mb__after_clear_bit();
3838 schedule_delayed_work(&bp->sp_rtnl_task, 0); 3833 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3839
3840} 3834}
3841 3835
3842static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 3836static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -4106,7 +4100,7 @@ static void bnx2x_clear_reset_global(struct bnx2x *bp)
4106 */ 4100 */
4107static bool bnx2x_reset_is_global(struct bnx2x *bp) 4101static bool bnx2x_reset_is_global(struct bnx2x *bp)
4108{ 4102{
4109 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4103 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4110 4104
4111 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); 4105 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4112 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; 4106 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
@@ -4157,7 +4151,7 @@ void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4157 */ 4151 */
4158bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) 4152bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4159{ 4153{
4160 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4154 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4161 u32 bit = engine ? 4155 u32 bit = engine ?
4162 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4156 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4163 4157
@@ -4260,13 +4254,18 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4260 return val != 0; 4254 return val != 0;
4261} 4255}
4262 4256
4257static void _print_parity(struct bnx2x *bp, u32 reg)
4258{
4259 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4260}
4261
4263static void _print_next_block(int idx, const char *blk) 4262static void _print_next_block(int idx, const char *blk)
4264{ 4263{
4265 pr_cont("%s%s", idx ? ", " : "", blk); 4264 pr_cont("%s%s", idx ? ", " : "", blk);
4266} 4265}
4267 4266
4268static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, 4267static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4269 bool print) 4268 int par_num, bool print)
4270{ 4269{
4271 int i = 0; 4270 int i = 0;
4272 u32 cur_bit = 0; 4271 u32 cur_bit = 0;
@@ -4275,33 +4274,54 @@ static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
4275 if (sig & cur_bit) { 4274 if (sig & cur_bit) {
4276 switch (cur_bit) { 4275 switch (cur_bit) {
4277 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4276 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4278 if (print) 4277 if (print) {
4279 _print_next_block(par_num++, "BRB"); 4278 _print_next_block(par_num++, "BRB");
4279 _print_parity(bp,
4280 BRB1_REG_BRB1_PRTY_STS);
4281 }
4280 break; 4282 break;
4281 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4283 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4282 if (print) 4284 if (print) {
4283 _print_next_block(par_num++, "PARSER"); 4285 _print_next_block(par_num++, "PARSER");
4286 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4287 }
4284 break; 4288 break;
4285 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4289 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4286 if (print) 4290 if (print) {
4287 _print_next_block(par_num++, "TSDM"); 4291 _print_next_block(par_num++, "TSDM");
4292 _print_parity(bp,
4293 TSDM_REG_TSDM_PRTY_STS);
4294 }
4288 break; 4295 break;
4289 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4296 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4290 if (print) 4297 if (print) {
4291 _print_next_block(par_num++, 4298 _print_next_block(par_num++,
4292 "SEARCHER"); 4299 "SEARCHER");
4300 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4301 }
4293 break; 4302 break;
4294 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4303 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4295 if (print) 4304 if (print) {
4296 _print_next_block(par_num++, "TCM"); 4305 _print_next_block(par_num++, "TCM");
4306 _print_parity(bp,
4307 TCM_REG_TCM_PRTY_STS);
4308 }
4297 break; 4309 break;
4298 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 4310 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4299 if (print) 4311 if (print) {
4300 _print_next_block(par_num++, "TSEMI"); 4312 _print_next_block(par_num++, "TSEMI");
4313 _print_parity(bp,
4314 TSEM_REG_TSEM_PRTY_STS_0);
4315 _print_parity(bp,
4316 TSEM_REG_TSEM_PRTY_STS_1);
4317 }
4301 break; 4318 break;
4302 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4319 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4303 if (print) 4320 if (print) {
4304 _print_next_block(par_num++, "XPB"); 4321 _print_next_block(par_num++, "XPB");
4322 _print_parity(bp, GRCBASE_XPB +
4323 PB_REG_PB_PRTY_STS);
4324 }
4305 break; 4325 break;
4306 } 4326 }
4307 4327
@@ -4313,8 +4333,9 @@ static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
4313 return par_num; 4333 return par_num;
4314} 4334}
4315 4335
4316static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, 4336static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4317 bool *global, bool print) 4337 int par_num, bool *global,
4338 bool print)
4318{ 4339{
4319 int i = 0; 4340 int i = 0;
4320 u32 cur_bit = 0; 4341 u32 cur_bit = 0;
@@ -4323,37 +4344,66 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
4323 if (sig & cur_bit) { 4344 if (sig & cur_bit) {
4324 switch (cur_bit) { 4345 switch (cur_bit) {
4325 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4346 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4326 if (print) 4347 if (print) {
4327 _print_next_block(par_num++, "PBF"); 4348 _print_next_block(par_num++, "PBF");
4349 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4350 }
4328 break; 4351 break;
4329 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4352 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4330 if (print) 4353 if (print) {
4331 _print_next_block(par_num++, "QM"); 4354 _print_next_block(par_num++, "QM");
4355 _print_parity(bp, QM_REG_QM_PRTY_STS);
4356 }
4332 break; 4357 break;
4333 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4358 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4334 if (print) 4359 if (print) {
4335 _print_next_block(par_num++, "TM"); 4360 _print_next_block(par_num++, "TM");
4361 _print_parity(bp, TM_REG_TM_PRTY_STS);
4362 }
4336 break; 4363 break;
4337 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4364 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4338 if (print) 4365 if (print) {
4339 _print_next_block(par_num++, "XSDM"); 4366 _print_next_block(par_num++, "XSDM");
4367 _print_parity(bp,
4368 XSDM_REG_XSDM_PRTY_STS);
4369 }
4340 break; 4370 break;
4341 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4371 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4342 if (print) 4372 if (print) {
4343 _print_next_block(par_num++, "XCM"); 4373 _print_next_block(par_num++, "XCM");
4374 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4375 }
4344 break; 4376 break;
4345 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4377 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4346 if (print) 4378 if (print) {
4347 _print_next_block(par_num++, "XSEMI"); 4379 _print_next_block(par_num++, "XSEMI");
4380 _print_parity(bp,
4381 XSEM_REG_XSEM_PRTY_STS_0);
4382 _print_parity(bp,
4383 XSEM_REG_XSEM_PRTY_STS_1);
4384 }
4348 break; 4385 break;
4349 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4386 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4350 if (print) 4387 if (print) {
4351 _print_next_block(par_num++, 4388 _print_next_block(par_num++,
4352 "DOORBELLQ"); 4389 "DOORBELLQ");
4390 _print_parity(bp,
4391 DORQ_REG_DORQ_PRTY_STS);
4392 }
4353 break; 4393 break;
4354 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4394 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4355 if (print) 4395 if (print) {
4356 _print_next_block(par_num++, "NIG"); 4396 _print_next_block(par_num++, "NIG");
4397 if (CHIP_IS_E1x(bp)) {
4398 _print_parity(bp,
4399 NIG_REG_NIG_PRTY_STS);
4400 } else {
4401 _print_parity(bp,
4402 NIG_REG_NIG_PRTY_STS_0);
4403 _print_parity(bp,
4404 NIG_REG_NIG_PRTY_STS_1);
4405 }
4406 }
4357 break; 4407 break;
4358 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4408 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4359 if (print) 4409 if (print)
@@ -4362,32 +4412,52 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
4362 *global = true; 4412 *global = true;
4363 break; 4413 break;
4364 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4414 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4365 if (print) 4415 if (print) {
4366 _print_next_block(par_num++, "DEBUG"); 4416 _print_next_block(par_num++, "DEBUG");
4417 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4418 }
4367 break; 4419 break;
4368 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4420 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4369 if (print) 4421 if (print) {
4370 _print_next_block(par_num++, "USDM"); 4422 _print_next_block(par_num++, "USDM");
4423 _print_parity(bp,
4424 USDM_REG_USDM_PRTY_STS);
4425 }
4371 break; 4426 break;
4372 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4427 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4373 if (print) 4428 if (print) {
4374 _print_next_block(par_num++, "UCM"); 4429 _print_next_block(par_num++, "UCM");
4430 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4431 }
4375 break; 4432 break;
4376 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4433 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4377 if (print) 4434 if (print) {
4378 _print_next_block(par_num++, "USEMI"); 4435 _print_next_block(par_num++, "USEMI");
4436 _print_parity(bp,
4437 USEM_REG_USEM_PRTY_STS_0);
4438 _print_parity(bp,
4439 USEM_REG_USEM_PRTY_STS_1);
4440 }
4379 break; 4441 break;
4380 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4442 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4381 if (print) 4443 if (print) {
4382 _print_next_block(par_num++, "UPB"); 4444 _print_next_block(par_num++, "UPB");
4445 _print_parity(bp, GRCBASE_UPB +
4446 PB_REG_PB_PRTY_STS);
4447 }
4383 break; 4448 break;
4384 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4449 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4385 if (print) 4450 if (print) {
4386 _print_next_block(par_num++, "CSDM"); 4451 _print_next_block(par_num++, "CSDM");
4452 _print_parity(bp,
4453 CSDM_REG_CSDM_PRTY_STS);
4454 }
4387 break; 4455 break;
4388 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4456 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4389 if (print) 4457 if (print) {
4390 _print_next_block(par_num++, "CCM"); 4458 _print_next_block(par_num++, "CCM");
4459 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4460 }
4391 break; 4461 break;
4392 } 4462 }
4393 4463
@@ -4399,8 +4469,8 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
4399 return par_num; 4469 return par_num;
4400} 4470}
4401 4471
4402static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, 4472static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4403 bool print) 4473 int par_num, bool print)
4404{ 4474{
4405 int i = 0; 4475 int i = 0;
4406 u32 cur_bit = 0; 4476 u32 cur_bit = 0;
@@ -4409,12 +4479,23 @@ static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
4409 if (sig & cur_bit) { 4479 if (sig & cur_bit) {
4410 switch (cur_bit) { 4480 switch (cur_bit) {
4411 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4481 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4412 if (print) 4482 if (print) {
4413 _print_next_block(par_num++, "CSEMI"); 4483 _print_next_block(par_num++, "CSEMI");
4484 _print_parity(bp,
4485 CSEM_REG_CSEM_PRTY_STS_0);
4486 _print_parity(bp,
4487 CSEM_REG_CSEM_PRTY_STS_1);
4488 }
4414 break; 4489 break;
4415 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4490 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4416 if (print) 4491 if (print) {
4417 _print_next_block(par_num++, "PXP"); 4492 _print_next_block(par_num++, "PXP");
4493 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4494 _print_parity(bp,
4495 PXP2_REG_PXP2_PRTY_STS_0);
4496 _print_parity(bp,
4497 PXP2_REG_PXP2_PRTY_STS_1);
4498 }
4418 break; 4499 break;
4419 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4500 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4420 if (print) 4501 if (print)
@@ -4422,24 +4503,42 @@ static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
4422 "PXPPCICLOCKCLIENT"); 4503 "PXPPCICLOCKCLIENT");
4423 break; 4504 break;
4424 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 4505 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4425 if (print) 4506 if (print) {
4426 _print_next_block(par_num++, "CFC"); 4507 _print_next_block(par_num++, "CFC");
4508 _print_parity(bp,
4509 CFC_REG_CFC_PRTY_STS);
4510 }
4427 break; 4511 break;
4428 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4512 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4429 if (print) 4513 if (print) {
4430 _print_next_block(par_num++, "CDU"); 4514 _print_next_block(par_num++, "CDU");
4515 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4516 }
4431 break; 4517 break;
4432 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4518 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4433 if (print) 4519 if (print) {
4434 _print_next_block(par_num++, "DMAE"); 4520 _print_next_block(par_num++, "DMAE");
4521 _print_parity(bp,
4522 DMAE_REG_DMAE_PRTY_STS);
4523 }
4435 break; 4524 break;
4436 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4525 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4437 if (print) 4526 if (print) {
4438 _print_next_block(par_num++, "IGU"); 4527 _print_next_block(par_num++, "IGU");
4528 if (CHIP_IS_E1x(bp))
4529 _print_parity(bp,
4530 HC_REG_HC_PRTY_STS);
4531 else
4532 _print_parity(bp,
4533 IGU_REG_IGU_PRTY_STS);
4534 }
4439 break; 4535 break;
4440 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4536 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4441 if (print) 4537 if (print) {
4442 _print_next_block(par_num++, "MISC"); 4538 _print_next_block(par_num++, "MISC");
4539 _print_parity(bp,
4540 MISC_REG_MISC_PRTY_STS);
4541 }
4443 break; 4542 break;
4444 } 4543 }
4445 4544
@@ -4493,8 +4592,8 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4493 return par_num; 4592 return par_num;
4494} 4593}
4495 4594
4496static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, 4595static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4497 bool print) 4596 int par_num, bool print)
4498{ 4597{
4499 int i = 0; 4598 int i = 0;
4500 u32 cur_bit = 0; 4599 u32 cur_bit = 0;
@@ -4503,12 +4602,18 @@ static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
4503 if (sig & cur_bit) { 4602 if (sig & cur_bit) {
4504 switch (cur_bit) { 4603 switch (cur_bit) {
4505 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4604 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4506 if (print) 4605 if (print) {
4507 _print_next_block(par_num++, "PGLUE_B"); 4606 _print_next_block(par_num++, "PGLUE_B");
4607 _print_parity(bp,
4608 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4609 }
4508 break; 4610 break;
4509 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4611 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4510 if (print) 4612 if (print) {
4511 _print_next_block(par_num++, "ATC"); 4613 _print_next_block(par_num++, "ATC");
4614 _print_parity(bp,
4615 ATC_REG_ATC_PRTY_STS);
4616 }
4512 break; 4617 break;
4513 } 4618 }
4514 4619
@@ -4539,15 +4644,15 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4539 if (print) 4644 if (print)
4540 netdev_err(bp->dev, 4645 netdev_err(bp->dev,
4541 "Parity errors detected in blocks: "); 4646 "Parity errors detected in blocks: ");
4542 par_num = bnx2x_check_blocks_with_parity0( 4647 par_num = bnx2x_check_blocks_with_parity0(bp,
4543 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); 4648 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
4544 par_num = bnx2x_check_blocks_with_parity1( 4649 par_num = bnx2x_check_blocks_with_parity1(bp,
4545 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); 4650 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
4546 par_num = bnx2x_check_blocks_with_parity2( 4651 par_num = bnx2x_check_blocks_with_parity2(bp,
4547 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); 4652 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
4548 par_num = bnx2x_check_blocks_with_parity3( 4653 par_num = bnx2x_check_blocks_with_parity3(
4549 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); 4654 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
4550 par_num = bnx2x_check_blocks_with_parity4( 4655 par_num = bnx2x_check_blocks_with_parity4(bp,
4551 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); 4656 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
4552 4657
4553 if (print) 4658 if (print)
@@ -4591,7 +4696,6 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4591 return bnx2x_parity_attn(bp, global, print, attn.sig); 4696 return bnx2x_parity_attn(bp, global, print, attn.sig);
4592} 4697}
4593 4698
4594
4595static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) 4699static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
4596{ 4700{
4597 u32 val; 4701 u32 val;
@@ -4643,7 +4747,6 @@ static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
4643 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4747 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4644 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 4748 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
4645 } 4749 }
4646
4647} 4750}
4648 4751
4649static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 4752static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
@@ -4878,7 +4981,6 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4878 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 4981 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
4879 else if (rc > 0) 4982 else if (rc > 0)
4880 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); 4983 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
4881
4882} 4984}
4883 4985
4884static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 4986static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
@@ -5009,7 +5111,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5009 hw_cons = le16_to_cpu(*bp->eq_cons_sb); 5111 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5010 5112
5011 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256. 5113 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
5012 * when we get the the next-page we nned to adjust so the loop 5114 * when we get the next-page we need to adjust so the loop
5013 * condition below will be met. The next element is the size of a 5115 * condition below will be met. The next element is the size of a
5014 * regular element and hence incrementing by 1 5116 * regular element and hence incrementing by 1
5015 */ 5117 */
@@ -5075,8 +5177,6 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5075 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) 5177 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5076 break; 5178 break;
5077 5179
5078
5079
5080 goto next_spqe; 5180 goto next_spqe;
5081 5181
5082 case EVENT_RING_OPCODE_STOP_TRAFFIC: 5182 case EVENT_RING_OPCODE_STOP_TRAFFIC:
@@ -5218,7 +5318,7 @@ static void bnx2x_sp_task(struct work_struct *work)
5218 5318
5219 DP(BNX2X_MSG_SP, "sp task invoked\n"); 5319 DP(BNX2X_MSG_SP, "sp task invoked\n");
5220 5320
5221 /* make sure the atomic interupt_occurred has been written */ 5321 /* make sure the atomic interrupt_occurred has been written */
5222 smp_rmb(); 5322 smp_rmb();
5223 if (atomic_read(&bp->interrupt_occurred)) { 5323 if (atomic_read(&bp->interrupt_occurred)) {
5224 5324
@@ -5265,7 +5365,6 @@ static void bnx2x_sp_task(struct work_struct *work)
5265 /* ack status block only if something was actually handled */ 5365 /* ack status block only if something was actually handled */
5266 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, 5366 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5267 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5367 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5268
5269 } 5368 }
5270 5369
5271 /* must be called after the EQ processing (since eq leads to sriov 5370 /* must be called after the EQ processing (since eq leads to sriov
@@ -5316,7 +5415,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5316 5415
5317/* end of slow path */ 5416/* end of slow path */
5318 5417
5319
5320void bnx2x_drv_pulse(struct bnx2x *bp) 5418void bnx2x_drv_pulse(struct bnx2x *bp)
5321{ 5419{
5322 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, 5420 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
@@ -5360,7 +5458,7 @@ static void bnx2x_timer(unsigned long data)
5360 5458
5361 /* sample pf vf bulletin board for new posts from pf */ 5459 /* sample pf vf bulletin board for new posts from pf */
5362 if (IS_VF(bp)) 5460 if (IS_VF(bp))
5363 bnx2x_sample_bulletin(bp); 5461 bnx2x_timer_sriov(bp);
5364 5462
5365 mod_timer(&bp->timer, jiffies + bp->current_interval); 5463 mod_timer(&bp->timer, jiffies + bp->current_interval);
5366} 5464}
@@ -5382,7 +5480,6 @@ static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5382 else 5480 else
5383 for (i = 0; i < len; i++) 5481 for (i = 0; i < len; i++)
5384 REG_WR8(bp, addr + i, fill); 5482 REG_WR8(bp, addr + i, fill);
5385
5386} 5483}
5387 5484
5388/* helper: writes FP SP data to FW - data_size in dwords */ 5485/* helper: writes FP SP data to FW - data_size in dwords */
@@ -5461,10 +5558,8 @@ static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5461 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5558 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5462 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, 5559 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5463 CSTORM_SP_SYNC_BLOCK_SIZE); 5560 CSTORM_SP_SYNC_BLOCK_SIZE);
5464
5465} 5561}
5466 5562
5467
5468static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 5563static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5469 int igu_sb_id, int igu_seg_id) 5564 int igu_sb_id, int igu_seg_id)
5470{ 5565{
@@ -5474,7 +5569,6 @@ static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5474 hc_sm->time_to_expire = 0xFFFFFFFF; 5569 hc_sm->time_to_expire = 0xFFFFFFFF;
5475} 5570}
5476 5571
5477
5478/* allocates state machine ids. */ 5572/* allocates state machine ids. */
5479static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) 5573static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5480{ 5574{
@@ -5700,7 +5794,7 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
5700 bp->eq_cons = 0; 5794 bp->eq_cons = 0;
5701 bp->eq_prod = NUM_EQ_DESC; 5795 bp->eq_prod = NUM_EQ_DESC;
5702 bp->eq_cons_sb = BNX2X_EQ_INDEX; 5796 bp->eq_cons_sb = BNX2X_EQ_INDEX;
5703 /* we want a warning message before it gets rought... */ 5797 /* we want a warning message before it gets wrought... */
5704 atomic_set(&bp->eq_spq_left, 5798 atomic_set(&bp->eq_spq_left,
5705 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); 5799 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
5706} 5800}
@@ -5784,7 +5878,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
5784 5878
5785 break; 5879 break;
5786 case BNX2X_RX_MODE_PROMISC: 5880 case BNX2X_RX_MODE_PROMISC:
5787 /* According to deffinition of SI mode, iface in promisc mode 5881 /* According to definition of SI mode, iface in promisc mode
5788 * should receive matched and unmatched (in resolution of port) 5882 * should receive matched and unmatched (in resolution of port)
5789 * unicast packets. 5883 * unicast packets.
5790 */ 5884 */
@@ -5927,7 +6021,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5927 /* init shortcut */ 6021 /* init shortcut */
5928 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); 6022 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
5929 6023
5930 /* Setup SB indicies */ 6024 /* Setup SB indices */
5931 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 6025 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5932 6026
5933 /* Configure Queue State object */ 6027 /* Configure Queue State object */
@@ -5983,6 +6077,8 @@ static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5983 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 6077 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5984 } 6078 }
5985 6079
6080 *txdata->tx_cons_sb = cpu_to_le16(0);
6081
5986 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 6082 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
5987 txdata->tx_db.data.zero_fill1 = 0; 6083 txdata->tx_db.data.zero_fill1 = 0;
5988 txdata->tx_db.data.prod = 0; 6084 txdata->tx_db.data.prod = 0;
@@ -6001,6 +6097,7 @@ static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6001 for_each_tx_queue_cnic(bp, i) 6097 for_each_tx_queue_cnic(bp, i)
6002 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); 6098 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6003} 6099}
6100
6004static void bnx2x_init_tx_rings(struct bnx2x *bp) 6101static void bnx2x_init_tx_rings(struct bnx2x *bp)
6005{ 6102{
6006 int i; 6103 int i;
@@ -6043,11 +6140,6 @@ void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6043 bnx2x_init_rx_rings(bp); 6140 bnx2x_init_rx_rings(bp);
6044 bnx2x_init_tx_rings(bp); 6141 bnx2x_init_tx_rings(bp);
6045 6142
6046 if (IS_VF(bp)) {
6047 bnx2x_memset_stats(bp);
6048 return;
6049 }
6050
6051 if (IS_PF(bp)) { 6143 if (IS_PF(bp)) {
6052 /* Initialize MOD_ABS interrupts */ 6144 /* Initialize MOD_ABS interrupts */
6053 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, 6145 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
@@ -6058,6 +6150,8 @@ void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6058 bnx2x_init_def_sb(bp); 6150 bnx2x_init_def_sb(bp);
6059 bnx2x_update_dsb_idx(bp); 6151 bnx2x_update_dsb_idx(bp);
6060 bnx2x_init_sp_ring(bp); 6152 bnx2x_init_sp_ring(bp);
6153 } else {
6154 bnx2x_memset_stats(bp);
6061 } 6155 }
6062} 6156}
6063 6157
@@ -6236,7 +6330,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
6236 if (val == 0x10) 6330 if (val == 0x10)
6237 break; 6331 break;
6238 6332
6239 msleep(10); 6333 usleep_range(10000, 20000);
6240 count--; 6334 count--;
6241 } 6335 }
6242 if (val != 0x10) { 6336 if (val != 0x10) {
@@ -6251,7 +6345,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
6251 if (val == 1) 6345 if (val == 1)
6252 break; 6346 break;
6253 6347
6254 msleep(10); 6348 usleep_range(10000, 20000);
6255 count--; 6349 count--;
6256 } 6350 }
6257 if (val != 0x1) { 6351 if (val != 0x1) {
@@ -6292,7 +6386,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
6292 if (val == 0xb0) 6386 if (val == 0xb0)
6293 break; 6387 break;
6294 6388
6295 msleep(10); 6389 usleep_range(10000, 20000);
6296 count--; 6390 count--;
6297 } 6391 }
6298 if (val != 0xb0) { 6392 if (val != 0xb0) {
@@ -6681,7 +6775,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6681 * stay set) 6775 * stay set)
6682 * f. If this is VNIC 3 of a port then also init 6776 * f. If this is VNIC 3 of a port then also init
6683 * first_timers_ilt_entry to zero and last_timers_ilt_entry 6777 * first_timers_ilt_entry to zero and last_timers_ilt_entry
6684 * to the last enrty in the ILT. 6778 * to the last entry in the ILT.
6685 * 6779 *
6686 * Notes: 6780 * Notes:
6687 * Currently the PF error in the PGLC is non recoverable. 6781 * Currently the PF error in the PGLC is non recoverable.
@@ -6772,7 +6866,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6772 6866
6773 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); 6867 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
6774 6868
6775
6776 /* QM queues pointers table */ 6869 /* QM queues pointers table */
6777 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); 6870 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
6778 6871
@@ -7013,7 +7106,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
7013 u32 low, high; 7106 u32 low, high;
7014 u32 val; 7107 u32 val;
7015 7108
7016
7017 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 7109 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7018 7110
7019 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 7111 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
@@ -7078,7 +7170,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
7078 BRB1_REG_MAC_GUARANTIED_1 : 7170 BRB1_REG_MAC_GUARANTIED_1 :
7079 BRB1_REG_MAC_GUARANTIED_0), 40); 7171 BRB1_REG_MAC_GUARANTIED_0), 40);
7080 7172
7081
7082 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 7173 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7083 if (CHIP_IS_E3B0(bp)) { 7174 if (CHIP_IS_E3B0(bp)) {
7084 if (IS_MF_AFEX(bp)) { 7175 if (IS_MF_AFEX(bp)) {
@@ -7150,8 +7241,8 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
7150 7241
7151 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 7242 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7152 /* init aeu_mask_attn_func_0/1: 7243 /* init aeu_mask_attn_func_0/1:
7153 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 7244 * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
7154 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 7245 * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
7155 * bits 4-7 are used for "per vn group attention" */ 7246 * bits 4-7 are used for "per vn group attention" */
7156 val = IS_MF(bp) ? 0xF7 : 0x7; 7247 val = IS_MF(bp) ? 0xF7 : 0x7;
7157 /* Enable DCBX attention for all but E1 */ 7248 /* Enable DCBX attention for all but E1 */
@@ -7275,7 +7366,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7275 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) 7366 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7276 msleep(20); 7367 msleep(20);
7277 7368
7278
7279 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { 7369 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7280 DP(NETIF_MSG_HW, 7370 DP(NETIF_MSG_HW,
7281 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", 7371 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
@@ -7295,7 +7385,6 @@ static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7295 bnx2x_ilt_wr(bp, i, 0); 7385 bnx2x_ilt_wr(bp, i, 0);
7296} 7386}
7297 7387
7298
7299static void bnx2x_init_searcher(struct bnx2x *bp) 7388static void bnx2x_init_searcher(struct bnx2x *bp)
7300{ 7389{
7301 int port = BP_PORT(bp); 7390 int port = BP_PORT(bp);
@@ -7331,7 +7420,6 @@ static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7331 int rc, i, port = BP_PORT(bp); 7420 int rc, i, port = BP_PORT(bp);
7332 int vlan_en = 0, mac_en[NUM_MACS]; 7421 int vlan_en = 0, mac_en[NUM_MACS];
7333 7422
7334
7335 /* Close input from network */ 7423 /* Close input from network */
7336 if (bp->mf_mode == SINGLE_FUNCTION) { 7424 if (bp->mf_mode == SINGLE_FUNCTION) {
7337 bnx2x_set_rx_filter(&bp->link_params, 0); 7425 bnx2x_set_rx_filter(&bp->link_params, 0);
@@ -7406,7 +7494,7 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7406 bnx2x_ilt_init_op_cnic(bp, INITOP_SET); 7494 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7407 7495
7408 if (CONFIGURE_NIC_MODE(bp)) { 7496 if (CONFIGURE_NIC_MODE(bp)) {
7409 /* Configrue searcher as part of function hw init */ 7497 /* Configure searcher as part of function hw init */
7410 bnx2x_init_searcher(bp); 7498 bnx2x_init_searcher(bp);
7411 7499
7412 /* Reset NIC mode */ 7500 /* Reset NIC mode */
@@ -7479,8 +7567,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7479 } else { 7567 } else {
7480 /* Set NIC mode */ 7568 /* Set NIC mode */
7481 REG_WR(bp, PRS_REG_NIC_MODE, 1); 7569 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7482 DP(NETIF_MSG_IFUP, "NIC MODE configrued\n"); 7570 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
7483
7484 } 7571 }
7485 7572
7486 if (!CHIP_IS_E1x(bp)) { 7573 if (!CHIP_IS_E1x(bp)) {
@@ -7677,7 +7764,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7677 } 7764 }
7678 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); 7765 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
7679 7766
7680 /* !!! these should become driver const once 7767 /* !!! These should become driver const once
7681 rf-tool supports split-68 const */ 7768 rf-tool supports split-68 const */
7682 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 7769 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
7683 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 7770 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
@@ -7734,7 +7821,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7734 return 0; 7821 return 0;
7735} 7822}
7736 7823
7737
7738void bnx2x_free_mem_cnic(struct bnx2x *bp) 7824void bnx2x_free_mem_cnic(struct bnx2x *bp)
7739{ 7825{
7740 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); 7826 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
@@ -7779,7 +7865,6 @@ void bnx2x_free_mem(struct bnx2x *bp)
7779 bnx2x_iov_free_mem(bp); 7865 bnx2x_iov_free_mem(bp);
7780} 7866}
7781 7867
7782
7783int bnx2x_alloc_mem_cnic(struct bnx2x *bp) 7868int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7784{ 7869{
7785 if (!CHIP_IS_E1x(bp)) 7870 if (!CHIP_IS_E1x(bp))
@@ -7793,7 +7878,7 @@ int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7793 host_hc_status_block_e1x)); 7878 host_hc_status_block_e1x));
7794 7879
7795 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) 7880 if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
7796 /* allocate searcher T2 table, as it wan't allocated before */ 7881 /* allocate searcher T2 table, as it wasn't allocated before */
7797 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 7882 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7798 7883
7799 /* write address to which L5 should insert its values */ 7884 /* write address to which L5 should insert its values */
@@ -8068,7 +8153,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
8068 ilt_client->page_size, 8153 ilt_client->page_size,
8069 ilt_client->flags, 8154 ilt_client->flags,
8070 ilog2(ilt_client->page_size >> 12)); 8155 ilog2(ilt_client->page_size >> 12));
8071
8072 } 8156 }
8073 8157
8074 if (CNIC_SUPPORT(bp)) { 8158 if (CNIC_SUPPORT(bp)) {
@@ -8124,7 +8208,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
8124static void bnx2x_pf_q_prep_init(struct bnx2x *bp, 8208static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8125 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) 8209 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8126{ 8210{
8127
8128 u8 cos; 8211 u8 cos;
8129 int cxt_index, cxt_offset; 8212 int cxt_index, cxt_offset;
8130 8213
@@ -8133,7 +8216,7 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8133 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); 8216 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8134 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); 8217 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8135 8218
8136 /* If HC is supporterd, enable host coalescing in the transition 8219 /* If HC is supported, enable host coalescing in the transition
8137 * to INIT state. 8220 * to INIT state.
8138 */ 8221 */
8139 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); 8222 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
@@ -8205,7 +8288,6 @@ static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8205 return bnx2x_queue_state_change(bp, q_params); 8288 return bnx2x_queue_state_change(bp, q_params);
8206} 8289}
8207 8290
8208
8209/** 8291/**
8210 * bnx2x_setup_queue - setup queue 8292 * bnx2x_setup_queue - setup queue
8211 * 8293 *
@@ -8254,7 +8336,6 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8254 8336
8255 DP(NETIF_MSG_IFUP, "init complete\n"); 8337 DP(NETIF_MSG_IFUP, "init complete\n");
8256 8338
8257
8258 /* Now move the Queue to the SETUP state... */ 8339 /* Now move the Queue to the SETUP state... */
8259 memset(setup_params, 0, sizeof(*setup_params)); 8340 memset(setup_params, 0, sizeof(*setup_params));
8260 8341
@@ -8315,7 +8396,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8315 /* We want to wait for completion in this context */ 8396 /* We want to wait for completion in this context */
8316 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 8397 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8317 8398
8318
8319 /* close tx-only connections */ 8399 /* close tx-only connections */
8320 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 8400 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8321 tx_index < fp->max_cos; 8401 tx_index < fp->max_cos;
@@ -8369,7 +8449,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8369 return bnx2x_queue_state_change(bp, &q_params); 8449 return bnx2x_queue_state_change(bp, &q_params);
8370} 8450}
8371 8451
8372
8373static void bnx2x_reset_func(struct bnx2x *bp) 8452static void bnx2x_reset_func(struct bnx2x *bp)
8374{ 8453{
8375 int port = BP_PORT(bp); 8454 int port = BP_PORT(bp);
@@ -8422,7 +8501,7 @@ static void bnx2x_reset_func(struct bnx2x *bp)
8422 * scan to complete 8501 * scan to complete
8423 */ 8502 */
8424 for (i = 0; i < 200; i++) { 8503 for (i = 0; i < 200; i++) {
8425 msleep(10); 8504 usleep_range(10000, 20000);
8426 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) 8505 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8427 break; 8506 break;
8428 } 8507 }
@@ -8623,14 +8702,14 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
8623 8702
8624 /* 8703 /*
8625 * (assumption: No Attention from MCP at this stage) 8704 * (assumption: No Attention from MCP at this stage)
8626 * PMF probably in the middle of TXdisable/enable transaction 8705 * PMF probably in the middle of TX disable/enable transaction
8627 * 1. Sync IRS for default SB 8706 * 1. Sync IRS for default SB
8628 * 2. Sync SP queue - this guarantes us that attention handling started 8707 * 2. Sync SP queue - this guarantees us that attention handling started
8629 * 3. Wait, that TXdisable/enable transaction completes 8708 * 3. Wait, that TX disable/enable transaction completes
8630 * 8709 *
8631 * 1+2 guranty that if DCBx attention was scheduled it already changed 8710 * 1+2 guarantee that if DCBx attention was scheduled it already changed
8632 * pending bit of transaction from STARTED-->TX_STOPPED, if we alredy 8711 * pending bit of transaction from STARTED-->TX_STOPPED, if we already
8633 * received complettion for the transaction the state is TX_STOPPED. 8712 * received completion for the transaction the state is TX_STOPPED.
8634 * State will return to STARTED after completion of TX_STOPPED-->STARTED 8713 * State will return to STARTED after completion of TX_STOPPED-->STARTED
8635 * transaction. 8714 * transaction.
8636 */ 8715 */
@@ -8660,7 +8739,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
8660 struct bnx2x_func_state_params func_params = {NULL}; 8739 struct bnx2x_func_state_params func_params = {NULL};
8661 8740
8662 DP(NETIF_MSG_IFDOWN, 8741 DP(NETIF_MSG_IFDOWN,
8663 "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); 8742 "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
8664 8743
8665 func_params.f_obj = &bp->func_obj; 8744 func_params.f_obj = &bp->func_obj;
8666 __set_bit(RAMROD_DRV_CLR_ONLY, 8745 __set_bit(RAMROD_DRV_CLR_ONLY,
@@ -8740,7 +8819,6 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8740 8819
8741 bnx2x_iov_chip_cleanup(bp); 8820 bnx2x_iov_chip_cleanup(bp);
8742 8821
8743
8744 /* 8822 /*
8745 * Send the UNLOAD_REQUEST to the MCP. This will return if 8823 * Send the UNLOAD_REQUEST to the MCP. This will return if
8746 * this function should perform FUNC, PORT or COMMON HW 8824 * this function should perform FUNC, PORT or COMMON HW
@@ -8750,7 +8828,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8750 8828
8751 /* 8829 /*
8752 * (assumption: No Attention from MCP at this stage) 8830 * (assumption: No Attention from MCP at this stage)
8753 * PMF probably in the middle of TXdisable/enable transaction 8831 * PMF probably in the middle of TX disable/enable transaction
8754 */ 8832 */
8755 rc = bnx2x_func_wait_started(bp); 8833 rc = bnx2x_func_wait_started(bp);
8756 if (rc) { 8834 if (rc) {
@@ -8813,7 +8891,6 @@ unload_error:
8813 if (rc) 8891 if (rc)
8814 BNX2X_ERR("HW_RESET failed\n"); 8892 BNX2X_ERR("HW_RESET failed\n");
8815 8893
8816
8817 /* Report UNLOAD_DONE to MCP */ 8894 /* Report UNLOAD_DONE to MCP */
8818 bnx2x_send_unload_done(bp, keep_link); 8895 bnx2x_send_unload_done(bp, keep_link);
8819} 8896}
@@ -9179,7 +9256,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9179 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) 9256 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9180 return -EAGAIN; 9257 return -EAGAIN;
9181 9258
9182
9183 /* TBD: Indicate that "process kill" is in progress to MCP */ 9259 /* TBD: Indicate that "process kill" is in progress to MCP */
9184 9260
9185 /* Clear "unprepared" bit */ 9261 /* Clear "unprepared" bit */
@@ -9367,7 +9443,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
9367 * the first leader that performs a 9443 * the first leader that performs a
9368 * leader_reset() reset the global blocks in 9444 * leader_reset() reset the global blocks in
9369 * order to clear global attentions. Otherwise 9445 * order to clear global attentions. Otherwise
9370 * the the gates will remain closed for that 9446 * the gates will remain closed for that
9371 * engine. 9447 * engine.
9372 */ 9448 */
9373 if (load_status || 9449 if (load_status ||
@@ -9480,14 +9556,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
9480 return; 9556 return;
9481 } 9557 }
9482 9558
9483 /* if stop on error is defined no recovery flows should be executed */ 9559 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
9484#ifdef BNX2X_STOP_ON_ERROR 9560#ifdef BNX2X_STOP_ON_ERROR
9485 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" 9561 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
9486 "you will need to reboot when done\n"); 9562 "you will need to reboot when done\n");
9487 goto sp_rtnl_not_reset; 9563 goto sp_rtnl_not_reset;
9488#endif 9564#endif
9489
9490 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
9491 /* 9565 /*
9492 * Clear all pending SP commands as we are going to reset the 9566 * Clear all pending SP commands as we are going to reset the
9493 * function anyway. 9567 * function anyway.
@@ -9502,6 +9576,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
9502 } 9576 }
9503 9577
9504 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { 9578 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
9579#ifdef BNX2X_STOP_ON_ERROR
9580 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
9581 "you will need to reboot when done\n");
9582 goto sp_rtnl_not_reset;
9583#endif
9584
9505 /* 9585 /*
9506 * Clear all pending SP commands as we are going to reset the 9586 * Clear all pending SP commands as we are going to reset the
9507 * function anyway. 9587 * function anyway.
@@ -9540,6 +9620,13 @@ sp_rtnl_not_reset:
9540 "sending set mcast vf pf channel message from rtnl sp-task\n"); 9620 "sending set mcast vf pf channel message from rtnl sp-task\n");
9541 bnx2x_vfpf_set_mcast(bp->dev); 9621 bnx2x_vfpf_set_mcast(bp->dev);
9542 } 9622 }
9623 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
9624 &bp->sp_rtnl_state)){
9625 if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
9626 bnx2x_tx_disable(bp);
9627 BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n");
9628 }
9629 }
9543 9630
9544 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, 9631 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
9545 &bp->sp_rtnl_state)) { 9632 &bp->sp_rtnl_state)) {
@@ -9647,7 +9734,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
9647 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 9734 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
9648 REG_WR(bp, vals->bmac_addr, wb_data[0]); 9735 REG_WR(bp, vals->bmac_addr, wb_data[0]);
9649 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); 9736 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
9650
9651 } 9737 }
9652 BNX2X_DEV_INFO("Disable emac Rx\n"); 9738 BNX2X_DEV_INFO("Disable emac Rx\n");
9653 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; 9739 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
@@ -9681,7 +9767,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
9681 9767
9682 if (mac_stopped) 9768 if (mac_stopped)
9683 msleep(20); 9769 msleep(20);
9684
9685} 9770}
9686 9771
9687#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 9772#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
@@ -9780,6 +9865,21 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
9780 return rc; 9865 return rc;
9781} 9866}
9782 9867
9868bool bnx2x_port_after_undi(struct bnx2x *bp)
9869{
9870 struct bnx2x_prev_path_list *entry;
9871 bool val;
9872
9873 down(&bnx2x_prev_sem);
9874
9875 entry = bnx2x_prev_path_get_entry(bp);
9876 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
9877
9878 up(&bnx2x_prev_sem);
9879
9880 return val;
9881}
9882
9783static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) 9883static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9784{ 9884{
9785 struct bnx2x_prev_path_list *tmp_list; 9885 struct bnx2x_prev_path_list *tmp_list;
@@ -9839,7 +9939,6 @@ static int bnx2x_do_flr(struct bnx2x *bp)
9839 u16 status; 9939 u16 status;
9840 struct pci_dev *dev = bp->pdev; 9940 struct pci_dev *dev = bp->pdev;
9841 9941
9842
9843 if (CHIP_IS_E1x(bp)) { 9942 if (CHIP_IS_E1x(bp)) {
9844 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); 9943 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
9845 return -EINVAL; 9944 return -EINVAL;
@@ -9986,7 +10085,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
9986 10085
9987 if (!timer_count) 10086 if (!timer_count)
9988 BNX2X_ERR("Failed to empty BRB, hope for the best\n"); 10087 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
9989
9990 } 10088 }
9991 10089
9992 /* No packets are in the pipeline, path is ready for reset */ 10090 /* No packets are in the pipeline, path is ready for reset */
@@ -10036,7 +10134,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
10036{ 10134{
10037 int time_counter = 10; 10135 int time_counter = 10;
10038 u32 rc, fw, hw_lock_reg, hw_lock_val; 10136 u32 rc, fw, hw_lock_reg, hw_lock_val;
10039 struct bnx2x_prev_path_list *prev_list;
10040 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 10137 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10041 10138
10042 /* clear hw from errors which may have resulted from an interrupted 10139 /* clear hw from errors which may have resulted from an interrupted
@@ -10049,7 +10146,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
10049 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : 10146 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10050 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); 10147 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10051 10148
10052 hw_lock_val = (REG_RD(bp, hw_lock_reg)); 10149 hw_lock_val = REG_RD(bp, hw_lock_reg);
10053 if (hw_lock_val) { 10150 if (hw_lock_val) {
10054 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 10151 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10055 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); 10152 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
@@ -10064,7 +10161,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
10064 10161
10065 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { 10162 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10066 BNX2X_DEV_INFO("Release previously held alr\n"); 10163 BNX2X_DEV_INFO("Release previously held alr\n");
10067 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); 10164 bnx2x_release_alr(bp);
10068 } 10165 }
10069 10166
10070 do { 10167 do {
@@ -10093,7 +10190,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
10093 break; 10190 break;
10094 } 10191 }
10095 10192
10096 /* non-common reply from MCP night require looping */ 10193 /* non-common reply from MCP might require looping */
10097 rc = bnx2x_prev_unload_uncommon(bp); 10194 rc = bnx2x_prev_unload_uncommon(bp);
10098 if (rc != BNX2X_PREV_WAIT_NEEDED) 10195 if (rc != BNX2X_PREV_WAIT_NEEDED)
10099 break; 10196 break;
@@ -10107,8 +10204,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
10107 } 10204 }
10108 10205
10109 /* Mark function if its port was used to boot from SAN */ 10206 /* Mark function if its port was used to boot from SAN */
10110 prev_list = bnx2x_prev_path_get_entry(bp); 10207 if (bnx2x_port_after_undi(bp))
10111 if (prev_list && (prev_list->undi & (1 << BP_PORT(bp))))
10112 bp->link_params.feature_config_flags |= 10208 bp->link_params.feature_config_flags |=
10113 FEATURE_CONFIG_BOOT_FROM_SAN; 10209 FEATURE_CONFIG_BOOT_FROM_SAN;
10114 10210
@@ -10192,8 +10288,6 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10192 10288
10193 bnx2x_init_shmem(bp); 10289 bnx2x_init_shmem(bp);
10194 10290
10195
10196
10197 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 10291 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
10198 MISC_REG_GENERIC_CR_1 : 10292 MISC_REG_GENERIC_CR_1 :
10199 MISC_REG_GENERIC_CR_0)); 10293 MISC_REG_GENERIC_CR_0));
@@ -10455,6 +10549,9 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
10455 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 10549 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
10456 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; 10550 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
10457 10551
10552 if (!(bp->link_params.speed_cap_mask[idx] &
10553 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
10554 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
10458 } 10555 }
10459 10556
10460 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], 10557 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
@@ -10765,7 +10862,6 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
10765 */ 10862 */
10766 if (!bp->cnic_eth_dev.max_iscsi_conn) 10863 if (!bp->cnic_eth_dev.max_iscsi_conn)
10767 bp->flags |= no_flags; 10864 bp->flags |= no_flags;
10768
10769} 10865}
10770 10866
10771static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) 10867static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
@@ -10782,12 +10878,56 @@ static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
10782 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 10878 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10783 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); 10879 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
10784} 10880}
10881
10882static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
10883{
10884 u8 count = 0;
10885
10886 if (IS_MF(bp)) {
10887 u8 fid;
10888
10889 /* iterate over absolute function ids for this path: */
10890 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
10891 if (IS_MF_SD(bp)) {
10892 u32 cfg = MF_CFG_RD(bp,
10893 func_mf_config[fid].config);
10894
10895 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
10896 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
10897 FUNC_MF_CFG_PROTOCOL_FCOE))
10898 count++;
10899 } else {
10900 u32 cfg = MF_CFG_RD(bp,
10901 func_ext_config[fid].
10902 func_cfg);
10903
10904 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
10905 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
10906 count++;
10907 }
10908 }
10909 } else { /* SF */
10910 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
10911
10912 for (port = 0; port < port_cnt; port++) {
10913 u32 lic = SHMEM_RD(bp,
10914 drv_lic_key[port].max_fcoe_conn) ^
10915 FW_ENCODE_32BIT_PATTERN;
10916 if (lic)
10917 count++;
10918 }
10919 }
10920
10921 return count;
10922}
10923
10785static void bnx2x_get_fcoe_info(struct bnx2x *bp) 10924static void bnx2x_get_fcoe_info(struct bnx2x *bp)
10786{ 10925{
10787 int port = BP_PORT(bp); 10926 int port = BP_PORT(bp);
10788 int func = BP_ABS_FUNC(bp); 10927 int func = BP_ABS_FUNC(bp);
10789 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10928 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10790 drv_lic_key[port].max_fcoe_conn); 10929 drv_lic_key[port].max_fcoe_conn);
10930 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
10791 10931
10792 if (!CNIC_SUPPORT(bp)) { 10932 if (!CNIC_SUPPORT(bp)) {
10793 bp->flags |= NO_FCOE_FLAG; 10933 bp->flags |= NO_FCOE_FLAG;
@@ -10801,9 +10941,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
10801 10941
10802 /* Calculate the number of maximum allowed FCoE tasks */ 10942 /* Calculate the number of maximum allowed FCoE tasks */
10803 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; 10943 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
10804 if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp)) 10944
10805 bp->cnic_eth_dev.max_fcoe_exchanges /= 10945 /* check if FCoE resources must be shared between different functions */
10806 MAX_FCOE_FUNCS_PER_ENGINE; 10946 if (num_fcoe_func)
10947 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
10807 10948
10808 /* Read the WWN: */ 10949 /* Read the WWN: */
10809 if (!IS_MF(bp)) { 10950 if (!IS_MF(bp)) {
@@ -11031,7 +11172,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
11031 } else { 11172 } else {
11032 bp->common.int_block = INT_BLOCK_IGU; 11173 bp->common.int_block = INT_BLOCK_IGU;
11033 11174
11034 /* do not allow device reset during IGU info preocessing */ 11175 /* do not allow device reset during IGU info processing */
11035 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 11176 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11036 11177
11037 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 11178 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
@@ -11110,7 +11251,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
11110 E1H_FUNC_MAX * sizeof(struct drv_func_mb); 11251 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
11111 /* 11252 /*
11112 * get mf configuration: 11253 * get mf configuration:
11113 * 1. existence of MF configuration 11254 * 1. Existence of MF configuration
11114 * 2. MAC address must be legal (check only upper bytes) 11255 * 2. MAC address must be legal (check only upper bytes)
11115 * for Switch-Independent mode; 11256 * for Switch-Independent mode;
11116 * OVLAN must be legal for Switch-Dependent mode 11257 * OVLAN must be legal for Switch-Dependent mode
@@ -11384,7 +11525,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11384 mutex_init(&bp->fw_mb_mutex); 11525 mutex_init(&bp->fw_mb_mutex);
11385 spin_lock_init(&bp->stats_lock); 11526 spin_lock_init(&bp->stats_lock);
11386 11527
11387
11388 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 11528 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
11389 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 11529 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
11390 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); 11530 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
@@ -11393,7 +11533,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11393 if (rc) 11533 if (rc)
11394 return rc; 11534 return rc;
11395 } else { 11535 } else {
11396 random_ether_addr(bp->dev->dev_addr); 11536 eth_zero_addr(bp->dev->dev_addr);
11397 } 11537 }
11398 11538
11399 bnx2x_set_modes_bitmap(bp); 11539 bnx2x_set_modes_bitmap(bp);
@@ -11417,7 +11557,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11417 bnx2x_prev_unload(bp); 11557 bnx2x_prev_unload(bp);
11418 } 11558 }
11419 11559
11420
11421 if (CHIP_REV_IS_FPGA(bp)) 11560 if (CHIP_REV_IS_FPGA(bp))
11422 dev_err(&bp->pdev->dev, "FPGA detected\n"); 11561 dev_err(&bp->pdev->dev, "FPGA detected\n");
11423 11562
@@ -11489,7 +11628,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11489 11628
11490 /* We need at least one default status block for slow-path events, 11629 /* We need at least one default status block for slow-path events,
11491 * second status block for the L2 queue, and a third status block for 11630 * second status block for the L2 queue, and a third status block for
11492 * CNIC if supproted. 11631 * CNIC if supported.
11493 */ 11632 */
11494 if (CNIC_SUPPORT(bp)) 11633 if (CNIC_SUPPORT(bp))
11495 bp->min_msix_vec_cnt = 3; 11634 bp->min_msix_vec_cnt = 3;
@@ -11497,10 +11636,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11497 bp->min_msix_vec_cnt = 2; 11636 bp->min_msix_vec_cnt = 2;
11498 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); 11637 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
11499 11638
11639 bp->dump_preset_idx = 1;
11640
11500 return rc; 11641 return rc;
11501} 11642}
11502 11643
11503
11504/**************************************************************************** 11644/****************************************************************************
11505* General service functions 11645* General service functions
11506****************************************************************************/ 11646****************************************************************************/
@@ -11585,9 +11725,6 @@ static int bnx2x_close(struct net_device *dev)
11585 /* Unload the driver, release IRQs */ 11725 /* Unload the driver, release IRQs */
11586 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); 11726 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
11587 11727
11588 /* Power off */
11589 bnx2x_set_power_state(bp, PCI_D3hot);
11590
11591 return 0; 11728 return 0;
11592} 11729}
11593 11730
@@ -11852,6 +11989,10 @@ static int bnx2x_validate_addr(struct net_device *dev)
11852{ 11989{
11853 struct bnx2x *bp = netdev_priv(dev); 11990 struct bnx2x *bp = netdev_priv(dev);
11854 11991
11992 /* query the bulletin board for mac address configured by the PF */
11993 if (IS_VF(bp))
11994 bnx2x_sample_bulletin(bp);
11995
11855 if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { 11996 if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
11856 BNX2X_ERR("Non-valid Ethernet address\n"); 11997 BNX2X_ERR("Non-valid Ethernet address\n");
11857 return -EADDRNOTAVAIL; 11998 return -EADDRNOTAVAIL;
@@ -11878,12 +12019,16 @@ static const struct net_device_ops bnx2x_netdev_ops = {
11878 .ndo_setup_tc = bnx2x_setup_tc, 12019 .ndo_setup_tc = bnx2x_setup_tc,
11879#ifdef CONFIG_BNX2X_SRIOV 12020#ifdef CONFIG_BNX2X_SRIOV
11880 .ndo_set_vf_mac = bnx2x_set_vf_mac, 12021 .ndo_set_vf_mac = bnx2x_set_vf_mac,
11881 .ndo_set_vf_vlan = bnx2x_set_vf_vlan, 12022 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
11882 .ndo_get_vf_config = bnx2x_get_vf_config, 12023 .ndo_get_vf_config = bnx2x_get_vf_config,
11883#endif 12024#endif
11884#ifdef NETDEV_FCOE_WWNN 12025#ifdef NETDEV_FCOE_WWNN
11885 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 12026 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
11886#endif 12027#endif
12028
12029#ifdef CONFIG_NET_LL_RX_POLL
12030 .ndo_ll_poll = bnx2x_low_latency_recv,
12031#endif
11887}; 12032};
11888 12033
11889static int bnx2x_set_coherency_mask(struct bnx2x *bp) 12034static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -11959,7 +12104,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
11959 } 12104 }
11960 12105
11961 if (IS_PF(bp)) { 12106 if (IS_PF(bp)) {
11962 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 12107 bp->pm_cap = pdev->pm_cap;
11963 if (bp->pm_cap == 0) { 12108 if (bp->pm_cap == 0) {
11964 dev_err(&bp->pdev->dev, 12109 dev_err(&bp->pdev->dev,
11965 "Cannot find power management capability, aborting\n"); 12110 "Cannot find power management capability, aborting\n");
@@ -12008,8 +12153,6 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12008 } 12153 }
12009 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); 12154 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
12010 12155
12011 bnx2x_set_power_state(bp, PCI_D0);
12012
12013 /* clean indirect addresses */ 12156 /* clean indirect addresses */
12014 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 12157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12015 PCICFG_VENDOR_ID_OFFSET); 12158 PCICFG_VENDOR_ID_OFFSET);
@@ -12094,15 +12237,26 @@ err_out:
12094 return rc; 12237 return rc;
12095} 12238}
12096 12239
12097static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed) 12240static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
12241 enum bnx2x_pci_bus_speed *speed)
12098{ 12242{
12099 u32 val = 0; 12243 u32 link_speed, val = 0;
12100 12244
12101 pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val); 12245 pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
12102 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; 12246 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
12103 12247
12104 /* return value of 1=2.5GHz 2=5GHz */ 12248 link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
12105 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; 12249
12250 switch (link_speed) {
12251 case 3:
12252 *speed = BNX2X_PCI_LINK_SPEED_8000;
12253 break;
12254 case 2:
12255 *speed = BNX2X_PCI_LINK_SPEED_5000;
12256 break;
12257 default:
12258 *speed = BNX2X_PCI_LINK_SPEED_2500;
12259 }
12106} 12260}
12107 12261
12108static int bnx2x_check_firmware(struct bnx2x *bp) 12262static int bnx2x_check_firmware(struct bnx2x *bp)
@@ -12327,7 +12481,6 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
12327 bp->firmware = NULL; 12481 bp->firmware = NULL;
12328} 12482}
12329 12483
12330
12331static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { 12484static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
12332 .init_hw_cmn_chip = bnx2x_init_hw_common_chip, 12485 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
12333 .init_hw_cmn = bnx2x_init_hw_common, 12486 .init_hw_cmn = bnx2x_init_hw_common,
@@ -12465,7 +12618,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
12465{ 12618{
12466 struct net_device *dev = NULL; 12619 struct net_device *dev = NULL;
12467 struct bnx2x *bp; 12620 struct bnx2x *bp;
12468 int pcie_width, pcie_speed; 12621 int pcie_width;
12622 enum bnx2x_pci_bus_speed pcie_speed;
12469 int rc, max_non_def_sbs; 12623 int rc, max_non_def_sbs;
12470 int rx_count, tx_count, rss_count, doorbell_size; 12624 int rx_count, tx_count, rss_count, doorbell_size;
12471 int max_cos_est; 12625 int max_cos_est;
@@ -12605,7 +12759,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
12605 } 12759 }
12606 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); 12760 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
12607 12761
12608
12609 if (!NO_FCOE(bp)) { 12762 if (!NO_FCOE(bp)) {
12610 /* Add storage MAC address */ 12763 /* Add storage MAC address */
12611 rtnl_lock(); 12764 rtnl_lock();
@@ -12617,15 +12770,15 @@ static int bnx2x_init_one(struct pci_dev *pdev,
12617 BNX2X_DEV_INFO("got pcie width %d and speed %d\n", 12770 BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
12618 pcie_width, pcie_speed); 12771 pcie_width, pcie_speed);
12619 12772
12620 BNX2X_DEV_INFO( 12773 BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
12621 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", 12774 board_info[ent->driver_data].name,
12622 board_info[ent->driver_data].name, 12775 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12623 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 12776 pcie_width,
12624 pcie_width, 12777 pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
12625 ((!CHIP_IS_E2(bp) && pcie_speed == 2) || 12778 pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
12626 (CHIP_IS_E2(bp) && pcie_speed == 1)) ? 12779 pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
12627 "5GHz (Gen2)" : "2.5GHz", 12780 "Unknown",
12628 dev->base_addr, bp->pdev->irq, dev->dev_addr); 12781 dev->base_addr, bp->pdev->irq, dev->dev_addr);
12629 12782
12630 return 0; 12783 return 0;
12631 12784
@@ -12647,17 +12800,11 @@ init_one_exit:
12647 return rc; 12800 return rc;
12648} 12801}
12649 12802
12650static void bnx2x_remove_one(struct pci_dev *pdev) 12803static void __bnx2x_remove(struct pci_dev *pdev,
12804 struct net_device *dev,
12805 struct bnx2x *bp,
12806 bool remove_netdev)
12651{ 12807{
12652 struct net_device *dev = pci_get_drvdata(pdev);
12653 struct bnx2x *bp;
12654
12655 if (!dev) {
12656 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12657 return;
12658 }
12659 bp = netdev_priv(dev);
12660
12661 /* Delete storage MAC address */ 12808 /* Delete storage MAC address */
12662 if (!NO_FCOE(bp)) { 12809 if (!NO_FCOE(bp)) {
12663 rtnl_lock(); 12810 rtnl_lock();
@@ -12670,7 +12817,17 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
12670 bnx2x_dcbnl_update_applist(bp, true); 12817 bnx2x_dcbnl_update_applist(bp, true);
12671#endif 12818#endif
12672 12819
12673 unregister_netdev(dev); 12820 /* Close the interface - either directly or implicitly */
12821 if (remove_netdev) {
12822 unregister_netdev(dev);
12823 } else {
12824 rtnl_lock();
12825 if (netif_running(dev))
12826 bnx2x_close(dev);
12827 rtnl_unlock();
12828 }
12829
12830 bnx2x_iov_remove_one(bp);
12674 12831
12675 /* Power on: we can't let PCI layer write to us while we are in D3 */ 12832 /* Power on: we can't let PCI layer write to us while we are in D3 */
12676 if (IS_PF(bp)) 12833 if (IS_PF(bp))
@@ -12686,12 +12843,16 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
12686 /* Make sure RESET task is not scheduled before continuing */ 12843 /* Make sure RESET task is not scheduled before continuing */
12687 cancel_delayed_work_sync(&bp->sp_rtnl_task); 12844 cancel_delayed_work_sync(&bp->sp_rtnl_task);
12688 12845
12689 bnx2x_iov_remove_one(bp);
12690
12691 /* send message via vfpf channel to release the resources of this vf */ 12846 /* send message via vfpf channel to release the resources of this vf */
12692 if (IS_VF(bp)) 12847 if (IS_VF(bp))
12693 bnx2x_vfpf_release(bp); 12848 bnx2x_vfpf_release(bp);
12694 12849
12850 /* Assumes no further PCIe PM changes will occur */
12851 if (system_state == SYSTEM_POWER_OFF) {
12852 pci_wake_from_d3(pdev, bp->wol);
12853 pci_set_power_state(pdev, PCI_D3hot);
12854 }
12855
12695 if (bp->regview) 12856 if (bp->regview)
12696 iounmap(bp->regview); 12857 iounmap(bp->regview);
12697 12858
@@ -12706,7 +12867,8 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
12706 } 12867 }
12707 bnx2x_free_mem_bp(bp); 12868 bnx2x_free_mem_bp(bp);
12708 12869
12709 free_netdev(dev); 12870 if (remove_netdev)
12871 free_netdev(dev);
12710 12872
12711 if (atomic_read(&pdev->enable_cnt) == 1) 12873 if (atomic_read(&pdev->enable_cnt) == 1)
12712 pci_release_regions(pdev); 12874 pci_release_regions(pdev);
@@ -12715,6 +12877,20 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
12715 pci_set_drvdata(pdev, NULL); 12877 pci_set_drvdata(pdev, NULL);
12716} 12878}
12717 12879
12880static void bnx2x_remove_one(struct pci_dev *pdev)
12881{
12882 struct net_device *dev = pci_get_drvdata(pdev);
12883 struct bnx2x *bp;
12884
12885 if (!dev) {
12886 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12887 return;
12888 }
12889 bp = netdev_priv(dev);
12890
12891 __bnx2x_remove(pdev, dev, bp, true);
12892}
12893
12718static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 12894static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12719{ 12895{
12720 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 12896 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
@@ -12747,19 +12923,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12747 return 0; 12923 return 0;
12748} 12924}
12749 12925
12750static void bnx2x_eeh_recover(struct bnx2x *bp)
12751{
12752 u32 val;
12753
12754 mutex_init(&bp->port.phy_mutex);
12755
12756
12757 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12758 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12759 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12760 BNX2X_ERR("BAD MCP validity signature\n");
12761}
12762
12763/** 12926/**
12764 * bnx2x_io_error_detected - called when PCI error is detected 12927 * bnx2x_io_error_detected - called when PCI error is detected
12765 * @pdev: Pointer to PCI device 12928 * @pdev: Pointer to PCI device
@@ -12828,6 +12991,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12828 12991
12829 if (netif_running(dev)) { 12992 if (netif_running(dev)) {
12830 BNX2X_ERR("IO slot reset --> driver unload\n"); 12993 BNX2X_ERR("IO slot reset --> driver unload\n");
12994
12995 /* MCP should have been reset; Need to wait for validity */
12996 bnx2x_init_shmem(bp);
12997
12831 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 12998 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
12832 u32 v; 12999 u32 v;
12833 13000
@@ -12849,7 +13016,7 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12849 13016
12850 bnx2x_prev_unload(bp); 13017 bnx2x_prev_unload(bp);
12851 13018
12852 /* We should have resetted the engine, so It's fair to 13019 /* We should have reseted the engine, so It's fair to
12853 * assume the FW will no longer write to the bnx2x driver. 13020 * assume the FW will no longer write to the bnx2x driver.
12854 */ 13021 */
12855 bnx2x_squeeze_objects(bp); 13022 bnx2x_squeeze_objects(bp);
@@ -12886,8 +13053,6 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
12886 13053
12887 rtnl_lock(); 13054 rtnl_lock();
12888 13055
12889 bnx2x_eeh_recover(bp);
12890
12891 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 13056 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12892 DRV_MSG_SEQ_NUMBER_MASK; 13057 DRV_MSG_SEQ_NUMBER_MASK;
12893 13058
@@ -12905,6 +13070,29 @@ static const struct pci_error_handlers bnx2x_err_handler = {
12905 .resume = bnx2x_io_resume, 13070 .resume = bnx2x_io_resume,
12906}; 13071};
12907 13072
13073static void bnx2x_shutdown(struct pci_dev *pdev)
13074{
13075 struct net_device *dev = pci_get_drvdata(pdev);
13076 struct bnx2x *bp;
13077
13078 if (!dev)
13079 return;
13080
13081 bp = netdev_priv(dev);
13082 if (!bp)
13083 return;
13084
13085 rtnl_lock();
13086 netif_device_detach(dev);
13087 rtnl_unlock();
13088
13089 /* Don't remove the netdevice, as there are scenarios which will cause
13090 * the kernel to hang, e.g., when trying to remove bnx2i while the
13091 * rootfs is mounted from SAN.
13092 */
13093 __bnx2x_remove(pdev, dev, bp, false);
13094}
13095
12908static struct pci_driver bnx2x_pci_driver = { 13096static struct pci_driver bnx2x_pci_driver = {
12909 .name = DRV_MODULE_NAME, 13097 .name = DRV_MODULE_NAME,
12910 .id_table = bnx2x_pci_tbl, 13098 .id_table = bnx2x_pci_tbl,
@@ -12916,6 +13104,7 @@ static struct pci_driver bnx2x_pci_driver = {
12916#ifdef CONFIG_BNX2X_SRIOV 13104#ifdef CONFIG_BNX2X_SRIOV
12917 .sriov_configure = bnx2x_sriov_configure, 13105 .sriov_configure = bnx2x_sriov_configure,
12918#endif 13106#endif
13107 .shutdown = bnx2x_shutdown,
12919}; 13108};
12920 13109
12921static int __init bnx2x_init(void) 13110static int __init bnx2x_init(void)
@@ -12941,11 +13130,12 @@ static int __init bnx2x_init(void)
12941static void __exit bnx2x_cleanup(void) 13130static void __exit bnx2x_cleanup(void)
12942{ 13131{
12943 struct list_head *pos, *q; 13132 struct list_head *pos, *q;
13133
12944 pci_unregister_driver(&bnx2x_pci_driver); 13134 pci_unregister_driver(&bnx2x_pci_driver);
12945 13135
12946 destroy_workqueue(bnx2x_wq); 13136 destroy_workqueue(bnx2x_wq);
12947 13137
12948 /* Free globablly allocated resources */ 13138 /* Free globally allocated resources */
12949 list_for_each_safe(pos, q, &bnx2x_prev_list) { 13139 list_for_each_safe(pos, q, &bnx2x_prev_list) {
12950 struct bnx2x_prev_path_list *tmp = 13140 struct bnx2x_prev_path_list *tmp =
12951 list_entry(pos, struct bnx2x_prev_path_list, list); 13141 list_entry(pos, struct bnx2x_prev_path_list, list);
@@ -12968,7 +13158,7 @@ module_exit(bnx2x_cleanup);
12968 * @bp: driver handle 13158 * @bp: driver handle
12969 * @set: set or clear the CAM entry 13159 * @set: set or clear the CAM entry
12970 * 13160 *
12971 * This function will wait until the ramdord completion returns. 13161 * This function will wait until the ramrod completion returns.
12972 * Return 0 if success, -ENODEV if ramrod doesn't return. 13162 * Return 0 if success, -ENODEV if ramrod doesn't return.
12973 */ 13163 */
12974static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) 13164static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
@@ -12996,7 +13186,6 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12996 BUG_ON(bp->cnic_spq_pending < count); 13186 BUG_ON(bp->cnic_spq_pending < count);
12997 bp->cnic_spq_pending -= count; 13187 bp->cnic_spq_pending -= count;
12998 13188
12999
13000 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { 13189 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
13001 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) 13190 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
13002 & SPE_HDR_CONN_TYPE) >> 13191 & SPE_HDR_CONN_TYPE) >>
@@ -13169,7 +13358,6 @@ static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
13169 bnx2x_cnic_sp_post(bp, 0); 13358 bnx2x_cnic_sp_post(bp, 0);
13170} 13359}
13171 13360
13172
13173/* Called with netif_addr_lock_bh() taken. 13361/* Called with netif_addr_lock_bh() taken.
13174 * Sets an rx_mode config for an iSCSI ETH client. 13362 * Sets an rx_mode config for an iSCSI ETH client.
13175 * Doesn't block. 13363 * Doesn't block.
@@ -13210,7 +13398,6 @@ static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
13210 } 13398 }
13211} 13399}
13212 13400
13213
13214static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) 13401static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13215{ 13402{
13216 struct bnx2x *bp = netdev_priv(dev); 13403 struct bnx2x *bp = netdev_priv(dev);
@@ -13398,7 +13585,6 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp)
13398{ 13585{
13399 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 13586 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13400 13587
13401
13402 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 13588 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
13403 bnx2x_cid_ilt_lines(bp); 13589 bnx2x_cid_ilt_lines(bp);
13404 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; 13590 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
@@ -13434,7 +13620,6 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13434 BNX2X_ERR("CNIC-related load failed\n"); 13620 BNX2X_ERR("CNIC-related load failed\n");
13435 return rc; 13621 return rc;
13436 } 13622 }
13437
13438 } 13623 }
13439 13624
13440 bp->cnic_enabled = true; 13625 bp->cnic_enabled = true;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index d22bc40091ec..8e627b886d7b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -35,6 +35,8 @@
35#define ATC_REG_ATC_INT_STS_CLR 0x1101c0 35#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
36/* [RW 5] Parity mask register #0 read/write */ 36/* [RW 5] Parity mask register #0 read/write */
37#define ATC_REG_ATC_PRTY_MASK 0x1101d8 37#define ATC_REG_ATC_PRTY_MASK 0x1101d8
38/* [R 5] Parity register #0 read */
39#define ATC_REG_ATC_PRTY_STS 0x1101cc
38/* [RC 5] Parity register #0 read clear */ 40/* [RC 5] Parity register #0 read clear */
39#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0 41#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0
40/* [RW 19] Interrupt mask register #0 read/write */ 42/* [RW 19] Interrupt mask register #0 read/write */
@@ -2750,6 +2752,8 @@
2750#define PBF_REG_PBF_INT_STS 0x1401c8 2752#define PBF_REG_PBF_INT_STS 0x1401c8
2751/* [RW 20] Parity mask register #0 read/write */ 2753/* [RW 20] Parity mask register #0 read/write */
2752#define PBF_REG_PBF_PRTY_MASK 0x1401e4 2754#define PBF_REG_PBF_PRTY_MASK 0x1401e4
2755/* [R 28] Parity register #0 read */
2756#define PBF_REG_PBF_PRTY_STS 0x1401d8
2753/* [RC 20] Parity register #0 read clear */ 2757/* [RC 20] Parity register #0 read clear */
2754#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc 2758#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
2755/* [RW 16] The Ethernet type value for L2 tag 0 */ 2759/* [RW 16] The Ethernet type value for L2 tag 0 */
@@ -4517,6 +4521,8 @@
4517#define TM_REG_TM_INT_STS 0x1640f0 4521#define TM_REG_TM_INT_STS 0x1640f0
4518/* [RW 7] Parity mask register #0 read/write */ 4522/* [RW 7] Parity mask register #0 read/write */
4519#define TM_REG_TM_PRTY_MASK 0x16410c 4523#define TM_REG_TM_PRTY_MASK 0x16410c
4524/* [R 7] Parity register #0 read */
4525#define TM_REG_TM_PRTY_STS 0x164100
4520/* [RC 7] Parity register #0 read clear */ 4526/* [RC 7] Parity register #0 read clear */
4521#define TM_REG_TM_PRTY_STS_CLR 0x164104 4527#define TM_REG_TM_PRTY_STS_CLR 0x164104
4522/* [RW 8] The event id for aggregated interrupt 0 */ 4528/* [RW 8] The event id for aggregated interrupt 0 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 32a9609cc98b..8f03c984550f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -35,9 +35,9 @@
35/** 35/**
36 * bnx2x_exe_queue_init - init the Exe Queue object 36 * bnx2x_exe_queue_init - init the Exe Queue object
37 * 37 *
38 * @o: poiter to the object 38 * @o: pointer to the object
39 * @exe_len: length 39 * @exe_len: length
40 * @owner: poiter to the owner 40 * @owner: pointer to the owner
41 * @validate: validate function pointer 41 * @validate: validate function pointer
42 * @optimize: optimize function pointer 42 * @optimize: optimize function pointer
43 * @exec: execute function pointer 43 * @exec: execute function pointer
@@ -142,7 +142,6 @@ free_and_exit:
142 spin_unlock_bh(&o->lock); 142 spin_unlock_bh(&o->lock);
143 143
144 return rc; 144 return rc;
145
146} 145}
147 146
148static inline void __bnx2x_exe_queue_reset_pending( 147static inline void __bnx2x_exe_queue_reset_pending(
@@ -163,13 +162,11 @@ static inline void __bnx2x_exe_queue_reset_pending(
163static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, 162static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
164 struct bnx2x_exe_queue_obj *o) 163 struct bnx2x_exe_queue_obj *o)
165{ 164{
166
167 spin_lock_bh(&o->lock); 165 spin_lock_bh(&o->lock);
168 166
169 __bnx2x_exe_queue_reset_pending(bp, o); 167 __bnx2x_exe_queue_reset_pending(bp, o);
170 168
171 spin_unlock_bh(&o->lock); 169 spin_unlock_bh(&o->lock);
172
173} 170}
174 171
175/** 172/**
@@ -179,7 +176,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
179 * @o: queue 176 * @o: queue
180 * @ramrod_flags: flags 177 * @ramrod_flags: flags
181 * 178 *
182 * (Atomicy is ensured using the exe_queue->lock). 179 * (Atomicity is ensured using the exe_queue->lock).
183 */ 180 */
184static inline int bnx2x_exe_queue_step(struct bnx2x *bp, 181static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
185 struct bnx2x_exe_queue_obj *o, 182 struct bnx2x_exe_queue_obj *o,
@@ -192,8 +189,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
192 189
193 spin_lock_bh(&o->lock); 190 spin_lock_bh(&o->lock);
194 191
195 /* 192 /* Next step should not be performed until the current is finished,
196 * Next step should not be performed until the current is finished,
197 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to 193 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
198 * properly clear object internals without sending any command to the FW 194 * properly clear object internals without sending any command to the FW
199 * which also implies there won't be any completion to clear the 195 * which also implies there won't be any completion to clear the
@@ -209,8 +205,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
209 } 205 }
210 } 206 }
211 207
212 /* 208 /* Run through the pending commands list and create a next
213 * Run through the pending commands list and create a next
214 * execution chunk. 209 * execution chunk.
215 */ 210 */
216 while (!list_empty(&o->exe_queue)) { 211 while (!list_empty(&o->exe_queue)) {
@@ -220,8 +215,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
220 215
221 if (cur_len + elem->cmd_len <= o->exe_chunk_len) { 216 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
222 cur_len += elem->cmd_len; 217 cur_len += elem->cmd_len;
223 /* 218 /* Prevent from both lists being empty when moving an
224 * Prevent from both lists being empty when moving an
225 * element. This will allow the call of 219 * element. This will allow the call of
226 * bnx2x_exe_queue_empty() without locking. 220 * bnx2x_exe_queue_empty() without locking.
227 */ 221 */
@@ -241,14 +235,12 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
241 235
242 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); 236 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
243 if (rc < 0) 237 if (rc < 0)
244 /* 238 /* In case of an error return the commands back to the queue
245 * In case of an error return the commands back to the queue 239 * and reset the pending_comp.
246 * and reset the pending_comp.
247 */ 240 */
248 list_splice_init(&o->pending_comp, &o->exe_queue); 241 list_splice_init(&o->pending_comp, &o->exe_queue);
249 else if (!rc) 242 else if (!rc)
250 /* 243 /* If zero is returned, means there are no outstanding pending
251 * If zero is returned, means there are no outstanding pending
252 * completions and we may dismiss the pending list. 244 * completions and we may dismiss the pending list.
253 */ 245 */
254 __bnx2x_exe_queue_reset_pending(bp, o); 246 __bnx2x_exe_queue_reset_pending(bp, o);
@@ -308,7 +300,6 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
308 /* can take a while if any port is running */ 300 /* can take a while if any port is running */
309 int cnt = 5000; 301 int cnt = 5000;
310 302
311
312 if (CHIP_REV_IS_EMUL(bp)) 303 if (CHIP_REV_IS_EMUL(bp))
313 cnt *= 20; 304 cnt *= 20;
314 305
@@ -456,7 +447,6 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
456 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n", 447 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
457 counter, next); 448 counter, next);
458 next += stride + size; 449 next += stride + size;
459
460 } 450 }
461 } 451 }
462 return counter * ETH_ALEN; 452 return counter * ETH_ALEN;
@@ -518,7 +508,6 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
518 return 0; 508 return 0;
519} 509}
520 510
521
522/* check_del() callbacks */ 511/* check_del() callbacks */
523static struct bnx2x_vlan_mac_registry_elem * 512static struct bnx2x_vlan_mac_registry_elem *
524 bnx2x_check_mac_del(struct bnx2x *bp, 513 bnx2x_check_mac_del(struct bnx2x *bp,
@@ -609,7 +598,6 @@ static bool bnx2x_check_move_always_err(
609 return false; 598 return false;
610} 599}
611 600
612
613static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) 601static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
614{ 602{
615 struct bnx2x_raw_obj *raw = &o->raw; 603 struct bnx2x_raw_obj *raw = &o->raw;
@@ -626,7 +614,6 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
626 return rx_tx_flag; 614 return rx_tx_flag;
627} 615}
628 616
629
630void bnx2x_set_mac_in_nig(struct bnx2x *bp, 617void bnx2x_set_mac_in_nig(struct bnx2x *bp,
631 bool add, unsigned char *dev_addr, int index) 618 bool add, unsigned char *dev_addr, int index)
632{ 619{
@@ -693,7 +680,7 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
693 * 680 *
694 * @cid: connection id 681 * @cid: connection id
695 * @type: BNX2X_FILTER_XXX_PENDING 682 * @type: BNX2X_FILTER_XXX_PENDING
696 * @hdr: poiter to header to setup 683 * @hdr: pointer to header to setup
697 * @rule_cnt: 684 * @rule_cnt:
698 * 685 *
699 * currently we always configure one rule and echo field to contain a CID and an 686 * currently we always configure one rule and echo field to contain a CID and an
@@ -707,7 +694,6 @@ static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
707 hdr->rule_cnt = (u8)rule_cnt; 694 hdr->rule_cnt = (u8)rule_cnt;
708} 695}
709 696
710
711/* hw_config() callbacks */ 697/* hw_config() callbacks */
712static void bnx2x_set_one_mac_e2(struct bnx2x *bp, 698static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
713 struct bnx2x_vlan_mac_obj *o, 699 struct bnx2x_vlan_mac_obj *o,
@@ -723,8 +709,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
723 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; 709 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
724 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; 710 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
725 711
726 /* 712 /* Set LLH CAM entry: currently only iSCSI and ETH macs are
727 * Set LLH CAM entry: currently only iSCSI and ETH macs are
728 * relevant. In addition, current implementation is tuned for a 713 * relevant. In addition, current implementation is tuned for a
729 * single ETH MAC. 714 * single ETH MAC.
730 * 715 *
@@ -879,8 +864,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
879 struct bnx2x_raw_obj *raw = &o->raw; 864 struct bnx2x_raw_obj *raw = &o->raw;
880 struct mac_configuration_cmd *config = 865 struct mac_configuration_cmd *config =
881 (struct mac_configuration_cmd *)(raw->rdata); 866 (struct mac_configuration_cmd *)(raw->rdata);
882 /* 867 /* 57710 and 57711 do not support MOVE command,
883 * 57710 and 57711 do not support MOVE command,
884 * so it's either ADD or DEL 868 * so it's either ADD or DEL
885 */ 869 */
886 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 870 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
@@ -960,7 +944,6 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
960 u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; 944 u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
961 u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; 945 u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
962 946
963
964 /* Reset the ramrod data buffer for the first rule */ 947 /* Reset the ramrod data buffer for the first rule */
965 if (rule_idx == 0) 948 if (rule_idx == 0)
966 memset(data, 0, sizeof(*data)); 949 memset(data, 0, sizeof(*data));
@@ -969,7 +952,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
969 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, 952 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
970 &rule_entry->pair.header); 953 &rule_entry->pair.header);
971 954
972 /* Set VLAN and MAC themselvs */ 955 /* Set VLAN and MAC themselves */
973 rule_entry->pair.vlan = cpu_to_le16(vlan); 956 rule_entry->pair.vlan = cpu_to_le16(vlan);
974 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, 957 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
975 &rule_entry->pair.mac_mid, 958 &rule_entry->pair.mac_mid,
@@ -1021,8 +1004,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1021 struct bnx2x_raw_obj *raw = &o->raw; 1004 struct bnx2x_raw_obj *raw = &o->raw;
1022 struct mac_configuration_cmd *config = 1005 struct mac_configuration_cmd *config =
1023 (struct mac_configuration_cmd *)(raw->rdata); 1006 (struct mac_configuration_cmd *)(raw->rdata);
1024 /* 1007 /* 57710 and 57711 do not support MOVE command,
1025 * 57710 and 57711 do not support MOVE command,
1026 * so it's either ADD or DEL 1008 * so it's either ADD or DEL
1027 */ 1009 */
1028 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 1010 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
@@ -1046,7 +1028,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1046 * 1028 *
1047 * @bp: device handle 1029 * @bp: device handle
1048 * @p: command parameters 1030 * @p: command parameters
1049 * @ppos: pointer to the cooky 1031 * @ppos: pointer to the cookie
1050 * 1032 *
1051 * reconfigure next MAC/VLAN/VLAN-MAC element from the 1033 * reconfigure next MAC/VLAN/VLAN-MAC element from the
1052 * previously configured elements list. 1034 * previously configured elements list.
@@ -1054,7 +1036,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1054 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken 1036 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
1055 * into an account 1037 * into an account
1056 * 1038 *
1057 * pointer to the cooky - that should be given back in the next call to make 1039 * pointer to the cookie - that should be given back in the next call to make
1058 * function handle the next element. If *ppos is set to NULL it will restart the 1040 * function handle the next element. If *ppos is set to NULL it will restart the
1059 * iterator. If returned *ppos == NULL this means that the last element has been 1041 * iterator. If returned *ppos == NULL this means that the last element has been
1060 * handled. 1042 * handled.
@@ -1102,8 +1084,7 @@ static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1102 return bnx2x_config_vlan_mac(bp, p); 1084 return bnx2x_config_vlan_mac(bp, p);
1103} 1085}
1104 1086
1105/* 1087/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1106 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1107 * pointer to an element with a specific criteria and NULL if such an element 1088 * pointer to an element with a specific criteria and NULL if such an element
1108 * hasn't been found. 1089 * hasn't been found.
1109 */ 1090 */
@@ -1187,8 +1168,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1187 return rc; 1168 return rc;
1188 } 1169 }
1189 1170
1190 /* 1171 /* Check if there is a pending ADD command for this
1191 * Check if there is a pending ADD command for this
1192 * MAC/VLAN/VLAN-MAC. Return an error if there is. 1172 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1193 */ 1173 */
1194 if (exeq->get(exeq, elem)) { 1174 if (exeq->get(exeq, elem)) {
@@ -1196,8 +1176,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1196 return -EEXIST; 1176 return -EEXIST;
1197 } 1177 }
1198 1178
1199 /* 1179 /* TODO: Check the pending MOVE from other objects where this
1200 * TODO: Check the pending MOVE from other objects where this
1201 * object is a destination object. 1180 * object is a destination object.
1202 */ 1181 */
1203 1182
@@ -1240,8 +1219,7 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1240 return -EEXIST; 1219 return -EEXIST;
1241 } 1220 }
1242 1221
1243 /* 1222 /* Check if there are pending DEL or MOVE commands for this
1244 * Check if there are pending DEL or MOVE commands for this
1245 * MAC/VLAN/VLAN-MAC. Return an error if so. 1223 * MAC/VLAN/VLAN-MAC. Return an error if so.
1246 */ 1224 */
1247 memcpy(&query_elem, elem, sizeof(query_elem)); 1225 memcpy(&query_elem, elem, sizeof(query_elem));
@@ -1292,8 +1270,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1292 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; 1270 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1293 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; 1271 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1294 1272
1295 /* 1273 /* Check if we can perform this operation based on the current registry
1296 * Check if we can perform this operation based on the current registry
1297 * state. 1274 * state.
1298 */ 1275 */
1299 if (!src_o->check_move(bp, src_o, dest_o, 1276 if (!src_o->check_move(bp, src_o, dest_o,
@@ -1302,8 +1279,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1302 return -EINVAL; 1279 return -EINVAL;
1303 } 1280 }
1304 1281
1305 /* 1282 /* Check if there is an already pending DEL or MOVE command for the
1306 * Check if there is an already pending DEL or MOVE command for the
1307 * source object or ADD command for a destination object. Return an 1283 * source object or ADD command for a destination object. Return an
1308 * error if so. 1284 * error if so.
1309 */ 1285 */
@@ -1392,7 +1368,7 @@ static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1392} 1368}
1393 1369
1394/** 1370/**
1395 * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. 1371 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1396 * 1372 *
1397 * @bp: device handle 1373 * @bp: device handle
1398 * @o: bnx2x_vlan_mac_obj 1374 * @o: bnx2x_vlan_mac_obj
@@ -1550,9 +1526,8 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
1550 1526
1551 /* Get a new CAM offset */ 1527 /* Get a new CAM offset */
1552 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) { 1528 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1553 /* 1529 /* This shall never happen, because we have checked the
1554 * This shell never happen, because we have checked the 1530 * CAM availability in the 'validate'.
1555 * CAM availiability in the 'validate'.
1556 */ 1531 */
1557 WARN_ON(1); 1532 WARN_ON(1);
1558 kfree(reg_elem); 1533 kfree(reg_elem);
@@ -1599,8 +1574,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1599 struct bnx2x_vlan_mac_registry_elem *reg_elem; 1574 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1600 enum bnx2x_vlan_mac_cmd cmd; 1575 enum bnx2x_vlan_mac_cmd cmd;
1601 1576
1602 /* 1577 /* If DRIVER_ONLY execution is requested, cleanup a registry
1603 * If DRIVER_ONLY execution is requested, cleanup a registry
1604 * and exit. Otherwise send a ramrod to FW. 1578 * and exit. Otherwise send a ramrod to FW.
1605 */ 1579 */
1606 if (!drv_only) { 1580 if (!drv_only) {
@@ -1609,11 +1583,10 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1609 /* Set pending */ 1583 /* Set pending */
1610 r->set_pending(r); 1584 r->set_pending(r);
1611 1585
1612 /* Fill tha ramrod data */ 1586 /* Fill the ramrod data */
1613 list_for_each_entry(elem, exe_chunk, link) { 1587 list_for_each_entry(elem, exe_chunk, link) {
1614 cmd = elem->cmd_data.vlan_mac.cmd; 1588 cmd = elem->cmd_data.vlan_mac.cmd;
1615 /* 1589 /* We will add to the target object in MOVE command, so
1616 * We will add to the target object in MOVE command, so
1617 * change the object for a CAM search. 1590 * change the object for a CAM search.
1618 */ 1591 */
1619 if (cmd == BNX2X_VLAN_MAC_MOVE) 1592 if (cmd == BNX2X_VLAN_MAC_MOVE)
@@ -1646,12 +1619,11 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1646 idx++; 1619 idx++;
1647 } 1620 }
1648 1621
1649 /* 1622 /* No need for an explicit memory barrier here as long we would
1650 * No need for an explicit memory barrier here as long we would 1623 * need to ensure the ordering of writing to the SPQ element
1651 * need to ensure the ordering of writing to the SPQ element 1624 * and updating of the SPQ producer which involves a memory
1652 * and updating of the SPQ producer which involves a memory 1625 * read and we will have to put a full memory barrier there
1653 * read and we will have to put a full memory barrier there 1626 * (inside bnx2x_sp_post()).
1654 * (inside bnx2x_sp_post()).
1655 */ 1627 */
1656 1628
1657 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, 1629 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
@@ -1766,8 +1738,7 @@ int bnx2x_config_vlan_mac(
1766 return rc; 1738 return rc;
1767 } 1739 }
1768 1740
1769 /* 1741 /* If nothing will be executed further in this iteration we want to
1770 * If nothing will be executed further in this iteration we want to
1771 * return PENDING if there are pending commands 1742 * return PENDING if there are pending commands
1772 */ 1743 */
1773 if (!bnx2x_exe_queue_empty(&o->exe_queue)) 1744 if (!bnx2x_exe_queue_empty(&o->exe_queue))
@@ -1786,13 +1757,11 @@ int bnx2x_config_vlan_mac(
1786 return rc; 1757 return rc;
1787 } 1758 }
1788 1759
1789 /* 1760 /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1790 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1791 * then user want to wait until the last command is done. 1761 * then user want to wait until the last command is done.
1792 */ 1762 */
1793 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { 1763 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1794 /* 1764 /* Wait maximum for the current exe_queue length iterations plus
1795 * Wait maximum for the current exe_queue length iterations plus
1796 * one (for the current pending command). 1765 * one (for the current pending command).
1797 */ 1766 */
1798 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; 1767 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
@@ -1818,8 +1787,6 @@ int bnx2x_config_vlan_mac(
1818 return rc; 1787 return rc;
1819} 1788}
1820 1789
1821
1822
1823/** 1790/**
1824 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec 1791 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1825 * 1792 *
@@ -1829,7 +1796,7 @@ int bnx2x_config_vlan_mac(
1829 * @ramrod_flags: execution flags to be used for this deletion 1796 * @ramrod_flags: execution flags to be used for this deletion
1830 * 1797 *
1831 * if the last operation has completed successfully and there are no 1798 * if the last operation has completed successfully and there are no
1832 * moreelements left, positive value if the last operation has completed 1799 * more elements left, positive value if the last operation has completed
1833 * successfully and there are more previously configured elements, negative 1800 * successfully and there are more previously configured elements, negative
1834 * value is current operation has failed. 1801 * value is current operation has failed.
1835 */ 1802 */
@@ -1870,8 +1837,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1870 p.ramrod_flags = *ramrod_flags; 1837 p.ramrod_flags = *ramrod_flags;
1871 p.user_req.cmd = BNX2X_VLAN_MAC_DEL; 1838 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1872 1839
1873 /* 1840 /* Add all but the last VLAN-MAC to the execution queue without actually
1874 * Add all but the last VLAN-MAC to the execution queue without actually
1875 * execution anything. 1841 * execution anything.
1876 */ 1842 */
1877 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); 1843 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
@@ -1934,7 +1900,6 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1934 state, pstate, type); 1900 state, pstate, type);
1935} 1901}
1936 1902
1937
1938void bnx2x_init_mac_obj(struct bnx2x *bp, 1903void bnx2x_init_mac_obj(struct bnx2x *bp,
1939 struct bnx2x_vlan_mac_obj *mac_obj, 1904 struct bnx2x_vlan_mac_obj *mac_obj,
1940 u8 cl_id, u32 cid, u8 func_id, void *rdata, 1905 u8 cl_id, u32 cid, u8 func_id, void *rdata,
@@ -2048,8 +2013,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2048 /* CAM pool handling */ 2013 /* CAM pool handling */
2049 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; 2014 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2050 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; 2015 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2051 /* 2016 /* CAM offset is relevant for 57710 and 57711 chips only which have a
2052 * CAM offset is relevant for 57710 and 57711 chips only which have a
2053 * single CAM for both MACs and VLAN-MAC pairs. So the offset 2017 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2054 * will be taken from MACs' pool object only. 2018 * will be taken from MACs' pool object only.
2055 */ 2019 */
@@ -2092,7 +2056,6 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2092 bnx2x_execute_vlan_mac, 2056 bnx2x_execute_vlan_mac,
2093 bnx2x_exeq_get_vlan_mac); 2057 bnx2x_exeq_get_vlan_mac);
2094 } 2058 }
2095
2096} 2059}
2097 2060
2098/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ 2061/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
@@ -2117,12 +2080,12 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2117 struct tstorm_eth_mac_filter_config *mac_filters = 2080 struct tstorm_eth_mac_filter_config *mac_filters =
2118 (struct tstorm_eth_mac_filter_config *)p->rdata; 2081 (struct tstorm_eth_mac_filter_config *)p->rdata;
2119 2082
2120 /* initial seeting is drop-all */ 2083 /* initial setting is drop-all */
2121 u8 drop_all_ucast = 1, drop_all_mcast = 1; 2084 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2122 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; 2085 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2123 u8 unmatched_unicast = 0; 2086 u8 unmatched_unicast = 0;
2124 2087
2125 /* In e1x there we only take into account rx acceot flag since tx switching 2088 /* In e1x there we only take into account rx accept flag since tx switching
2126 * isn't enabled. */ 2089 * isn't enabled. */
2127 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) 2090 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2128 /* accept matched ucast */ 2091 /* accept matched ucast */
@@ -2245,7 +2208,6 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2245 } 2208 }
2246 2209
2247 cmd->state = cpu_to_le16(state); 2210 cmd->state = cpu_to_le16(state);
2248
2249} 2211}
2250 2212
2251static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, 2213static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
@@ -2286,9 +2248,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2286 false); 2248 false);
2287 } 2249 }
2288 2250
2289 2251 /* If FCoE Queue configuration has been requested configure the Rx and
2290 /*
2291 * If FCoE Queue configuration has been requested configure the Rx and
2292 * internal switching modes for this queue in separate rules. 2252 * internal switching modes for this queue in separate rules.
2293 * 2253 *
2294 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: 2254 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
@@ -2324,8 +2284,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2324 } 2284 }
2325 } 2285 }
2326 2286
2327 /* 2287 /* Set the ramrod header (most importantly - number of rules to
2328 * Set the ramrod header (most importantly - number of rules to
2329 * configure). 2288 * configure).
2330 */ 2289 */
2331 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); 2290 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
@@ -2334,12 +2293,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2334 data->header.rule_cnt, p->rx_accept_flags, 2293 data->header.rule_cnt, p->rx_accept_flags,
2335 p->tx_accept_flags); 2294 p->tx_accept_flags);
2336 2295
2337 /* 2296 /* No need for an explicit memory barrier here as long we would
2338 * No need for an explicit memory barrier here as long we would 2297 * need to ensure the ordering of writing to the SPQ element
2339 * need to ensure the ordering of writing to the SPQ element 2298 * and updating of the SPQ producer which involves a memory
2340 * and updating of the SPQ producer which involves a memory 2299 * read and we will have to put a full memory barrier there
2341 * read and we will have to put a full memory barrier there 2300 * (inside bnx2x_sp_post()).
2342 * (inside bnx2x_sp_post()).
2343 */ 2301 */
2344 2302
2345 /* Send a ramrod */ 2303 /* Send a ramrod */
@@ -2476,7 +2434,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2476 cur_mac = (struct bnx2x_mcast_mac_elem *) 2434 cur_mac = (struct bnx2x_mcast_mac_elem *)
2477 ((u8 *)new_cmd + sizeof(*new_cmd)); 2435 ((u8 *)new_cmd + sizeof(*new_cmd));
2478 2436
2479 /* Push the MACs of the current command into the pendig command 2437 /* Push the MACs of the current command into the pending command
2480 * MACs list: FIFO 2438 * MACs list: FIFO
2481 */ 2439 */
2482 list_for_each_entry(pos, &p->mcast_list, link) { 2440 list_for_each_entry(pos, &p->mcast_list, link) {
@@ -2909,7 +2867,6 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2909 default: 2867 default:
2910 BNX2X_ERR("Unknown command: %d\n", cmd); 2868 BNX2X_ERR("Unknown command: %d\n", cmd);
2911 return -EINVAL; 2869 return -EINVAL;
2912
2913 } 2870 }
2914 2871
2915 /* Increase the total number of MACs pending to be configured */ 2872 /* Increase the total number of MACs pending to be configured */
@@ -3034,20 +2991,18 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
3034 if (!o->total_pending_num) 2991 if (!o->total_pending_num)
3035 bnx2x_mcast_refresh_registry_e2(bp, o); 2992 bnx2x_mcast_refresh_registry_e2(bp, o);
3036 2993
3037 /* 2994 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3038 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3039 * RAMROD_PENDING status immediately. 2995 * RAMROD_PENDING status immediately.
3040 */ 2996 */
3041 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 2997 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3042 raw->clear_pending(raw); 2998 raw->clear_pending(raw);
3043 return 0; 2999 return 0;
3044 } else { 3000 } else {
3045 /* 3001 /* No need for an explicit memory barrier here as long we would
3046 * No need for an explicit memory barrier here as long we would 3002 * need to ensure the ordering of writing to the SPQ element
3047 * need to ensure the ordering of writing to the SPQ element 3003 * and updating of the SPQ producer which involves a memory
3048 * and updating of the SPQ producer which involves a memory 3004 * read and we will have to put a full memory barrier there
3049 * read and we will have to put a full memory barrier there 3005 * (inside bnx2x_sp_post()).
3050 * (inside bnx2x_sp_post()).
3051 */ 3006 */
3052 3007
3053 /* Send a ramrod */ 3008 /* Send a ramrod */
@@ -3121,7 +3076,7 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3121 } 3076 }
3122} 3077}
3123 3078
3124/* On 57711 we write the multicast MACs' aproximate match 3079/* On 57711 we write the multicast MACs' approximate match
3125 * table by directly into the TSTORM's internal RAM. So we don't 3080 * table by directly into the TSTORM's internal RAM. So we don't
3126 * really need to handle any tricks to make it work. 3081 * really need to handle any tricks to make it work.
3127 */ 3082 */
@@ -3223,7 +3178,6 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3223 default: 3178 default:
3224 BNX2X_ERR("Unknown command: %d\n", cmd); 3179 BNX2X_ERR("Unknown command: %d\n", cmd);
3225 return -EINVAL; 3180 return -EINVAL;
3226
3227 } 3181 }
3228 3182
3229 /* We want to ensure that commands are executed one by one for 57710. 3183 /* We want to ensure that commands are executed one by one for 57710.
@@ -3245,7 +3199,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3245 3199
3246 /* If current command hasn't been handled yet and we are 3200 /* If current command hasn't been handled yet and we are
3247 * here means that it's meant to be dropped and we have to 3201 * here means that it's meant to be dropped and we have to
3248 * update the number of outstandling MACs accordingly. 3202 * update the number of outstanding MACs accordingly.
3249 */ 3203 */
3250 if (p->mcast_list_len) 3204 if (p->mcast_list_len)
3251 o->total_pending_num -= o->max_cmd_len; 3205 o->total_pending_num -= o->max_cmd_len;
@@ -3342,7 +3296,6 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
3342 return -1; 3296 return -1;
3343} 3297}
3344 3298
3345
3346static inline int bnx2x_mcast_handle_pending_cmds_e1( 3299static inline int bnx2x_mcast_handle_pending_cmds_e1(
3347 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) 3300 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3348{ 3301{
@@ -3352,7 +3305,6 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
3352 union bnx2x_mcast_config_data cfg_data = {NULL}; 3305 union bnx2x_mcast_config_data cfg_data = {NULL};
3353 int cnt = 0; 3306 int cnt = 0;
3354 3307
3355
3356 /* If nothing to be done - return */ 3308 /* If nothing to be done - return */
3357 if (list_empty(&o->pending_cmds_head)) 3309 if (list_empty(&o->pending_cmds_head))
3358 return 0; 3310 return 0;
@@ -3523,20 +3475,18 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3523 if (rc) 3475 if (rc)
3524 return rc; 3476 return rc;
3525 3477
3526 /* 3478 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3527 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3528 * RAMROD_PENDING status immediately. 3479 * RAMROD_PENDING status immediately.
3529 */ 3480 */
3530 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3481 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3531 raw->clear_pending(raw); 3482 raw->clear_pending(raw);
3532 return 0; 3483 return 0;
3533 } else { 3484 } else {
3534 /* 3485 /* No need for an explicit memory barrier here as long we would
3535 * No need for an explicit memory barrier here as long we would 3486 * need to ensure the ordering of writing to the SPQ element
3536 * need to ensure the ordering of writing to the SPQ element 3487 * and updating of the SPQ producer which involves a memory
3537 * and updating of the SPQ producer which involves a memory 3488 * read and we will have to put a full memory barrier there
3538 * read and we will have to put a full memory barrier there 3489 * (inside bnx2x_sp_post()).
3539 * (inside bnx2x_sp_post()).
3540 */ 3490 */
3541 3491
3542 /* Send a ramrod */ 3492 /* Send a ramrod */
@@ -3550,7 +3500,6 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3550 /* Ramrod completion is pending */ 3500 /* Ramrod completion is pending */
3551 return 1; 3501 return 1;
3552 } 3502 }
3553
3554} 3503}
3555 3504
3556static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) 3505static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
@@ -3848,7 +3797,6 @@ static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3848 return true; 3797 return true;
3849} 3798}
3850 3799
3851
3852static bool bnx2x_credit_pool_get_entry( 3800static bool bnx2x_credit_pool_get_entry(
3853 struct bnx2x_credit_pool_obj *o, 3801 struct bnx2x_credit_pool_obj *o,
3854 int *offset) 3802 int *offset)
@@ -3999,8 +3947,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3999 3947
4000 } else { 3948 } else {
4001 3949
4002 /* 3950 /* CAM credit is equaly divided between all active functions
4003 * CAM credit is equaly divided between all active functions
4004 * on the PATH. 3951 * on the PATH.
4005 */ 3952 */
4006 if ((func_num > 0)) { 3953 if ((func_num > 0)) {
@@ -4009,8 +3956,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
4009 else 3956 else
4010 cam_sz = BNX2X_CAM_SIZE_EMUL; 3957 cam_sz = BNX2X_CAM_SIZE_EMUL;
4011 3958
4012 /* 3959 /* No need for CAM entries handling for 57712 and
4013 * No need for CAM entries handling for 57712 and
4014 * newer. 3960 * newer.
4015 */ 3961 */
4016 bnx2x_init_credit_pool(p, -1, cam_sz); 3962 bnx2x_init_credit_pool(p, -1, cam_sz);
@@ -4018,7 +3964,6 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
4018 /* this should never happen! Block MAC operations. */ 3964 /* this should never happen! Block MAC operations. */
4019 bnx2x_init_credit_pool(p, 0, 0); 3965 bnx2x_init_credit_pool(p, 0, 0);
4020 } 3966 }
4021
4022 } 3967 }
4023} 3968}
4024 3969
@@ -4028,14 +3973,12 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4028 u8 func_num) 3973 u8 func_num)
4029{ 3974{
4030 if (CHIP_IS_E1x(bp)) { 3975 if (CHIP_IS_E1x(bp)) {
4031 /* 3976 /* There is no VLAN credit in HW on 57710 and 57711 only
4032 * There is no VLAN credit in HW on 57710 and 57711 only
4033 * MAC / MAC-VLAN can be set 3977 * MAC / MAC-VLAN can be set
4034 */ 3978 */
4035 bnx2x_init_credit_pool(p, 0, -1); 3979 bnx2x_init_credit_pool(p, 0, -1);
4036 } else { 3980 } else {
4037 /* 3981 /* CAM credit is equally divided between all active functions
4038 * CAM credit is equaly divided between all active functions
4039 * on the PATH. 3982 * on the PATH.
4040 */ 3983 */
4041 if (func_num > 0) { 3984 if (func_num > 0) {
@@ -4051,7 +3994,7 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4051/** 3994/**
4052 * bnx2x_debug_print_ind_table - prints the indirection table configuration. 3995 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4053 * 3996 *
4054 * @bp: driver hanlde 3997 * @bp: driver handle
4055 * @p: pointer to rss configuration 3998 * @p: pointer to rss configuration
4056 * 3999 *
4057 * Prints it when NETIF_MSG_IFUP debug level is configured. 4000 * Prints it when NETIF_MSG_IFUP debug level is configured.
@@ -4164,12 +4107,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4164 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 4107 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4165 } 4108 }
4166 4109
4167 /* 4110 /* No need for an explicit memory barrier here as long we would
4168 * No need for an explicit memory barrier here as long we would 4111 * need to ensure the ordering of writing to the SPQ element
4169 * need to ensure the ordering of writing to the SPQ element 4112 * and updating of the SPQ producer which involves a memory
4170 * and updating of the SPQ producer which involves a memory 4113 * read and we will have to put a full memory barrier there
4171 * read and we will have to put a full memory barrier there 4114 * (inside bnx2x_sp_post()).
4172 * (inside bnx2x_sp_post()).
4173 */ 4115 */
4174 4116
4175 /* Send a ramrod */ 4117 /* Send a ramrod */
@@ -4215,7 +4157,6 @@ int bnx2x_config_rss(struct bnx2x *bp,
4215 return rc; 4157 return rc;
4216} 4158}
4217 4159
4218
4219void bnx2x_init_rss_config_obj(struct bnx2x *bp, 4160void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4220 struct bnx2x_rss_config_obj *rss_obj, 4161 struct bnx2x_rss_config_obj *rss_obj,
4221 u8 cl_id, u32 cid, u8 func_id, u8 engine_id, 4162 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
@@ -4288,7 +4229,6 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
4288 return !!test_bit(pending_bit, pending); 4229 return !!test_bit(pending_bit, pending);
4289} 4230}
4290 4231
4291
4292static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, 4232static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4293 struct bnx2x_queue_state_params *params) 4233 struct bnx2x_queue_state_params *params)
4294{ 4234{
@@ -4337,7 +4277,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4337 } 4277 }
4338 4278
4339 if (o->next_tx_only >= o->max_cos) 4279 if (o->next_tx_only >= o->max_cos)
4340 /* >= becuase tx only must always be smaller than cos since the 4280 /* >= because tx only must always be smaller than cos since the
4341 * primary connection supports COS 0 4281 * primary connection supports COS 0
4342 */ 4282 */
4343 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", 4283 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
@@ -4403,7 +4343,6 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4403 gen_data->mtu = cpu_to_le16(params->mtu); 4343 gen_data->mtu = cpu_to_le16(params->mtu);
4404 gen_data->func_id = o->func_id; 4344 gen_data->func_id = o->func_id;
4405 4345
4406
4407 gen_data->cos = params->cos; 4346 gen_data->cos = params->cos;
4408 4347
4409 gen_data->traffic_type = 4348 gen_data->traffic_type =
@@ -4530,7 +4469,6 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4530 cpu_to_le16(params->silent_removal_value); 4469 cpu_to_le16(params->silent_removal_value);
4531 rx_data->silent_vlan_mask = 4470 rx_data->silent_vlan_mask =
4532 cpu_to_le16(params->silent_removal_mask); 4471 cpu_to_le16(params->silent_removal_mask);
4533
4534} 4472}
4535 4473
4536/* initialize the general, tx and rx parts of a queue object */ 4474/* initialize the general, tx and rx parts of a queue object */
@@ -4652,12 +4590,11 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4652 /* Fill the ramrod data */ 4590 /* Fill the ramrod data */
4653 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4591 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4654 4592
4655 /* 4593 /* No need for an explicit memory barrier here as long we would
4656 * No need for an explicit memory barrier here as long we would 4594 * need to ensure the ordering of writing to the SPQ element
4657 * need to ensure the ordering of writing to the SPQ element 4595 * and updating of the SPQ producer which involves a memory
4658 * and updating of the SPQ producer which involves a memory 4596 * read and we will have to put a full memory barrier there
4659 * read and we will have to put a full memory barrier there 4597 * (inside bnx2x_sp_post()).
4660 * (inside bnx2x_sp_post()).
4661 */ 4598 */
4662 4599
4663 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4600 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
@@ -4681,12 +4618,11 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4681 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4618 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4682 bnx2x_q_fill_setup_data_e2(bp, params, rdata); 4619 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4683 4620
4684 /* 4621 /* No need for an explicit memory barrier here as long we would
4685 * No need for an explicit memory barrier here as long we would 4622 * need to ensure the ordering of writing to the SPQ element
4686 * need to ensure the ordering of writing to the SPQ element 4623 * and updating of the SPQ producer which involves a memory
4687 * and updating of the SPQ producer which involves a memory 4624 * read and we will have to put a full memory barrier there
4688 * read and we will have to put a full memory barrier there 4625 * (inside bnx2x_sp_post()).
4689 * (inside bnx2x_sp_post()).
4690 */ 4626 */
4691 4627
4692 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4628 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
@@ -4706,7 +4642,6 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4706 &params->params.tx_only; 4642 &params->params.tx_only;
4707 u8 cid_index = tx_only_params->cid_index; 4643 u8 cid_index = tx_only_params->cid_index;
4708 4644
4709
4710 if (cid_index >= o->max_cos) { 4645 if (cid_index >= o->max_cos) {
4711 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", 4646 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4712 o->cl_id, cid_index); 4647 o->cl_id, cid_index);
@@ -4727,12 +4662,11 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4727 o->cids[cid_index], rdata->general.client_id, 4662 o->cids[cid_index], rdata->general.client_id,
4728 rdata->general.sp_client_id, rdata->general.cos); 4663 rdata->general.sp_client_id, rdata->general.cos);
4729 4664
4730 /* 4665 /* No need for an explicit memory barrier here as long we would
4731 * No need for an explicit memory barrier here as long we would 4666 * need to ensure the ordering of writing to the SPQ element
4732 * need to ensure the ordering of writing to the SPQ element 4667 * and updating of the SPQ producer which involves a memory
4733 * and updating of the SPQ producer which involves a memory 4668 * read and we will have to put a full memory barrier there
4734 * read and we will have to put a full memory barrier there 4669 * (inside bnx2x_sp_post()).
4735 * (inside bnx2x_sp_post()).
4736 */ 4670 */
4737 4671
4738 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], 4672 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
@@ -4761,7 +4695,7 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4761 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, 4695 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4762 &params->update_flags); 4696 &params->update_flags);
4763 4697
4764 /* Outer VLAN sripping */ 4698 /* Outer VLAN stripping */
4765 data->outer_vlan_removal_enable_flg = 4699 data->outer_vlan_removal_enable_flg =
4766 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags); 4700 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4767 data->outer_vlan_removal_change_flg = 4701 data->outer_vlan_removal_change_flg =
@@ -4816,19 +4750,17 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
4816 return -EINVAL; 4750 return -EINVAL;
4817 } 4751 }
4818 4752
4819
4820 /* Clear the ramrod data */ 4753 /* Clear the ramrod data */
4821 memset(rdata, 0, sizeof(*rdata)); 4754 memset(rdata, 0, sizeof(*rdata));
4822 4755
4823 /* Fill the ramrod data */ 4756 /* Fill the ramrod data */
4824 bnx2x_q_fill_update_data(bp, o, update_params, rdata); 4757 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4825 4758
4826 /* 4759 /* No need for an explicit memory barrier here as long we would
4827 * No need for an explicit memory barrier here as long we would 4760 * need to ensure the ordering of writing to the SPQ element
4828 * need to ensure the ordering of writing to the SPQ element 4761 * and updating of the SPQ producer which involves a memory
4829 * and updating of the SPQ producer which involves a memory 4762 * read and we will have to put a full memory barrier there
4830 * read and we will have to put a full memory barrier there 4763 * (inside bnx2x_sp_post()).
4831 * (inside bnx2x_sp_post()).
4832 */ 4764 */
4833 4765
4834 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, 4766 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
@@ -5038,8 +4970,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5038 &params->params.update; 4970 &params->params.update;
5039 u8 next_tx_only = o->num_tx_only; 4971 u8 next_tx_only = o->num_tx_only;
5040 4972
5041 /* 4973 /* Forget all pending for completion commands if a driver only state
5042 * Forget all pending for completion commands if a driver only state
5043 * transition has been requested. 4974 * transition has been requested.
5044 */ 4975 */
5045 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { 4976 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
@@ -5047,8 +4978,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5047 o->next_state = BNX2X_Q_STATE_MAX; 4978 o->next_state = BNX2X_Q_STATE_MAX;
5048 } 4979 }
5049 4980
5050 /* 4981 /* Don't allow a next state transition if we are in the middle of
5051 * Don't allow a next state transition if we are in the middle of
5052 * the previous one. 4982 * the previous one.
5053 */ 4983 */
5054 if (o->pending) { 4984 if (o->pending) {
@@ -5257,8 +5187,7 @@ enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5257 if (o->pending) 5187 if (o->pending)
5258 return BNX2X_F_STATE_MAX; 5188 return BNX2X_F_STATE_MAX;
5259 5189
5260 /* 5190 /* unsure the order of reading of o->pending and o->state
5261 * unsure the order of reading of o->pending and o->state
5262 * o->pending should be read first 5191 * o->pending should be read first
5263 */ 5192 */
5264 rmb(); 5193 rmb();
@@ -5356,8 +5285,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
5356 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; 5285 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5357 enum bnx2x_func_cmd cmd = params->cmd; 5286 enum bnx2x_func_cmd cmd = params->cmd;
5358 5287
5359 /* 5288 /* Forget all pending for completion commands if a driver only state
5360 * Forget all pending for completion commands if a driver only state
5361 * transition has been requested. 5289 * transition has been requested.
5362 */ 5290 */
5363 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { 5291 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
@@ -5365,8 +5293,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
5365 o->next_state = BNX2X_F_STATE_MAX; 5293 o->next_state = BNX2X_F_STATE_MAX;
5366 } 5294 }
5367 5295
5368 /* 5296 /* Don't allow a next state transition if we are in the middle of
5369 * Don't allow a next state transition if we are in the middle of
5370 * the previous one. 5297 * the previous one.
5371 */ 5298 */
5372 if (o->pending) 5299 if (o->pending)
@@ -5539,7 +5466,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
5539 goto init_err; 5466 goto init_err;
5540 } 5467 }
5541 5468
5542 /* Handle the beginning of COMMON_XXX pases separatelly... */ 5469 /* Handle the beginning of COMMON_XXX pases separately... */
5543 switch (load_code) { 5470 switch (load_code) {
5544 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 5471 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5545 rc = bnx2x_func_init_cmn_chip(bp, drv); 5472 rc = bnx2x_func_init_cmn_chip(bp, drv);
@@ -5573,7 +5500,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
5573init_err: 5500init_err:
5574 drv->gunzip_end(bp); 5501 drv->gunzip_end(bp);
5575 5502
5576 /* In case of success, complete the comand immediatelly: no ramrods 5503 /* In case of success, complete the command immediately: no ramrods
5577 * have been sent. 5504 * have been sent.
5578 */ 5505 */
5579 if (!rc) 5506 if (!rc)
@@ -5598,7 +5525,7 @@ static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5598} 5525}
5599 5526
5600/** 5527/**
5601 * bnx2x_func_reset_port - reser HW at port stage 5528 * bnx2x_func_reset_port - reset HW at port stage
5602 * 5529 *
5603 * @bp: device handle 5530 * @bp: device handle
5604 * @drv: 5531 * @drv:
@@ -5620,7 +5547,7 @@ static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5620} 5547}
5621 5548
5622/** 5549/**
5623 * bnx2x_func_reset_cmn - reser HW at common stage 5550 * bnx2x_func_reset_cmn - reset HW at common stage
5624 * 5551 *
5625 * @bp: device handle 5552 * @bp: device handle
5626 * @drv: 5553 * @drv:
@@ -5636,7 +5563,6 @@ static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5636 drv->reset_hw_cmn(bp); 5563 drv->reset_hw_cmn(bp);
5637} 5564}
5638 5565
5639
5640static inline int bnx2x_func_hw_reset(struct bnx2x *bp, 5566static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5641 struct bnx2x_func_state_params *params) 5567 struct bnx2x_func_state_params *params)
5642{ 5568{
@@ -5663,7 +5589,7 @@ static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5663 break; 5589 break;
5664 } 5590 }
5665 5591
5666 /* Complete the comand immediatelly: no ramrods have been sent. */ 5592 /* Complete the command immediately: no ramrods have been sent. */
5667 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); 5593 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5668 5594
5669 return 0; 5595 return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 43c00bc84a08..798dfe996733 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -34,8 +34,7 @@ enum {
34 RAMROD_RESTORE, 34 RAMROD_RESTORE,
35 /* Execute the next command now */ 35 /* Execute the next command now */
36 RAMROD_EXEC, 36 RAMROD_EXEC,
37 /* 37 /* Don't add a new command and continue execution of postponed
38 * Don't add a new command and continue execution of posponed
39 * commands. If not set a new command will be added to the 38 * commands. If not set a new command will be added to the
40 * pending commands list. 39 * pending commands list.
41 */ 40 */
@@ -129,8 +128,7 @@ enum bnx2x_vlan_mac_cmd {
129struct bnx2x_vlan_mac_data { 128struct bnx2x_vlan_mac_data {
130 /* Requested command: BNX2X_VLAN_MAC_XX */ 129 /* Requested command: BNX2X_VLAN_MAC_XX */
131 enum bnx2x_vlan_mac_cmd cmd; 130 enum bnx2x_vlan_mac_cmd cmd;
132 /* 131 /* used to contain the data related vlan_mac_flags bits from
133 * used to contain the data related vlan_mac_flags bits from
134 * ramrod parameters. 132 * ramrod parameters.
135 */ 133 */
136 unsigned long vlan_mac_flags; 134 unsigned long vlan_mac_flags;
@@ -190,14 +188,10 @@ typedef struct bnx2x_exeq_elem *
190 struct bnx2x_exeq_elem *elem); 188 struct bnx2x_exeq_elem *elem);
191 189
192struct bnx2x_exe_queue_obj { 190struct bnx2x_exe_queue_obj {
193 /* 191 /* Commands pending for an execution. */
194 * Commands pending for an execution.
195 */
196 struct list_head exe_queue; 192 struct list_head exe_queue;
197 193
198 /* 194 /* Commands pending for an completion. */
199 * Commands pending for an completion.
200 */
201 struct list_head pending_comp; 195 struct list_head pending_comp;
202 196
203 spinlock_t lock; 197 spinlock_t lock;
@@ -245,14 +239,13 @@ struct bnx2x_exe_queue_obj {
245}; 239};
246/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ 240/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
247/* 241/*
248 * Element in the VLAN_MAC registry list having all currenty configured 242 * Element in the VLAN_MAC registry list having all currently configured
249 * rules. 243 * rules.
250 */ 244 */
251struct bnx2x_vlan_mac_registry_elem { 245struct bnx2x_vlan_mac_registry_elem {
252 struct list_head link; 246 struct list_head link;
253 247
254 /* 248 /* Used to store the cam offset used for the mac/vlan/vlan-mac.
255 * Used to store the cam offset used for the mac/vlan/vlan-mac.
256 * Relevant for 57710 and 57711 only. VLANs and MACs share the 249 * Relevant for 57710 and 57711 only. VLANs and MACs share the
257 * same CAM for these chips. 250 * same CAM for these chips.
258 */ 251 */
@@ -310,7 +303,7 @@ struct bnx2x_vlan_mac_obj {
310 * @param n number of elements to get 303 * @param n number of elements to get
311 * @param buf buffer preallocated by caller into which elements 304 * @param buf buffer preallocated by caller into which elements
312 * will be copied. Note elements are 4-byte aligned 305 * will be copied. Note elements are 4-byte aligned
313 * so buffer size must be able to accomodate the 306 * so buffer size must be able to accommodate the
314 * aligned elements. 307 * aligned elements.
315 * 308 *
316 * @return number of copied bytes 309 * @return number of copied bytes
@@ -395,7 +388,7 @@ struct bnx2x_vlan_mac_obj {
395 * @param bp 388 * @param bp
396 * @param p Command parameters (RAMROD_COMP_WAIT bit in 389 * @param p Command parameters (RAMROD_COMP_WAIT bit in
397 * ramrod_flags is only taken into an account) 390 * ramrod_flags is only taken into an account)
398 * @param ppos a pointer to the cooky that should be given back in the 391 * @param ppos a pointer to the cookie that should be given back in the
399 * next call to make function handle the next element. If 392 * next call to make function handle the next element. If
400 * *ppos is set to NULL it will restart the iterator. 393 * *ppos is set to NULL it will restart the iterator.
401 * If returned *ppos == NULL this means that the last 394 * If returned *ppos == NULL this means that the last
@@ -408,7 +401,7 @@ struct bnx2x_vlan_mac_obj {
408 struct bnx2x_vlan_mac_registry_elem **ppos); 401 struct bnx2x_vlan_mac_registry_elem **ppos);
409 402
410 /** 403 /**
411 * Should be called on a completion arival. 404 * Should be called on a completion arrival.
412 * 405 *
413 * @param bp 406 * @param bp
414 * @param o 407 * @param o
@@ -447,7 +440,7 @@ void bnx2x_set_mac_in_nig(struct bnx2x *bp,
447 440
448/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ 441/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
449 442
450/* RX_MODE ramrod spesial flags: set in rx_mode_flags field in 443/* RX_MODE ramrod special flags: set in rx_mode_flags field in
451 * a bnx2x_rx_mode_ramrod_params. 444 * a bnx2x_rx_mode_ramrod_params.
452 */ 445 */
453enum { 446enum {
@@ -475,8 +468,7 @@ struct bnx2x_rx_mode_ramrod_params {
475 unsigned long ramrod_flags; 468 unsigned long ramrod_flags;
476 unsigned long rx_mode_flags; 469 unsigned long rx_mode_flags;
477 470
478 /* 471 /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
479 * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
480 * a tstorm_eth_mac_filter_config (e1x). 472 * a tstorm_eth_mac_filter_config (e1x).
481 */ 473 */
482 void *rdata; 474 void *rdata;
@@ -646,12 +638,11 @@ struct bnx2x_credit_pool_obj {
646 /* Maximum allowed credit. put() will check against it. */ 638 /* Maximum allowed credit. put() will check against it. */
647 int pool_sz; 639 int pool_sz;
648 640
649 /* 641 /* Allocate a pool table statically.
650 * Allocate a pool table statically.
651 * 642 *
652 * Currently the mamimum allowed size is MAX_MAC_CREDIT_E2(272) 643 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
653 * 644 *
654 * The set bit in the table will mean that the entry is available. 645 * The set bit in the table will mean that the entry is available.
655 */ 646 */
656#define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) 647#define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
657 u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; 648 u64 pool_mirror[BNX2X_POOL_VEC_SIZE];
@@ -832,7 +823,7 @@ enum {
832 BNX2X_Q_FLG_TUN_INC_INNER_IP_ID 823 BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
833}; 824};
834 825
835/* Queue type options: queue type may be a compination of below. */ 826/* Queue type options: queue type may be a combination of below. */
836enum bnx2x_q_type { 827enum bnx2x_q_type {
837 /** TODO: Consider moving both these flags into the init() 828 /** TODO: Consider moving both these flags into the init()
838 * ramrod params. 829 * ramrod params.
@@ -1002,10 +993,9 @@ struct bnx2x_queue_sp_obj {
1002 u8 cl_id; 993 u8 cl_id;
1003 u8 func_id; 994 u8 func_id;
1004 995
1005 /* 996 /* number of traffic classes supported by queue.
1006 * number of traffic classes supported by queue. 997 * The primary connection of the queue supports the first traffic
1007 * The primary connection of the queue suppotrs the first traffic 998 * class. Any further traffic class is supported by a tx-only
1008 * class. Any further traffic class is suppoted by a tx-only
1009 * connection. 999 * connection.
1010 * 1000 *
1011 * Therefore max_cos is also a number of valid entries in the cids 1001 * Therefore max_cos is also a number of valid entries in the cids
@@ -1021,7 +1011,7 @@ struct bnx2x_queue_sp_obj {
1021 1011
1022 /* BNX2X_Q_CMD_XX bits. This object implements "one 1012 /* BNX2X_Q_CMD_XX bits. This object implements "one
1023 * pending" paradigm but for debug and tracing purposes it's 1013 * pending" paradigm but for debug and tracing purposes it's
1024 * more convinient to have different bits for different 1014 * more convenient to have different bits for different
1025 * commands. 1015 * commands.
1026 */ 1016 */
1027 unsigned long pending; 1017 unsigned long pending;
@@ -1210,7 +1200,7 @@ struct bnx2x_func_sp_obj {
1210 1200
1211 /* BNX2X_FUNC_CMD_XX bits. This object implements "one 1201 /* BNX2X_FUNC_CMD_XX bits. This object implements "one
1212 * pending" paradigm but for debug and tracing purposes it's 1202 * pending" paradigm but for debug and tracing purposes it's
1213 * more convinient to have different bits for different 1203 * more convenient to have different bits for different
1214 * commands. 1204 * commands.
1215 */ 1205 */
1216 unsigned long pending; 1206 unsigned long pending;
@@ -1329,7 +1319,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
1329 * 1319 *
1330 * @p: Command parameters 1320 * @p: Command parameters
1331 * 1321 *
1332 * Return: 0 - if operation was successfull and there is no pending completions, 1322 * Return: 0 - if operation was successful and there is no pending completions,
1333 * positive number - if there are pending completions, 1323 * positive number - if there are pending completions,
1334 * negative - if there were errors 1324 * negative - if there were errors
1335 */ 1325 */
@@ -1361,7 +1351,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
1361 * the current command will be enqueued to the tail of the 1351 * the current command will be enqueued to the tail of the
1362 * pending commands list. 1352 * pending commands list.
1363 * 1353 *
1364 * Return: 0 is operation was successfull and there are no pending completions, 1354 * Return: 0 is operation was successful and there are no pending completions,
1365 * negative if there were errors, positive if there are pending 1355 * negative if there were errors, positive if there are pending
1366 * completions. 1356 * completions.
1367 */ 1357 */
@@ -1377,7 +1367,6 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
1377 struct bnx2x_credit_pool_obj *p, u8 func_id, 1367 struct bnx2x_credit_pool_obj *p, u8 func_id,
1378 u8 func_num); 1368 u8 func_num);
1379 1369
1380
1381/****************** RSS CONFIGURATION ****************/ 1370/****************** RSS CONFIGURATION ****************/
1382void bnx2x_init_rss_config_obj(struct bnx2x *bp, 1371void bnx2x_init_rss_config_obj(struct bnx2x *bp,
1383 struct bnx2x_rss_config_obj *rss_obj, 1372 struct bnx2x_rss_config_obj *rss_obj,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2ce7c7471367..95861efb5051 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1341,7 +1341,7 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
1341 */ 1341 */
1342 1342
1343/* internal vf enable - until vf is enabled internally all transactions 1343/* internal vf enable - until vf is enabled internally all transactions
1344 * are blocked. this routine should always be called last with pretend. 1344 * are blocked. This routine should always be called last with pretend.
1345 */ 1345 */
1346static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) 1346static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
1347{ 1347{
@@ -1459,21 +1459,16 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
1459 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 1459 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1460 1460
1461 if (!vf) 1461 if (!vf)
1462 goto unknown_dev; 1462 return false;
1463 1463
1464 dev = pci_get_bus_and_slot(vf->bus, vf->devfn); 1464 dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
1465 if (dev) 1465 if (dev)
1466 return bnx2x_is_pcie_pending(dev); 1466 return bnx2x_is_pcie_pending(dev);
1467
1468unknown_dev:
1469 return false; 1467 return false;
1470} 1468}
1471 1469
1472int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) 1470int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
1473{ 1471{
1474 /* Wait 100ms */
1475 msleep(100);
1476
1477 /* Verify no pending pci transactions */ 1472 /* Verify no pending pci transactions */
1478 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) 1473 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
1479 BNX2X_ERR("PCIE Transactions still pending\n"); 1474 BNX2X_ERR("PCIE Transactions still pending\n");
@@ -1620,7 +1615,7 @@ next_vf_to_clean:
1620 i++) 1615 i++)
1621 ; 1616 ;
1622 1617
1623 DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i, 1618 DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
1624 BNX2X_NR_VIRTFN(bp)); 1619 BNX2X_NR_VIRTFN(bp));
1625 1620
1626 if (i < BNX2X_NR_VIRTFN(bp)) { 1621 if (i < BNX2X_NR_VIRTFN(bp)) {
@@ -1743,7 +1738,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
1743 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 1738 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1744 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 1739 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1745 1740
1746 /* set the number of VF alllowed doorbells to the full DQ range */ 1741 /* set the number of VF allowed doorbells to the full DQ range */
1747 REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); 1742 REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
1748 1743
1749 /* set the VF doorbell threshold */ 1744 /* set the VF doorbell threshold */
@@ -2176,6 +2171,9 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
2176 2171
2177 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 2172 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
2178 2173
2174 /* let FLR complete ... */
2175 msleep(100);
2176
2179 /* initialize vf database */ 2177 /* initialize vf database */
2180 for_each_vf(bp, vfid) { 2178 for_each_vf(bp, vfid) {
2181 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 2179 struct bnx2x_virtf *vf = BP_VF(bp, vfid);
@@ -2403,7 +2401,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2403 2401
2404 /* extract vf and rxq index from vf_cid - relies on the following: 2402 /* extract vf and rxq index from vf_cid - relies on the following:
2405 * 1. vfid on cid reflects the true abs_vfid 2403 * 1. vfid on cid reflects the true abs_vfid
2406 * 2. the max number of VFs (per path) is 64 2404 * 2. The max number of VFs (per path) is 64
2407 */ 2405 */
2408 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 2406 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
2409 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2407 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
@@ -2461,7 +2459,7 @@ static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
2461{ 2459{
2462 /* extract the vf from vf_cid - relies on the following: 2460 /* extract the vf from vf_cid - relies on the following:
2463 * 1. vfid on cid reflects the true abs_vfid 2461 * 1. vfid on cid reflects the true abs_vfid
2464 * 2. the max number of VFs (per path) is 64 2462 * 2. The max number of VFs (per path) is 64
2465 */ 2463 */
2466 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2464 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2467 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 2465 return bnx2x_vf_by_abs_fid(bp, abs_vfid);
@@ -2480,7 +2478,7 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2480 if (vf) { 2478 if (vf) {
2481 /* extract queue index from vf_cid - relies on the following: 2479 /* extract queue index from vf_cid - relies on the following:
2482 * 1. vfid on cid reflects the true abs_vfid 2480 * 1. vfid on cid reflects the true abs_vfid
2483 * 2. the max number of VFs (per path) is 64 2481 * 2. The max number of VFs (per path) is 64
2484 */ 2482 */
2485 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); 2483 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2486 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); 2484 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
@@ -2705,7 +2703,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2705 } 2703 }
2706 2704
2707 /* static allocation: 2705 /* static allocation:
2708 * the global maximum number are fixed per VF. fail the request if 2706 * the global maximum number are fixed per VF. Fail the request if
2709 * requested number exceed these globals 2707 * requested number exceed these globals
2710 */ 2708 */
2711 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 2709 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
@@ -2777,6 +2775,10 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2777 vf->abs_vfid, vf->state); 2775 vf->abs_vfid, vf->state);
2778 return -EINVAL; 2776 return -EINVAL;
2779 } 2777 }
2778
2779 /* let FLR complete ... */
2780 msleep(100);
2781
2780 /* FLR cleanup epilogue */ 2782 /* FLR cleanup epilogue */
2781 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2783 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2782 return -EBUSY; 2784 return -EBUSY;
@@ -2890,7 +2892,7 @@ int bnx2x_vfop_close_cmd(struct bnx2x *bp,
2890 return -ENOMEM; 2892 return -ENOMEM;
2891} 2893}
2892 2894
2893/* VF release can be called either: 1. the VF was acquired but 2895/* VF release can be called either: 1. The VF was acquired but
2894 * not enabled 2. the vf was enabled or in the process of being 2896 * not enabled 2. the vf was enabled or in the process of being
2895 * enabled 2897 * enabled
2896 */ 2898 */
@@ -3024,7 +3026,6 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3024 3026
3025int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 3027int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3026{ 3028{
3027
3028 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 3029 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
3029 3030
3030 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 3031 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
@@ -3032,7 +3033,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3032 3033
3033 /* HW channel is only operational when PF is up */ 3034 /* HW channel is only operational when PF is up */
3034 if (bp->state != BNX2X_STATE_OPEN) { 3035 if (bp->state != BNX2X_STATE_OPEN) {
3035 BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down"); 3036 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
3036 return -EINVAL; 3037 return -EINVAL;
3037 } 3038 }
3038 3039
@@ -3086,6 +3087,11 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
3086static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, 3087static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
3087 struct bnx2x_virtf *vf) 3088 struct bnx2x_virtf *vf)
3088{ 3089{
3090 if (bp->state != BNX2X_STATE_OPEN) {
3091 BNX2X_ERR("vf ndo called though PF is down\n");
3092 return -EINVAL;
3093 }
3094
3089 if (!IS_SRIOV(bp)) { 3095 if (!IS_SRIOV(bp)) {
3090 BNX2X_ERR("vf ndo called though sriov is disabled\n"); 3096 BNX2X_ERR("vf ndo called though sriov is disabled\n");
3091 return -EINVAL; 3097 return -EINVAL;
@@ -3141,7 +3147,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3141 /* mac configured by ndo so its in bulletin board */ 3147 /* mac configured by ndo so its in bulletin board */
3142 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 3148 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
3143 else 3149 else
3144 /* funtion has not been loaded yet. Show mac as 0s */ 3150 /* function has not been loaded yet. Show mac as 0s */
3145 memset(&ivi->mac, 0, ETH_ALEN); 3151 memset(&ivi->mac, 0, ETH_ALEN);
3146 3152
3147 /* vlan */ 3153 /* vlan */
@@ -3149,7 +3155,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3149 /* vlan configured by ndo so its in bulletin board */ 3155 /* vlan configured by ndo so its in bulletin board */
3150 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 3156 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
3151 else 3157 else
3152 /* funtion has not been loaded yet. Show vlans as 0s */ 3158 /* function has not been loaded yet. Show vlans as 0s */
3153 memset(&ivi->vlan, 0, VLAN_HLEN); 3159 memset(&ivi->vlan, 0, VLAN_HLEN);
3154 } 3160 }
3155 3161
@@ -3189,7 +3195,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3189 return -EINVAL; 3195 return -EINVAL;
3190 } 3196 }
3191 3197
3192 /* update PF's copy of the VF's bulletin. will no longer accept mac 3198 /* update PF's copy of the VF's bulletin. Will no longer accept mac
3193 * configuration requests from vf unless match this mac 3199 * configuration requests from vf unless match this mac
3194 */ 3200 */
3195 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; 3201 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
@@ -3358,8 +3364,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3358 return 0; 3364 return 0;
3359} 3365}
3360 3366
3361/* crc is the first field in the bulletin board. compute the crc over the 3367/* crc is the first field in the bulletin board. Compute the crc over the
3362 * entire bulletin board excluding the crc field itself 3368 * entire bulletin board excluding the crc field itself. Use the length field
3369 * as the Bulletin Board was posted by a PF with possibly a different version
3370 * from the vf which will sample it. Therefore, the length is computed by the
3371 * PF and the used blindly by the VF.
3363 */ 3372 */
3364u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, 3373u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
3365 struct pf_vf_bulletin_content *bulletin) 3374 struct pf_vf_bulletin_content *bulletin)
@@ -3389,7 +3398,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3389 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, 3398 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
3390 &bulletin)) 3399 &bulletin))
3391 break; 3400 break;
3392 BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n", 3401 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
3393 bulletin.crc, 3402 bulletin.crc,
3394 bnx2x_crc_vf_bulletin(bp, &bulletin)); 3403 bnx2x_crc_vf_bulletin(bp, &bulletin));
3395 } 3404 }
@@ -3417,6 +3426,20 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3417 return PFVF_BULLETIN_UPDATED; 3426 return PFVF_BULLETIN_UPDATED;
3418} 3427}
3419 3428
3429void bnx2x_timer_sriov(struct bnx2x *bp)
3430{
3431 bnx2x_sample_bulletin(bp);
3432
3433 /* if channel is down we need to self destruct */
3434 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
3435 smp_mb__before_clear_bit();
3436 set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3437 &bp->sp_rtnl_state);
3438 smp_mb__after_clear_bit();
3439 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3440 }
3441}
3442
3420void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 3443void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3421{ 3444{
3422 /* vf doorbells are embedded within the regview */ 3445 /* vf doorbells are embedded within the regview */
@@ -3452,7 +3475,7 @@ int bnx2x_open_epilog(struct bnx2x *bp)
3452 * register_netdevice which must have rtnl lock taken. As we are holding 3475 * register_netdevice which must have rtnl lock taken. As we are holding
3453 * the lock right now, that could only work if the probe would not take 3476 * the lock right now, that could only work if the probe would not take
3454 * the lock. However, as the probe of the vf may be called from other 3477 * the lock. However, as the probe of the vf may be called from other
3455 * contexts as well (such as passthrough to vm failes) it can't assume 3478 * contexts as well (such as passthrough to vm fails) it can't assume
3456 * the lock is being held for it. Using delayed work here allows the 3479 * the lock is being held for it. Using delayed work here allows the
3457 * probe code to simply take the lock (i.e. wait for it to be released 3480 * probe code to simply take the lock (i.e. wait for it to be released
3458 * if it is being held). We only want to do this if the number of VFs 3481 * if it is being held). We only want to do this if the number of VFs
@@ -3467,3 +3490,23 @@ int bnx2x_open_epilog(struct bnx2x *bp)
3467 3490
3468 return 0; 3491 return 0;
3469} 3492}
3493
3494void bnx2x_iov_channel_down(struct bnx2x *bp)
3495{
3496 int vf_idx;
3497 struct pf_vf_bulletin_content *bulletin;
3498
3499 if (!IS_SRIOV(bp))
3500 return;
3501
3502 for_each_vf(bp, vf_idx) {
3503 /* locate this VFs bulletin board and update the channel down
3504 * bit
3505 */
3506 bulletin = BP_VF_BULLETIN(bp, vf_idx);
3507 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3508
3509 /* update vf bulletin board */
3510 bnx2x_post_vf_bulletin(bp, vf_idx);
3511 }
3512}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d67ddc554c0f..d143a7cdbbbe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -197,7 +197,7 @@ struct bnx2x_virtf {
197 197
198 u8 state; 198 u8 state;
199#define VF_FREE 0 /* VF ready to be acquired holds no resc */ 199#define VF_FREE 0 /* VF ready to be acquired holds no resc */
200#define VF_ACQUIRED 1 /* VF aquired, but not initalized */ 200#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
201#define VF_ENABLED 2 /* VF Enabled */ 201#define VF_ENABLED 2 /* VF Enabled */
202#define VF_RESET 3 /* VF FLR'd, pending cleanup */ 202#define VF_RESET 3 /* VF FLR'd, pending cleanup */
203 203
@@ -496,7 +496,7 @@ enum {
496 else if ((next) == VFOP_VERIFY_PEND) \ 496 else if ((next) == VFOP_VERIFY_PEND) \
497 BNX2X_ERR("expected pending\n"); \ 497 BNX2X_ERR("expected pending\n"); \
498 else { \ 498 else { \
499 DP(BNX2X_MSG_IOV, "no ramrod. scheduling\n"); \ 499 DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \
500 atomic_set(&vf->op_in_progress, 1); \ 500 atomic_set(&vf->op_in_progress, 1); \
501 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \ 501 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \
502 return; \ 502 return; \
@@ -722,7 +722,6 @@ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
722 struct pf_vf_bulletin_content *bulletin); 722 struct pf_vf_bulletin_content *bulletin);
723int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); 723int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
724 724
725
726enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 725enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
727 726
728/* VF side vfpf channel functions */ 727/* VF side vfpf channel functions */
@@ -752,6 +751,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
752} 751}
753 752
754enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 753enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
754void bnx2x_timer_sriov(struct bnx2x *bp);
755void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); 755void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
756int bnx2x_vf_pci_alloc(struct bnx2x *bp); 756int bnx2x_vf_pci_alloc(struct bnx2x *bp);
757int bnx2x_enable_sriov(struct bnx2x *bp); 757int bnx2x_enable_sriov(struct bnx2x *bp);
@@ -762,6 +762,7 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp)
762} 762}
763void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); 763void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
764int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); 764int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
765void bnx2x_iov_channel_down(struct bnx2x *bp);
765int bnx2x_open_epilog(struct bnx2x *bp); 766int bnx2x_open_epilog(struct bnx2x *bp);
766 767
767#else /* CONFIG_BNX2X_SRIOV */ 768#else /* CONFIG_BNX2X_SRIOV */
@@ -809,6 +810,7 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp
809{ 810{
810 return PFVF_BULLETIN_UNCHANGED; 811 return PFVF_BULLETIN_UNCHANGED;
811} 812}
813static inline void bnx2x_timer_sriov(struct bnx2x *bp) {}
812 814
813static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 815static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
814{ 816{
@@ -818,6 +820,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
818static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } 820static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
819static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} 821static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
820static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } 822static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
823static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
821static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; } 824static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
822 825
823#endif /* CONFIG_BNX2X_SRIOV */ 826#endif /* CONFIG_BNX2X_SRIOV */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 2ca3d94fcec2..98366abd02bd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1002,7 +1002,6 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
1002 qstats->valid_bytes_received_lo = 1002 qstats->valid_bytes_received_lo =
1003 qstats->total_bytes_received_lo; 1003 qstats->total_bytes_received_lo;
1004 1004
1005
1006 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, 1005 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
1007 total_unicast_packets_received); 1006 total_unicast_packets_received);
1008 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, 1007 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index d117f472816c..853824d258e8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -40,7 +40,6 @@ struct nig_stats {
40 u32 egress_mac_pkt1_hi; 40 u32 egress_mac_pkt1_hi;
41}; 41};
42 42
43
44enum bnx2x_stats_event { 43enum bnx2x_stats_event {
45 STATS_EVENT_PMF = 0, 44 STATS_EVENT_PMF = 0,
46 STATS_EVENT_LINK_UP, 45 STATS_EVENT_LINK_UP,
@@ -208,7 +207,6 @@ struct bnx2x_eth_stats {
208 u32 eee_tx_lpi; 207 u32 eee_tx_lpi;
209}; 208};
210 209
211
212struct bnx2x_eth_q_stats { 210struct bnx2x_eth_q_stats {
213 u32 total_unicast_bytes_received_hi; 211 u32 total_unicast_bytes_received_hi;
214 u32 total_unicast_bytes_received_lo; 212 u32 total_unicast_bytes_received_lo;
@@ -331,7 +329,6 @@ struct bnx2x_fw_port_stats_old {
331 u32 mac_discard; 329 u32 mac_discard;
332}; 330};
333 331
334
335/**************************************************************************** 332/****************************************************************************
336* Macros 333* Macros
337****************************************************************************/ 334****************************************************************************/
@@ -536,7 +533,6 @@ struct bnx2x_fw_port_stats_old {
536 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ 533 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
537 } while (0) 534 } while (0)
538 535
539
540/* forward */ 536/* forward */
541struct bnx2x; 537struct bnx2x;
542 538
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 928b074d7d80..2088063151d6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -113,7 +113,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
113{ 113{
114 struct cstorm_vf_zone_data __iomem *zone_data = 114 struct cstorm_vf_zone_data __iomem *zone_data =
115 REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START); 115 REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
116 int tout = 600, interval = 100; /* wait for 60 seconds */ 116 int tout = 100, interval = 100; /* wait for 10 seconds */
117 117
118 if (*done) { 118 if (*done) {
119 BNX2X_ERR("done was non zero before message to pf was sent\n"); 119 BNX2X_ERR("done was non zero before message to pf was sent\n");
@@ -121,6 +121,16 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
121 return -EINVAL; 121 return -EINVAL;
122 } 122 }
123 123
124 /* if PF indicated channel is down avoid sending message. Return success
125 * so calling flow can continue
126 */
127 bnx2x_sample_bulletin(bp);
128 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
129 DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
130 *done = PFVF_STATUS_SUCCESS;
131 return 0;
132 }
133
124 /* Write message address */ 134 /* Write message address */
125 writel(U64_LO(msg_mapping), 135 writel(U64_LO(msg_mapping),
126 &zone_data->non_trigger.vf_pf_channel.msg_addr_lo); 136 &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
@@ -233,7 +243,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
233 243
234 attempts++; 244 attempts++;
235 245
236 /* test whether the PF accepted our request. If not, humble the 246 /* test whether the PF accepted our request. If not, humble
237 * the request and try again. 247 * the request and try again.
238 */ 248 */
239 if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { 249 if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
@@ -333,7 +343,7 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
333 DP(BNX2X_MSG_SP, "vf released\n"); 343 DP(BNX2X_MSG_SP, "vf released\n");
334 } else { 344 } else {
335 /* PF reports error */ 345 /* PF reports error */
336 BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n", 346 BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
337 resp->hdr.status); 347 resp->hdr.status);
338 rc = -EAGAIN; 348 rc = -EAGAIN;
339 goto out; 349 goto out;
@@ -787,7 +797,7 @@ static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
787 storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); 797 storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
788} 798}
789 799
790/* enable vf_pf mailbox (aka vf-pf-chanell) */ 800/* enable vf_pf mailbox (aka vf-pf-channel) */
791void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) 801void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
792{ 802{
793 bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); 803 bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
@@ -844,7 +854,6 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
844 dmae.dst_addr_hi = vf_addr_hi; 854 dmae.dst_addr_hi = vf_addr_hi;
845 } 855 }
846 dmae.len = len32; 856 dmae.len = len32;
847 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE);
848 857
849 /* issue the command and wait for completion */ 858 /* issue the command and wait for completion */
850 return bnx2x_issue_dmae_with_comp(bp, &dmae); 859 return bnx2x_issue_dmae_with_comp(bp, &dmae);
@@ -1072,7 +1081,7 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
1072 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) 1081 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
1073 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); 1082 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
1074 1083
1075 /* outer vlan removal is set according to the PF's multi fuction mode */ 1084 /* outer vlan removal is set according to PF's multi function mode */
1076 if (IS_MF_SD(bp)) 1085 if (IS_MF_SD(bp))
1077 __set_bit(BNX2X_Q_FLG_OV, sp_q_flags); 1086 __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
1078} 1087}
@@ -1104,7 +1113,7 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1104 struct bnx2x_queue_init_params *init_p; 1113 struct bnx2x_queue_init_params *init_p;
1105 struct bnx2x_queue_setup_params *setup_p; 1114 struct bnx2x_queue_setup_params *setup_p;
1106 1115
1107 /* reinit the VF operation context */ 1116 /* re-init the VF operation context */
1108 memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); 1117 memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
1109 setup_p = &vf->op_params.qctor.prep_qsetup; 1118 setup_p = &vf->op_params.qctor.prep_qsetup;
1110 init_p = &vf->op_params.qctor.qstate.params.init; 1119 init_p = &vf->op_params.qctor.qstate.params.init;
@@ -1588,8 +1597,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1588 * support them. Or this may be because someone wrote a crappy 1597 * support them. Or this may be because someone wrote a crappy
1589 * VF driver and is sending garbage over the channel. 1598 * VF driver and is sending garbage over the channel.
1590 */ 1599 */
1591 BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n", 1600 BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
1592 mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); 1601 mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
1602 vf->state);
1593 for (i = 0; i < 20; i++) 1603 for (i = 0; i < 20; i++)
1594 DP_CONT(BNX2X_MSG_IOV, "%x ", 1604 DP_CONT(BNX2X_MSG_IOV, "%x ",
1595 mbx->msg->req.tlv_buf_size.tlv_buffer[i]); 1605 mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
@@ -1605,8 +1615,11 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1605 bnx2x_vf_mbx_resp(bp, vf); 1615 bnx2x_vf_mbx_resp(bp, vf);
1606 } else { 1616 } else {
1607 /* can't send a response since this VF is unknown to us 1617 /* can't send a response since this VF is unknown to us
1608 * just unlock the channel and be done with. 1618 * just ack the FW to release the mailbox and unlock
1619 * the channel.
1609 */ 1620 */
1621 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1622 mmiowb();
1610 bnx2x_unlock_vf_pf_channel(bp, vf, 1623 bnx2x_unlock_vf_pf_channel(bp, vf,
1611 mbx->first_tlv.tl.type); 1624 mbx->first_tlv.tl.type);
1612 } 1625 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 41708faab575..f3ad174a3a63 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -331,7 +331,10 @@ struct pf_vf_bulletin_content {
331#define VLAN_VALID 1 /* when set, the vf should not access 331#define VLAN_VALID 1 /* when set, the vf should not access
332 * the vfpf channel 332 * the vfpf channel
333 */ 333 */
334 334#define CHANNEL_DOWN 2 /* vfpf channel is disabled. VFs are not
335 * to attempt to send messages on the
336 * channel after this bit is set
337 */
335 u8 mac[ETH_ALEN]; 338 u8 mac[ETH_ALEN];
336 u8 mac_padding[2]; 339 u8 mac_padding[2];
337 340
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 6b0dc131b20e..d78d4cf140ed 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5622,7 +5622,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5622static int cnic_netdev_event(struct notifier_block *this, unsigned long event, 5622static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5623 void *ptr) 5623 void *ptr)
5624{ 5624{
5625 struct net_device *netdev = ptr; 5625 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
5626 struct cnic_dev *dev; 5626 struct cnic_dev *dev;
5627 int new_dev = 0; 5627 int new_dev = 0;
5628 5628
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index e80bfb60c3ef..c2777712da99 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2197,7 +2197,7 @@ static const struct net_device_ops sbmac_netdev_ops = {
2197 2197
2198static int sbmac_init(struct platform_device *pldev, long long base) 2198static int sbmac_init(struct platform_device *pldev, long long base)
2199{ 2199{
2200 struct net_device *dev = dev_get_drvdata(&pldev->dev); 2200 struct net_device *dev = platform_get_drvdata(pldev);
2201 int idx = pldev->id; 2201 int idx = pldev->id;
2202 struct sbmac_softc *sc = netdev_priv(dev); 2202 struct sbmac_softc *sc = netdev_priv(dev);
2203 unsigned char *eaddr; 2203 unsigned char *eaddr;
@@ -2275,7 +2275,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2275 dev->name); 2275 dev->name);
2276 goto free_mdio; 2276 goto free_mdio;
2277 } 2277 }
2278 dev_set_drvdata(&pldev->dev, sc->mii_bus); 2278 platform_set_drvdata(pldev, sc->mii_bus);
2279 2279
2280 err = register_netdev(dev); 2280 err = register_netdev(dev);
2281 if (err) { 2281 if (err) {
@@ -2300,7 +2300,6 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2300 return 0; 2300 return 0;
2301unreg_mdio: 2301unreg_mdio:
2302 mdiobus_unregister(sc->mii_bus); 2302 mdiobus_unregister(sc->mii_bus);
2303 dev_set_drvdata(&pldev->dev, NULL);
2304free_mdio: 2303free_mdio:
2305 mdiobus_free(sc->mii_bus); 2304 mdiobus_free(sc->mii_bus);
2306uninit_ctx: 2305uninit_ctx:
@@ -2624,7 +2623,7 @@ static int sbmac_probe(struct platform_device *pldev)
2624 goto out_unmap; 2623 goto out_unmap;
2625 } 2624 }
2626 2625
2627 dev_set_drvdata(&pldev->dev, dev); 2626 platform_set_drvdata(pldev, dev);
2628 SET_NETDEV_DEV(dev, &pldev->dev); 2627 SET_NETDEV_DEV(dev, &pldev->dev);
2629 2628
2630 sc = netdev_priv(dev); 2629 sc = netdev_priv(dev);
@@ -2649,7 +2648,7 @@ out_out:
2649 2648
2650static int __exit sbmac_remove(struct platform_device *pldev) 2649static int __exit sbmac_remove(struct platform_device *pldev)
2651{ 2650{
2652 struct net_device *dev = dev_get_drvdata(&pldev->dev); 2651 struct net_device *dev = platform_get_drvdata(pldev);
2653 struct sbmac_softc *sc = netdev_priv(dev); 2652 struct sbmac_softc *sc = netdev_priv(dev);
2654 2653
2655 unregister_netdev(dev); 2654 unregister_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a13463e8a2c3..d964f302ac94 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -968,9 +968,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
968 968
969 event = APE_EVENT_STATUS_STATE_UNLOAD; 969 event = APE_EVENT_STATUS_STATE_UNLOAD;
970 break; 970 break;
971 case RESET_KIND_SUSPEND:
972 event = APE_EVENT_STATUS_STATE_SUSPEND;
973 break;
974 default: 971 default:
975 return; 972 return;
976 } 973 }
@@ -1317,8 +1314,8 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1317 1314
1318 if (err) 1315 if (err)
1319 return err; 1316 return err;
1320 if (enable)
1321 1317
1318 if (enable)
1322 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; 1319 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1323 else 1320 else
1324 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; 1321 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
@@ -1745,10 +1742,6 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1745 break; 1742 break;
1746 } 1743 }
1747 } 1744 }
1748
1749 if (kind == RESET_KIND_INIT ||
1750 kind == RESET_KIND_SUSPEND)
1751 tg3_ape_driver_state_change(tp, kind);
1752} 1745}
1753 1746
1754/* tp->lock is held. */ 1747/* tp->lock is held. */
@@ -1770,9 +1763,6 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1770 break; 1763 break;
1771 } 1764 }
1772 } 1765 }
1773
1774 if (kind == RESET_KIND_SHUTDOWN)
1775 tg3_ape_driver_state_change(tp, kind);
1776} 1766}
1777 1767
1778/* tp->lock is held. */ 1768/* tp->lock is held. */
@@ -2341,6 +2331,46 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
2341 tg3_phy_toggle_auxctl_smdsp(tp, false); 2331 tg3_phy_toggle_auxctl_smdsp(tp, false);
2342} 2332}
2343 2333
2334static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2335{
2336 u32 val;
2337 struct ethtool_eee *dest = &tp->eee;
2338
2339 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2340 return;
2341
2342 if (eee)
2343 dest = eee;
2344
2345 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2346 return;
2347
2348 /* Pull eee_active */
2349 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2351 dest->eee_active = 1;
2352 } else
2353 dest->eee_active = 0;
2354
2355 /* Pull lp advertised settings */
2356 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2357 return;
2358 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2359
2360 /* Pull advertised and eee_enabled settings */
2361 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2362 return;
2363 dest->eee_enabled = !!val;
2364 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2365
2366 /* Pull tx_lpi_enabled */
2367 val = tr32(TG3_CPMU_EEE_MODE);
2368 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2369
2370 /* Pull lpi timer value */
2371 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2372}
2373
2344static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) 2374static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2345{ 2375{
2346 u32 val; 2376 u32 val;
@@ -2364,11 +2394,8 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2364 2394
2365 tw32(TG3_CPMU_EEE_CTRL, eeectl); 2395 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2366 2396
2367 tg3_phy_cl45_read(tp, MDIO_MMD_AN, 2397 tg3_eee_pull_config(tp, NULL);
2368 TG3_CL45_D7_EEERES_STAT, &val); 2398 if (tp->eee.eee_active)
2369
2370 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2371 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2372 tp->setlpicnt = 2; 2399 tp->setlpicnt = 2;
2373 } 2400 }
2374 2401
@@ -4192,6 +4219,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
4192 4219
4193 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4220 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4194 4221
4222 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4223
4195 return 0; 4224 return 0;
4196} 4225}
4197 4226
@@ -4292,6 +4321,16 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4292 /* Advertise 1000-BaseT EEE ability */ 4321 /* Advertise 1000-BaseT EEE ability */
4293 if (advertise & ADVERTISED_1000baseT_Full) 4322 if (advertise & ADVERTISED_1000baseT_Full)
4294 val |= MDIO_AN_EEE_ADV_1000T; 4323 val |= MDIO_AN_EEE_ADV_1000T;
4324
4325 if (!tp->eee.eee_enabled) {
4326 val = 0;
4327 tp->eee.advertised = 0;
4328 } else {
4329 tp->eee.advertised = advertise &
4330 (ADVERTISED_100baseT_Full |
4331 ADVERTISED_1000baseT_Full);
4332 }
4333
4295 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 4334 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4296 if (err) 4335 if (err)
4297 val = 0; 4336 val = 0;
@@ -4536,26 +4575,23 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
4536 4575
4537static bool tg3_phy_eee_config_ok(struct tg3 *tp) 4576static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4538{ 4577{
4539 u32 val; 4578 struct ethtool_eee eee;
4540 u32 tgtadv = 0;
4541 u32 advertising = tp->link_config.advertising;
4542 4579
4543 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4580 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4544 return true; 4581 return true;
4545 4582
4546 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) 4583 tg3_eee_pull_config(tp, &eee);
4547 return false;
4548
4549 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4550
4551
4552 if (advertising & ADVERTISED_100baseT_Full)
4553 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4554 if (advertising & ADVERTISED_1000baseT_Full)
4555 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4556 4584
4557 if (val != tgtadv) 4585 if (tp->eee.eee_enabled) {
4558 return false; 4586 if (tp->eee.advertised != eee.advertised ||
4587 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4588 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4589 return false;
4590 } else {
4591 /* EEE is disabled but we're advertising */
4592 if (eee.advertised)
4593 return false;
4594 }
4559 4595
4560 return true; 4596 return true;
4561} 4597}
@@ -4656,6 +4692,42 @@ static void tg3_clear_mac_status(struct tg3 *tp)
4656 udelay(40); 4692 udelay(40);
4657} 4693}
4658 4694
4695static void tg3_setup_eee(struct tg3 *tp)
4696{
4697 u32 val;
4698
4699 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4700 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4701 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4702 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4703
4704 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4705
4706 tw32_f(TG3_CPMU_EEE_CTRL,
4707 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4708
4709 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4710 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4711 TG3_CPMU_EEEMD_LPI_IN_RX |
4712 TG3_CPMU_EEEMD_EEE_ENABLE;
4713
4714 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4715 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4716
4717 if (tg3_flag(tp, ENABLE_APE))
4718 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4719
4720 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4721
4722 tw32_f(TG3_CPMU_EEE_DBTMR1,
4723 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4724 (tp->eee.tx_lpi_timer & 0xffff));
4725
4726 tw32_f(TG3_CPMU_EEE_DBTMR2,
4727 TG3_CPMU_DBTMR2_APE_TX_2047US |
4728 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4729}
4730
4659static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) 4731static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4660{ 4732{
4661 bool current_link_up; 4733 bool current_link_up;
@@ -4822,8 +4894,10 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4822 */ 4894 */
4823 if (!eee_config_ok && 4895 if (!eee_config_ok &&
4824 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 4896 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4825 !force_reset) 4897 !force_reset) {
4898 tg3_setup_eee(tp);
4826 tg3_phy_reset(tp); 4899 tg3_phy_reset(tp);
4900 }
4827 } else { 4901 } else {
4828 if (!(bmcr & BMCR_ANENABLE) && 4902 if (!(bmcr & BMCR_ANENABLE) &&
4829 tp->link_config.speed == current_speed && 4903 tp->link_config.speed == current_speed &&
@@ -6335,9 +6409,7 @@ static void tg3_tx_recover(struct tg3 *tp)
6335 "Please report the problem to the driver maintainer " 6409 "Please report the problem to the driver maintainer "
6336 "and include system chipset information.\n"); 6410 "and include system chipset information.\n");
6337 6411
6338 spin_lock(&tp->lock);
6339 tg3_flag_set(tp, TX_RECOVERY_PENDING); 6412 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6340 spin_unlock(&tp->lock);
6341} 6413}
6342 6414
6343static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) 6415static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
@@ -9205,11 +9277,9 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9205} 9277}
9206 9278
9207/* tp->lock is held. */ 9279/* tp->lock is held. */
9208static void tg3_rings_reset(struct tg3 *tp) 9280static void tg3_tx_rcbs_disable(struct tg3 *tp)
9209{ 9281{
9210 int i; 9282 u32 txrcb, limit;
9211 u32 stblk, txrcb, rxrcb, limit;
9212 struct tg3_napi *tnapi = &tp->napi[0];
9213 9283
9214 /* Disable all transmit rings but the first. */ 9284 /* Disable all transmit rings but the first. */
9215 if (!tg3_flag(tp, 5705_PLUS)) 9285 if (!tg3_flag(tp, 5705_PLUS))
@@ -9226,7 +9296,33 @@ static void tg3_rings_reset(struct tg3 *tp)
9226 txrcb < limit; txrcb += TG3_BDINFO_SIZE) 9296 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9227 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, 9297 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9228 BDINFO_FLAGS_DISABLED); 9298 BDINFO_FLAGS_DISABLED);
9299}
9300
9301/* tp->lock is held. */
9302static void tg3_tx_rcbs_init(struct tg3 *tp)
9303{
9304 int i = 0;
9305 u32 txrcb = NIC_SRAM_SEND_RCB;
9306
9307 if (tg3_flag(tp, ENABLE_TSS))
9308 i++;
9309
9310 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9311 struct tg3_napi *tnapi = &tp->napi[i];
9312
9313 if (!tnapi->tx_ring)
9314 continue;
9315
9316 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9317 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9318 NIC_SRAM_TX_BUFFER_DESC);
9319 }
9320}
9229 9321
9322/* tp->lock is held. */
9323static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9324{
9325 u32 rxrcb, limit;
9230 9326
9231 /* Disable all receive return rings but the first. */ 9327 /* Disable all receive return rings but the first. */
9232 if (tg3_flag(tp, 5717_PLUS)) 9328 if (tg3_flag(tp, 5717_PLUS))
@@ -9244,6 +9340,39 @@ static void tg3_rings_reset(struct tg3 *tp)
9244 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) 9340 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9245 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, 9341 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9246 BDINFO_FLAGS_DISABLED); 9342 BDINFO_FLAGS_DISABLED);
9343}
9344
9345/* tp->lock is held. */
9346static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9347{
9348 int i = 0;
9349 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9350
9351 if (tg3_flag(tp, ENABLE_RSS))
9352 i++;
9353
9354 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9355 struct tg3_napi *tnapi = &tp->napi[i];
9356
9357 if (!tnapi->rx_rcb)
9358 continue;
9359
9360 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9361 (tp->rx_ret_ring_mask + 1) <<
9362 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9363 }
9364}
9365
9366/* tp->lock is held. */
9367static void tg3_rings_reset(struct tg3 *tp)
9368{
9369 int i;
9370 u32 stblk;
9371 struct tg3_napi *tnapi = &tp->napi[0];
9372
9373 tg3_tx_rcbs_disable(tp);
9374
9375 tg3_rx_ret_rcbs_disable(tp);
9247 9376
9248 /* Disable interrupts */ 9377 /* Disable interrupts */
9249 tw32_mailbox_f(tp->napi[0].int_mbox, 1); 9378 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
@@ -9280,9 +9409,6 @@ static void tg3_rings_reset(struct tg3 *tp)
9280 tw32_tx_mbox(mbox + i * 8, 0); 9409 tw32_tx_mbox(mbox + i * 8, 0);
9281 } 9410 }
9282 9411
9283 txrcb = NIC_SRAM_SEND_RCB;
9284 rxrcb = NIC_SRAM_RCV_RET_RCB;
9285
9286 /* Clear status block in ram. */ 9412 /* Clear status block in ram. */
9287 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9413 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9288 9414
@@ -9292,46 +9418,20 @@ static void tg3_rings_reset(struct tg3 *tp)
9292 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 9418 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9293 ((u64) tnapi->status_mapping & 0xffffffff)); 9419 ((u64) tnapi->status_mapping & 0xffffffff));
9294 9420
9295 if (tnapi->tx_ring) {
9296 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9297 (TG3_TX_RING_SIZE <<
9298 BDINFO_FLAGS_MAXLEN_SHIFT),
9299 NIC_SRAM_TX_BUFFER_DESC);
9300 txrcb += TG3_BDINFO_SIZE;
9301 }
9302
9303 if (tnapi->rx_rcb) {
9304 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9305 (tp->rx_ret_ring_mask + 1) <<
9306 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9307 rxrcb += TG3_BDINFO_SIZE;
9308 }
9309
9310 stblk = HOSTCC_STATBLCK_RING1; 9421 stblk = HOSTCC_STATBLCK_RING1;
9311 9422
9312 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { 9423 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9313 u64 mapping = (u64)tnapi->status_mapping; 9424 u64 mapping = (u64)tnapi->status_mapping;
9314 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); 9425 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9315 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); 9426 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9427 stblk += 8;
9316 9428
9317 /* Clear status block in ram. */ 9429 /* Clear status block in ram. */
9318 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9430 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9319
9320 if (tnapi->tx_ring) {
9321 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9322 (TG3_TX_RING_SIZE <<
9323 BDINFO_FLAGS_MAXLEN_SHIFT),
9324 NIC_SRAM_TX_BUFFER_DESC);
9325 txrcb += TG3_BDINFO_SIZE;
9326 }
9327
9328 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9329 ((tp->rx_ret_ring_mask + 1) <<
9330 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9331
9332 stblk += 8;
9333 rxrcb += TG3_BDINFO_SIZE;
9334 } 9431 }
9432
9433 tg3_tx_rcbs_init(tp);
9434 tg3_rx_ret_rcbs_init(tp);
9335} 9435}
9336 9436
9337static void tg3_setup_rxbd_thresholds(struct tg3 *tp) 9437static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
@@ -9531,46 +9631,17 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9531 if (tg3_flag(tp, INIT_COMPLETE)) 9631 if (tg3_flag(tp, INIT_COMPLETE))
9532 tg3_abort_hw(tp, 1); 9632 tg3_abort_hw(tp, 1);
9533 9633
9534 /* Enable MAC control of LPI */
9535 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9536 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9537 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9538 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9539 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9540
9541 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9542
9543 tw32_f(TG3_CPMU_EEE_CTRL,
9544 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9545
9546 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9547 TG3_CPMU_EEEMD_LPI_IN_TX |
9548 TG3_CPMU_EEEMD_LPI_IN_RX |
9549 TG3_CPMU_EEEMD_EEE_ENABLE;
9550
9551 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9552 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9553
9554 if (tg3_flag(tp, ENABLE_APE))
9555 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9556
9557 tw32_f(TG3_CPMU_EEE_MODE, val);
9558
9559 tw32_f(TG3_CPMU_EEE_DBTMR1,
9560 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9561 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9562
9563 tw32_f(TG3_CPMU_EEE_DBTMR2,
9564 TG3_CPMU_DBTMR2_APE_TX_2047US |
9565 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9566 }
9567
9568 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 9634 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9569 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { 9635 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9570 tg3_phy_pull_config(tp); 9636 tg3_phy_pull_config(tp);
9637 tg3_eee_pull_config(tp, NULL);
9571 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 9638 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9572 } 9639 }
9573 9640
9641 /* Enable MAC control of LPI */
9642 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9643 tg3_setup_eee(tp);
9644
9574 if (reset_phy) 9645 if (reset_phy)
9575 tg3_phy_reset(tp); 9646 tg3_phy_reset(tp);
9576 9647
@@ -11226,7 +11297,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11226 */ 11297 */
11227 err = tg3_alloc_consistent(tp); 11298 err = tg3_alloc_consistent(tp);
11228 if (err) 11299 if (err)
11229 goto err_out1; 11300 goto out_ints_fini;
11230 11301
11231 tg3_napi_init(tp); 11302 tg3_napi_init(tp);
11232 11303
@@ -11240,12 +11311,15 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11240 tnapi = &tp->napi[i]; 11311 tnapi = &tp->napi[i];
11241 free_irq(tnapi->irq_vec, tnapi); 11312 free_irq(tnapi->irq_vec, tnapi);
11242 } 11313 }
11243 goto err_out2; 11314 goto out_napi_fini;
11244 } 11315 }
11245 } 11316 }
11246 11317
11247 tg3_full_lock(tp, 0); 11318 tg3_full_lock(tp, 0);
11248 11319
11320 if (init)
11321 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11322
11249 err = tg3_init_hw(tp, reset_phy); 11323 err = tg3_init_hw(tp, reset_phy);
11250 if (err) { 11324 if (err) {
11251 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11325 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -11255,7 +11329,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11255 tg3_full_unlock(tp); 11329 tg3_full_unlock(tp);
11256 11330
11257 if (err) 11331 if (err)
11258 goto err_out3; 11332 goto out_free_irq;
11259 11333
11260 if (test_irq && tg3_flag(tp, USING_MSI)) { 11334 if (test_irq && tg3_flag(tp, USING_MSI)) {
11261 err = tg3_test_msi(tp); 11335 err = tg3_test_msi(tp);
@@ -11266,7 +11340,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11266 tg3_free_rings(tp); 11340 tg3_free_rings(tp);
11267 tg3_full_unlock(tp); 11341 tg3_full_unlock(tp);
11268 11342
11269 goto err_out2; 11343 goto out_napi_fini;
11270 } 11344 }
11271 11345
11272 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { 11346 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
@@ -11306,18 +11380,18 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11306 11380
11307 return 0; 11381 return 0;
11308 11382
11309err_out3: 11383out_free_irq:
11310 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11384 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11311 struct tg3_napi *tnapi = &tp->napi[i]; 11385 struct tg3_napi *tnapi = &tp->napi[i];
11312 free_irq(tnapi->irq_vec, tnapi); 11386 free_irq(tnapi->irq_vec, tnapi);
11313 } 11387 }
11314 11388
11315err_out2: 11389out_napi_fini:
11316 tg3_napi_disable(tp); 11390 tg3_napi_disable(tp);
11317 tg3_napi_fini(tp); 11391 tg3_napi_fini(tp);
11318 tg3_free_consistent(tp); 11392 tg3_free_consistent(tp);
11319 11393
11320err_out1: 11394out_ints_fini:
11321 tg3_ints_fini(tp); 11395 tg3_ints_fini(tp);
11322 11396
11323 return err; 11397 return err;
@@ -13362,11 +13436,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13362 struct tg3 *tp = netdev_priv(dev); 13436 struct tg3 *tp = netdev_priv(dev);
13363 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13437 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13364 13438
13365 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 13439 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13366 tg3_power_up(tp)) { 13440 if (tg3_power_up(tp)) {
13367 etest->flags |= ETH_TEST_FL_FAILED; 13441 etest->flags |= ETH_TEST_FL_FAILED;
13368 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13442 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13369 return; 13443 return;
13444 }
13445 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13370 } 13446 }
13371 13447
13372 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13448 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
@@ -13657,6 +13733,57 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13657 return 0; 13733 return 0;
13658} 13734}
13659 13735
13736static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13737{
13738 struct tg3 *tp = netdev_priv(dev);
13739
13740 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13741 netdev_warn(tp->dev, "Board does not support EEE!\n");
13742 return -EOPNOTSUPP;
13743 }
13744
13745 if (edata->advertised != tp->eee.advertised) {
13746 netdev_warn(tp->dev,
13747 "Direct manipulation of EEE advertisement is not supported\n");
13748 return -EINVAL;
13749 }
13750
13751 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13752 netdev_warn(tp->dev,
13753 "Maximal Tx Lpi timer supported is %#x(u)\n",
13754 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13755 return -EINVAL;
13756 }
13757
13758 tp->eee = *edata;
13759
13760 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13761 tg3_warn_mgmt_link_flap(tp);
13762
13763 if (netif_running(tp->dev)) {
13764 tg3_full_lock(tp, 0);
13765 tg3_setup_eee(tp);
13766 tg3_phy_reset(tp);
13767 tg3_full_unlock(tp);
13768 }
13769
13770 return 0;
13771}
13772
13773static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13774{
13775 struct tg3 *tp = netdev_priv(dev);
13776
13777 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13778 netdev_warn(tp->dev,
13779 "Board does not support EEE!\n");
13780 return -EOPNOTSUPP;
13781 }
13782
13783 *edata = tp->eee;
13784 return 0;
13785}
13786
13660static const struct ethtool_ops tg3_ethtool_ops = { 13787static const struct ethtool_ops tg3_ethtool_ops = {
13661 .get_settings = tg3_get_settings, 13788 .get_settings = tg3_get_settings,
13662 .set_settings = tg3_set_settings, 13789 .set_settings = tg3_set_settings,
@@ -13690,6 +13817,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
13690 .get_channels = tg3_get_channels, 13817 .get_channels = tg3_get_channels,
13691 .set_channels = tg3_set_channels, 13818 .set_channels = tg3_set_channels,
13692 .get_ts_info = tg3_get_ts_info, 13819 .get_ts_info = tg3_get_ts_info,
13820 .get_eee = tg3_get_eee,
13821 .set_eee = tg3_set_eee,
13693}; 13822};
13694 13823
13695static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, 13824static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
@@ -15038,9 +15167,18 @@ static int tg3_phy_probe(struct tg3 *tp)
15038 (tg3_asic_rev(tp) == ASIC_REV_5717 && 15167 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15039 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || 15168 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15040 (tg3_asic_rev(tp) == ASIC_REV_57765 && 15169 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15041 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) 15170 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15042 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 15171 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15043 15172
15173 tp->eee.supported = SUPPORTED_100baseT_Full |
15174 SUPPORTED_1000baseT_Full;
15175 tp->eee.advertised = ADVERTISED_100baseT_Full |
15176 ADVERTISED_1000baseT_Full;
15177 tp->eee.eee_enabled = 1;
15178 tp->eee.tx_lpi_enabled = 1;
15179 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15180 }
15181
15044 tg3_phy_init_link_config(tp); 15182 tg3_phy_init_link_config(tp);
15045 15183
15046 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 15184 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
@@ -17112,7 +17250,7 @@ static int tg3_init_one(struct pci_dev *pdev,
17112{ 17250{
17113 struct net_device *dev; 17251 struct net_device *dev;
17114 struct tg3 *tp; 17252 struct tg3 *tp;
17115 int i, err, pm_cap; 17253 int i, err;
17116 u32 sndmbx, rcvmbx, intmbx; 17254 u32 sndmbx, rcvmbx, intmbx;
17117 char str[40]; 17255 char str[40];
17118 u64 dma_mask, persist_dma_mask; 17256 u64 dma_mask, persist_dma_mask;
@@ -17134,25 +17272,10 @@ static int tg3_init_one(struct pci_dev *pdev,
17134 17272
17135 pci_set_master(pdev); 17273 pci_set_master(pdev);
17136 17274
17137 /* Find power-management capability. */
17138 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17139 if (pm_cap == 0) {
17140 dev_err(&pdev->dev,
17141 "Cannot find Power Management capability, aborting\n");
17142 err = -EIO;
17143 goto err_out_free_res;
17144 }
17145
17146 err = pci_set_power_state(pdev, PCI_D0);
17147 if (err) {
17148 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17149 goto err_out_free_res;
17150 }
17151
17152 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 17275 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17153 if (!dev) { 17276 if (!dev) {
17154 err = -ENOMEM; 17277 err = -ENOMEM;
17155 goto err_out_power_down; 17278 goto err_out_free_res;
17156 } 17279 }
17157 17280
17158 SET_NETDEV_DEV(dev, &pdev->dev); 17281 SET_NETDEV_DEV(dev, &pdev->dev);
@@ -17160,7 +17283,7 @@ static int tg3_init_one(struct pci_dev *pdev,
17160 tp = netdev_priv(dev); 17283 tp = netdev_priv(dev);
17161 tp->pdev = pdev; 17284 tp->pdev = pdev;
17162 tp->dev = dev; 17285 tp->dev = dev;
17163 tp->pm_cap = pm_cap; 17286 tp->pm_cap = pdev->pm_cap;
17164 tp->rx_mode = TG3_DEF_RX_MODE; 17287 tp->rx_mode = TG3_DEF_RX_MODE;
17165 tp->tx_mode = TG3_DEF_TX_MODE; 17288 tp->tx_mode = TG3_DEF_TX_MODE;
17166 tp->irq_sync = 1; 17289 tp->irq_sync = 1;
@@ -17498,9 +17621,6 @@ err_out_iounmap:
17498err_out_free_dev: 17621err_out_free_dev:
17499 free_netdev(dev); 17622 free_netdev(dev);
17500 17623
17501err_out_power_down:
17502 pci_set_power_state(pdev, PCI_D3hot);
17503
17504err_out_free_res: 17624err_out_free_res:
17505 pci_release_regions(pdev); 17625 pci_release_regions(pdev);
17506 17626
@@ -17610,6 +17730,8 @@ static int tg3_resume(struct device *device)
17610 17730
17611 tg3_full_lock(tp, 0); 17731 tg3_full_lock(tp, 0);
17612 17732
17733 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17734
17613 tg3_flag_set(tp, INIT_COMPLETE); 17735 tg3_flag_set(tp, INIT_COMPLETE);
17614 err = tg3_restart_hw(tp, 17736 err = tg3_restart_hw(tp,
17615 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)); 17737 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
@@ -17671,10 +17793,13 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17671 tg3_full_unlock(tp); 17793 tg3_full_unlock(tp);
17672 17794
17673done: 17795done:
17674 if (state == pci_channel_io_perm_failure) 17796 if (state == pci_channel_io_perm_failure) {
17797 tg3_napi_enable(tp);
17798 dev_close(netdev);
17675 err = PCI_ERS_RESULT_DISCONNECT; 17799 err = PCI_ERS_RESULT_DISCONNECT;
17676 else 17800 } else {
17677 pci_disable_device(pdev); 17801 pci_disable_device(pdev);
17802 }
17678 17803
17679 rtnl_unlock(); 17804 rtnl_unlock();
17680 17805
@@ -17720,6 +17845,10 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17720 rc = PCI_ERS_RESULT_RECOVERED; 17845 rc = PCI_ERS_RESULT_RECOVERED;
17721 17846
17722done: 17847done:
17848 if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
17849 tg3_napi_enable(tp);
17850 dev_close(netdev);
17851 }
17723 rtnl_unlock(); 17852 rtnl_unlock();
17724 17853
17725 return rc; 17854 return rc;
@@ -17744,6 +17873,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
17744 goto done; 17873 goto done;
17745 17874
17746 tg3_full_lock(tp, 0); 17875 tg3_full_lock(tp, 0);
17876 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17747 tg3_flag_set(tp, INIT_COMPLETE); 17877 tg3_flag_set(tp, INIT_COMPLETE);
17748 err = tg3_restart_hw(tp, true); 17878 err = tg3_restart_hw(tp, true);
17749 if (err) { 17879 if (err) {
@@ -17781,15 +17911,4 @@ static struct pci_driver tg3_driver = {
17781 .driver.pm = &tg3_pm_ops, 17911 .driver.pm = &tg3_pm_ops,
17782}; 17912};
17783 17913
17784static int __init tg3_init(void) 17914module_pci_driver(tg3_driver);
17785{
17786 return pci_register_driver(&tg3_driver);
17787}
17788
17789static void __exit tg3_cleanup(void)
17790{
17791 pci_unregister_driver(&tg3_driver);
17792}
17793
17794module_init(tg3_init);
17795module_exit(tg3_cleanup);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index ff6e30eeae35..cd63d1189aae 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1175,6 +1175,7 @@
1175#define TG3_CPMU_EEE_DBTMR1 0x000036b4 1175#define TG3_CPMU_EEE_DBTMR1 0x000036b4
1176#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 1176#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
1177#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff 1177#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff
1178#define TG3_CPMU_DBTMR1_LNKIDLE_MAX 0x0000ffff
1178#define TG3_CPMU_EEE_DBTMR2 0x000036b8 1179#define TG3_CPMU_EEE_DBTMR2 0x000036b8
1179#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000 1180#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
1180#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff 1181#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff
@@ -3372,6 +3373,7 @@ struct tg3 {
3372 unsigned int irq_cnt; 3373 unsigned int irq_cnt;
3373 3374
3374 struct ethtool_coalesce coal; 3375 struct ethtool_coalesce coal;
3376 struct ethtool_eee eee;
3375 3377
3376 /* firmware info */ 3378 /* firmware info */
3377 const char *fw_needed; 3379 const char *fw_needed;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index e423f82da490..b7d8127c198f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -164,7 +164,8 @@ struct bfa_ioc_attr {
164 u8 port_mode; /*!< enum bfa_mode */ 164 u8 port_mode; /*!< enum bfa_mode */
165 u8 cap_bm; /*!< capability */ 165 u8 cap_bm; /*!< capability */
166 u8 port_mode_cfg; /*!< enum bfa_mode */ 166 u8 port_mode_cfg; /*!< enum bfa_mode */
167 u8 rsvd[4]; /*!< 64bit align */ 167 u8 def_fn; /*!< 1 if default fn */
168 u8 rsvd[3]; /*!< 64bit align */
168}; 169};
169 170
170/* Adapter capability mask definition */ 171/* Adapter capability mask definition */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index f2b73ffa9122..6f3cac060f29 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2371,7 +2371,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2371 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr)); 2371 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2372 2372
2373 ioc_attr->state = bfa_ioc_get_state(ioc); 2373 ioc_attr->state = bfa_ioc_get_state(ioc);
2374 ioc_attr->port_id = ioc->port_id; 2374 ioc_attr->port_id = bfa_ioc_portid(ioc);
2375 ioc_attr->port_mode = ioc->port_mode; 2375 ioc_attr->port_mode = ioc->port_mode;
2376 2376
2377 ioc_attr->port_mode_cfg = ioc->port_mode_cfg; 2377 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
@@ -2381,8 +2381,9 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2381 2381
2382 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); 2382 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2383 2383
2384 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; 2384 ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2385 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; 2385 ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2386 ioc_attr->def_fn = bfa_ioc_is_default(ioc);
2386 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2387 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2387} 2388}
2388 2389
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index 63a85e555df8..f04e0aab25b4 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -222,6 +222,8 @@ struct bfa_ioc_hwif {
222#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) 222#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
223#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) 223#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
224#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen) 224#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
225#define bfa_ioc_is_default(__ioc) \
226 (bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc))
225#define bfa_ioc_fetch_stats(__ioc, __stats) \ 227#define bfa_ioc_fetch_stats(__ioc, __stats) \
226 (((__stats)->drv_stats) = (__ioc)->stats) 228 (((__stats)->drv_stats) = (__ioc)->stats)
227#define bfa_ioc_clr_stats(__ioc) \ 229#define bfa_ioc_clr_stats(__ioc) \
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 25dae757e9c4..f1eafc409bbd 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -455,6 +455,8 @@ void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
455void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr); 455void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
456void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, 456void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
457 struct bfi_msgq_mhdr *msghdr); 457 struct bfi_msgq_mhdr *msghdr);
458void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
459 struct bfi_msgq_mhdr *msghdr);
458 460
459/* APIs for BNA */ 461/* APIs for BNA */
460void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, 462void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index db14f69d63bc..3ca77fad4851 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -298,7 +298,6 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
298 case BFI_ENET_I2H_RSS_ENABLE_RSP: 298 case BFI_ENET_I2H_RSS_ENABLE_RSP:
299 case BFI_ENET_I2H_RX_PROMISCUOUS_RSP: 299 case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
300 case BFI_ENET_I2H_RX_DEFAULT_RSP: 300 case BFI_ENET_I2H_RX_DEFAULT_RSP:
301 case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
302 case BFI_ENET_I2H_MAC_UCAST_CLR_RSP: 301 case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
303 case BFI_ENET_I2H_MAC_UCAST_ADD_RSP: 302 case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
304 case BFI_ENET_I2H_MAC_UCAST_DEL_RSP: 303 case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
@@ -311,6 +310,12 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
311 bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr); 310 bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
312 break; 311 break;
313 312
313 case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
314 bna_rx_from_rid(bna, msghdr->enet_id, rx);
315 if (rx)
316 bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
317 break;
318
314 case BFI_ENET_I2H_MAC_MCAST_ADD_RSP: 319 case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
315 bna_rx_from_rid(bna, msghdr->enet_id, rx); 320 bna_rx_from_rid(bna, msghdr->enet_id, rx);
316 if (rx) 321 if (rx)
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index ea6f4a036401..57cd1bff59f1 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -711,6 +711,21 @@ bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
711} 711}
712 712
713void 713void
714bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
715 struct bfi_msgq_mhdr *msghdr)
716{
717 struct bfi_enet_rsp *rsp =
718 (struct bfi_enet_rsp *)msghdr;
719
720 if (rsp->error) {
721 /* Clear ucast from cache */
722 rxf->ucast_active_set = 0;
723 }
724
725 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
726}
727
728void
714bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, 729bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
715 struct bfi_msgq_mhdr *msghdr) 730 struct bfi_msgq_mhdr *msghdr)
716{ 731{
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 07f7ef05c3f2..b78e69e0e52a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2624,6 +2624,9 @@ bnad_stop(struct net_device *netdev)
2624 bnad_destroy_tx(bnad, 0); 2624 bnad_destroy_tx(bnad, 0);
2625 bnad_destroy_rx(bnad, 0); 2625 bnad_destroy_rx(bnad, 0);
2626 2626
2627 /* These config flags are cleared in the hardware */
2628 bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC);
2629
2627 /* Synchronize mailbox IRQ */ 2630 /* Synchronize mailbox IRQ */
2628 bnad_mbox_irq_sync(bnad); 2631 bnad_mbox_irq_sync(bnad);
2629 2632
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index c1d0bc059bfd..aefee77523f2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
71#define BNAD_NAME "bna" 71#define BNAD_NAME "bna"
72#define BNAD_NAME_LEN 64 72#define BNAD_NAME_LEN 64
73 73
74#define BNAD_VERSION "3.1.2.1" 74#define BNAD_VERSION "3.2.21.1"
75 75
76#define BNAD_MAILBOX_MSIX_INDEX 0 76#define BNAD_MAILBOX_MSIX_INDEX 0
77#define BNAD_MAILBOX_MSIX_VECTORS 1 77#define BNAD_MAILBOX_MSIX_VECTORS 1
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 94d957d203a6..7d6aa8c87df8 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -230,32 +230,12 @@ bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
230static loff_t 230static loff_t
231bnad_debugfs_lseek(struct file *file, loff_t offset, int orig) 231bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
232{ 232{
233 loff_t pos = file->f_pos;
234 struct bnad_debug_info *debug = file->private_data; 233 struct bnad_debug_info *debug = file->private_data;
235 234
236 if (!debug) 235 if (!debug)
237 return -EINVAL; 236 return -EINVAL;
238 237
239 switch (orig) { 238 return fixed_size_llseek(file, offset, orig, debug->buffer_len);
240 case 0:
241 file->f_pos = offset;
242 break;
243 case 1:
244 file->f_pos += offset;
245 break;
246 case 2:
247 file->f_pos = debug->buffer_len + offset;
248 break;
249 default:
250 return -EINVAL;
251 }
252
253 if (file->f_pos < 0 || file->f_pos > debug->buffer_len) {
254 file->f_pos = pos;
255 return -EINVAL;
256 }
257
258 return file->f_pos;
259} 239}
260 240
261static ssize_t 241static ssize_t
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 14ca9317c915..c37f706d9992 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
37 37
38extern char bfa_version[]; 38extern char bfa_version[];
39 39
40#define CNA_FW_FILE_CT "ctfw-3.1.0.0.bin" 40#define CNA_FW_FILE_CT "ctfw-3.2.1.0.bin"
41#define CNA_FW_FILE_CT2 "ct2fw-3.1.0.0.bin" 41#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.0.bin"
42#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ 42#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
43 43
44#pragma pack(1) 44#pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 768285ec10f4..8030cc0396fd 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -23,7 +23,6 @@ if NET_CADENCE
23config ARM_AT91_ETHER 23config ARM_AT91_ETHER
24 tristate "AT91RM9200 Ethernet support" 24 tristate "AT91RM9200 Ethernet support"
25 depends on GENERIC_HARDIRQS && HAS_DMA 25 depends on GENERIC_HARDIRQS && HAS_DMA
26 select NET_CORE
27 select MACB 26 select MACB
28 ---help--- 27 ---help---
29 If you wish to compile a kernel for the AT91RM9200 and enable 28 If you wish to compile a kernel for the AT91RM9200 and enable
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index cc9a185f0abb..3f1957158a3b 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -435,7 +435,6 @@ static int at91ether_remove(struct platform_device *pdev)
435 unregister_netdev(dev); 435 unregister_netdev(dev);
436 clk_disable(lp->pclk); 436 clk_disable(lp->pclk);
437 free_netdev(dev); 437 free_netdev(dev);
438 platform_set_drvdata(pdev, NULL);
439 438
440 return 0; 439 return 0;
441} 440}
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c89aa41dd448..e866608d7d91 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -32,7 +32,8 @@
32 32
33#include "macb.h" 33#include "macb.h"
34 34
35#define RX_BUFFER_SIZE 128 35#define MACB_RX_BUFFER_SIZE 128
36#define RX_BUFFER_MULTIPLE 64 /* bytes */
36#define RX_RING_SIZE 512 /* must be power of 2 */ 37#define RX_RING_SIZE 512 /* must be power of 2 */
37#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 38#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
38 39
@@ -92,7 +93,7 @@ static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
92 93
93static void *macb_rx_buffer(struct macb *bp, unsigned int index) 94static void *macb_rx_buffer(struct macb *bp, unsigned int index)
94{ 95{
95 return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index); 96 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
96} 97}
97 98
98void macb_set_hwaddr(struct macb *bp) 99void macb_set_hwaddr(struct macb *bp)
@@ -528,6 +529,155 @@ static void macb_tx_interrupt(struct macb *bp)
528 netif_wake_queue(bp->dev); 529 netif_wake_queue(bp->dev);
529} 530}
530 531
532static void gem_rx_refill(struct macb *bp)
533{
534 unsigned int entry;
535 struct sk_buff *skb;
536 struct macb_dma_desc *desc;
537 dma_addr_t paddr;
538
539 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
540 u32 addr, ctrl;
541
542 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
543 desc = &bp->rx_ring[entry];
544
545 /* Make hw descriptor updates visible to CPU */
546 rmb();
547
548 addr = desc->addr;
549 ctrl = desc->ctrl;
550 bp->rx_prepared_head++;
551
552 if ((addr & MACB_BIT(RX_USED)))
553 continue;
554
555 if (bp->rx_skbuff[entry] == NULL) {
556 /* allocate sk_buff for this free entry in ring */
557 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
558 if (unlikely(skb == NULL)) {
559 netdev_err(bp->dev,
560 "Unable to allocate sk_buff\n");
561 break;
562 }
563 bp->rx_skbuff[entry] = skb;
564
565 /* now fill corresponding descriptor entry */
566 paddr = dma_map_single(&bp->pdev->dev, skb->data,
567 bp->rx_buffer_size, DMA_FROM_DEVICE);
568
569 if (entry == RX_RING_SIZE - 1)
570 paddr |= MACB_BIT(RX_WRAP);
571 bp->rx_ring[entry].addr = paddr;
572 bp->rx_ring[entry].ctrl = 0;
573
574 /* properly align Ethernet header */
575 skb_reserve(skb, NET_IP_ALIGN);
576 }
577 }
578
579 /* Make descriptor updates visible to hardware */
580 wmb();
581
582 netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
583 bp->rx_prepared_head, bp->rx_tail);
584}
585
586/* Mark DMA descriptors from begin up to and not including end as unused */
587static void discard_partial_frame(struct macb *bp, unsigned int begin,
588 unsigned int end)
589{
590 unsigned int frag;
591
592 for (frag = begin; frag != end; frag++) {
593 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
594 desc->addr &= ~MACB_BIT(RX_USED);
595 }
596
597 /* Make descriptor updates visible to hardware */
598 wmb();
599
600 /*
601 * When this happens, the hardware stats registers for
602 * whatever caused this is updated, so we don't have to record
603 * anything.
604 */
605}
606
607static int gem_rx(struct macb *bp, int budget)
608{
609 unsigned int len;
610 unsigned int entry;
611 struct sk_buff *skb;
612 struct macb_dma_desc *desc;
613 int count = 0;
614
615 while (count < budget) {
616 u32 addr, ctrl;
617
618 entry = macb_rx_ring_wrap(bp->rx_tail);
619 desc = &bp->rx_ring[entry];
620
621 /* Make hw descriptor updates visible to CPU */
622 rmb();
623
624 addr = desc->addr;
625 ctrl = desc->ctrl;
626
627 if (!(addr & MACB_BIT(RX_USED)))
628 break;
629
630 desc->addr &= ~MACB_BIT(RX_USED);
631 bp->rx_tail++;
632 count++;
633
634 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
635 netdev_err(bp->dev,
636 "not whole frame pointed by descriptor\n");
637 bp->stats.rx_dropped++;
638 break;
639 }
640 skb = bp->rx_skbuff[entry];
641 if (unlikely(!skb)) {
642 netdev_err(bp->dev,
643 "inconsistent Rx descriptor chain\n");
644 bp->stats.rx_dropped++;
645 break;
646 }
647 /* now everything is ready for receiving packet */
648 bp->rx_skbuff[entry] = NULL;
649 len = MACB_BFEXT(RX_FRMLEN, ctrl);
650
651 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
652
653 skb_put(skb, len);
654 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
655 dma_unmap_single(&bp->pdev->dev, addr,
656 len, DMA_FROM_DEVICE);
657
658 skb->protocol = eth_type_trans(skb, bp->dev);
659 skb_checksum_none_assert(skb);
660
661 bp->stats.rx_packets++;
662 bp->stats.rx_bytes += skb->len;
663
664#if defined(DEBUG) && defined(VERBOSE_DEBUG)
665 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
666 skb->len, skb->csum);
667 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
668 skb->mac_header, 16, true);
669 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
670 skb->data, 32, true);
671#endif
672
673 netif_receive_skb(skb);
674 }
675
676 gem_rx_refill(bp);
677
678 return count;
679}
680
531static int macb_rx_frame(struct macb *bp, unsigned int first_frag, 681static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
532 unsigned int last_frag) 682 unsigned int last_frag)
533{ 683{
@@ -575,7 +725,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
575 skb_put(skb, len); 725 skb_put(skb, len);
576 726
577 for (frag = first_frag; ; frag++) { 727 for (frag = first_frag; ; frag++) {
578 unsigned int frag_len = RX_BUFFER_SIZE; 728 unsigned int frag_len = bp->rx_buffer_size;
579 729
580 if (offset + frag_len > len) { 730 if (offset + frag_len > len) {
581 BUG_ON(frag != last_frag); 731 BUG_ON(frag != last_frag);
@@ -583,7 +733,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
583 } 733 }
584 skb_copy_to_linear_data_offset(skb, offset, 734 skb_copy_to_linear_data_offset(skb, offset,
585 macb_rx_buffer(bp, frag), frag_len); 735 macb_rx_buffer(bp, frag), frag_len);
586 offset += RX_BUFFER_SIZE; 736 offset += bp->rx_buffer_size;
587 desc = macb_rx_desc(bp, frag); 737 desc = macb_rx_desc(bp, frag);
588 desc->addr &= ~MACB_BIT(RX_USED); 738 desc->addr &= ~MACB_BIT(RX_USED);
589 739
@@ -606,27 +756,6 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
606 return 0; 756 return 0;
607} 757}
608 758
609/* Mark DMA descriptors from begin up to and not including end as unused */
610static void discard_partial_frame(struct macb *bp, unsigned int begin,
611 unsigned int end)
612{
613 unsigned int frag;
614
615 for (frag = begin; frag != end; frag++) {
616 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
617 desc->addr &= ~MACB_BIT(RX_USED);
618 }
619
620 /* Make descriptor updates visible to hardware */
621 wmb();
622
623 /*
624 * When this happens, the hardware stats registers for
625 * whatever caused this is updated, so we don't have to record
626 * anything.
627 */
628}
629
630static int macb_rx(struct macb *bp, int budget) 759static int macb_rx(struct macb *bp, int budget)
631{ 760{
632 int received = 0; 761 int received = 0;
@@ -687,7 +816,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
687 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", 816 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
688 (unsigned long)status, budget); 817 (unsigned long)status, budget);
689 818
690 work_done = macb_rx(bp, budget); 819 work_done = bp->macbgem_ops.mog_rx(bp, budget);
691 if (work_done < budget) { 820 if (work_done < budget) {
692 napi_complete(napi); 821 napi_complete(napi);
693 822
@@ -870,12 +999,71 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
870 return NETDEV_TX_OK; 999 return NETDEV_TX_OK;
871} 1000}
872 1001
1002static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1003{
1004 if (!macb_is_gem(bp)) {
1005 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1006 } else {
1007 bp->rx_buffer_size = size;
1008
1009 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1010 netdev_dbg(bp->dev,
1011 "RX buffer must be multiple of %d bytes, expanding\n",
1012 RX_BUFFER_MULTIPLE);
1013 bp->rx_buffer_size =
1014 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1015 }
1016 }
1017
1018 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
1019 bp->dev->mtu, bp->rx_buffer_size);
1020}
1021
1022static void gem_free_rx_buffers(struct macb *bp)
1023{
1024 struct sk_buff *skb;
1025 struct macb_dma_desc *desc;
1026 dma_addr_t addr;
1027 int i;
1028
1029 if (!bp->rx_skbuff)
1030 return;
1031
1032 for (i = 0; i < RX_RING_SIZE; i++) {
1033 skb = bp->rx_skbuff[i];
1034
1035 if (skb == NULL)
1036 continue;
1037
1038 desc = &bp->rx_ring[i];
1039 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1040 dma_unmap_single(&bp->pdev->dev, addr, skb->len,
1041 DMA_FROM_DEVICE);
1042 dev_kfree_skb_any(skb);
1043 skb = NULL;
1044 }
1045
1046 kfree(bp->rx_skbuff);
1047 bp->rx_skbuff = NULL;
1048}
1049
1050static void macb_free_rx_buffers(struct macb *bp)
1051{
1052 if (bp->rx_buffers) {
1053 dma_free_coherent(&bp->pdev->dev,
1054 RX_RING_SIZE * bp->rx_buffer_size,
1055 bp->rx_buffers, bp->rx_buffers_dma);
1056 bp->rx_buffers = NULL;
1057 }
1058}
1059
873static void macb_free_consistent(struct macb *bp) 1060static void macb_free_consistent(struct macb *bp)
874{ 1061{
875 if (bp->tx_skb) { 1062 if (bp->tx_skb) {
876 kfree(bp->tx_skb); 1063 kfree(bp->tx_skb);
877 bp->tx_skb = NULL; 1064 bp->tx_skb = NULL;
878 } 1065 }
1066 bp->macbgem_ops.mog_free_rx_buffers(bp);
879 if (bp->rx_ring) { 1067 if (bp->rx_ring) {
880 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, 1068 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
881 bp->rx_ring, bp->rx_ring_dma); 1069 bp->rx_ring, bp->rx_ring_dma);
@@ -886,12 +1074,37 @@ static void macb_free_consistent(struct macb *bp)
886 bp->tx_ring, bp->tx_ring_dma); 1074 bp->tx_ring, bp->tx_ring_dma);
887 bp->tx_ring = NULL; 1075 bp->tx_ring = NULL;
888 } 1076 }
889 if (bp->rx_buffers) { 1077}
890 dma_free_coherent(&bp->pdev->dev, 1078
891 RX_RING_SIZE * RX_BUFFER_SIZE, 1079static int gem_alloc_rx_buffers(struct macb *bp)
892 bp->rx_buffers, bp->rx_buffers_dma); 1080{
893 bp->rx_buffers = NULL; 1081 int size;
894 } 1082
1083 size = RX_RING_SIZE * sizeof(struct sk_buff *);
1084 bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1085 if (!bp->rx_skbuff)
1086 return -ENOMEM;
1087 else
1088 netdev_dbg(bp->dev,
1089 "Allocated %d RX struct sk_buff entries at %p\n",
1090 RX_RING_SIZE, bp->rx_skbuff);
1091 return 0;
1092}
1093
1094static int macb_alloc_rx_buffers(struct macb *bp)
1095{
1096 int size;
1097
1098 size = RX_RING_SIZE * bp->rx_buffer_size;
1099 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1100 &bp->rx_buffers_dma, GFP_KERNEL);
1101 if (!bp->rx_buffers)
1102 return -ENOMEM;
1103 else
1104 netdev_dbg(bp->dev,
1105 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1106 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
1107 return 0;
895} 1108}
896 1109
897static int macb_alloc_consistent(struct macb *bp) 1110static int macb_alloc_consistent(struct macb *bp)
@@ -921,14 +1134,8 @@ static int macb_alloc_consistent(struct macb *bp)
921 "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", 1134 "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
922 size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); 1135 size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
923 1136
924 size = RX_RING_SIZE * RX_BUFFER_SIZE; 1137 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
925 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
926 &bp->rx_buffers_dma, GFP_KERNEL);
927 if (!bp->rx_buffers)
928 goto out_err; 1138 goto out_err;
929 netdev_dbg(bp->dev,
930 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
931 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
932 1139
933 return 0; 1140 return 0;
934 1141
@@ -937,6 +1144,21 @@ out_err:
937 return -ENOMEM; 1144 return -ENOMEM;
938} 1145}
939 1146
1147static void gem_init_rings(struct macb *bp)
1148{
1149 int i;
1150
1151 for (i = 0; i < TX_RING_SIZE; i++) {
1152 bp->tx_ring[i].addr = 0;
1153 bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
1154 }
1155 bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1156
1157 bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
1158
1159 gem_rx_refill(bp);
1160}
1161
940static void macb_init_rings(struct macb *bp) 1162static void macb_init_rings(struct macb *bp)
941{ 1163{
942 int i; 1164 int i;
@@ -946,7 +1168,7 @@ static void macb_init_rings(struct macb *bp)
946 for (i = 0; i < RX_RING_SIZE; i++) { 1168 for (i = 0; i < RX_RING_SIZE; i++) {
947 bp->rx_ring[i].addr = addr; 1169 bp->rx_ring[i].addr = addr;
948 bp->rx_ring[i].ctrl = 0; 1170 bp->rx_ring[i].ctrl = 0;
949 addr += RX_BUFFER_SIZE; 1171 addr += bp->rx_buffer_size;
950 } 1172 }
951 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); 1173 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
952 1174
@@ -1056,7 +1278,7 @@ static void macb_configure_dma(struct macb *bp)
1056 1278
1057 if (macb_is_gem(bp)) { 1279 if (macb_is_gem(bp)) {
1058 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 1280 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
1059 dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64); 1281 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
1060 dmacfg |= GEM_BF(FBLDO, 16); 1282 dmacfg |= GEM_BF(FBLDO, 16);
1061 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 1283 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1062 dmacfg &= ~GEM_BIT(ENDIA); 1284 dmacfg &= ~GEM_BIT(ENDIA);
@@ -1070,7 +1292,7 @@ static void macb_configure_dma(struct macb *bp)
1070static void macb_configure_caps(struct macb *bp) 1292static void macb_configure_caps(struct macb *bp)
1071{ 1293{
1072 if (macb_is_gem(bp)) { 1294 if (macb_is_gem(bp)) {
1073 if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0) 1295 if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
1074 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; 1296 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
1075 } 1297 }
1076} 1298}
@@ -1233,6 +1455,7 @@ EXPORT_SYMBOL_GPL(macb_set_rx_mode);
1233static int macb_open(struct net_device *dev) 1455static int macb_open(struct net_device *dev)
1234{ 1456{
1235 struct macb *bp = netdev_priv(dev); 1457 struct macb *bp = netdev_priv(dev);
1458 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
1236 int err; 1459 int err;
1237 1460
1238 netdev_dbg(bp->dev, "open\n"); 1461 netdev_dbg(bp->dev, "open\n");
@@ -1244,6 +1467,9 @@ static int macb_open(struct net_device *dev)
1244 if (!bp->phy_dev) 1467 if (!bp->phy_dev)
1245 return -EAGAIN; 1468 return -EAGAIN;
1246 1469
1470 /* RX buffers initialization */
1471 macb_init_rx_buffer_size(bp, bufsz);
1472
1247 err = macb_alloc_consistent(bp); 1473 err = macb_alloc_consistent(bp);
1248 if (err) { 1474 if (err) {
1249 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", 1475 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
@@ -1253,7 +1479,7 @@ static int macb_open(struct net_device *dev)
1253 1479
1254 napi_enable(&bp->napi); 1480 napi_enable(&bp->napi);
1255 1481
1256 macb_init_rings(bp); 1482 bp->macbgem_ops.mog_init_rings(bp);
1257 macb_init_hw(bp); 1483 macb_init_hw(bp);
1258 1484
1259 /* schedule a link state check */ 1485 /* schedule a link state check */
@@ -1572,6 +1798,19 @@ static int __init macb_probe(struct platform_device *pdev)
1572 1798
1573 dev->base_addr = regs->start; 1799 dev->base_addr = regs->start;
1574 1800
1801 /* setup appropriated routines according to adapter type */
1802 if (macb_is_gem(bp)) {
1803 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
1804 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
1805 bp->macbgem_ops.mog_init_rings = gem_init_rings;
1806 bp->macbgem_ops.mog_rx = gem_rx;
1807 } else {
1808 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
1809 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
1810 bp->macbgem_ops.mog_init_rings = macb_init_rings;
1811 bp->macbgem_ops.mog_rx = macb_rx;
1812 }
1813
1575 /* Set MII management clock divider */ 1814 /* Set MII management clock divider */
1576 config = macb_mdc_clk_div(bp); 1815 config = macb_mdc_clk_div(bp);
1577 config |= macb_dbw(bp); 1816 config |= macb_dbw(bp);
@@ -1649,7 +1888,6 @@ err_out_put_pclk:
1649err_out_free_dev: 1888err_out_free_dev:
1650 free_netdev(dev); 1889 free_netdev(dev);
1651err_out: 1890err_out:
1652 platform_set_drvdata(pdev, NULL);
1653 return err; 1891 return err;
1654} 1892}
1655 1893
@@ -1675,7 +1913,6 @@ static int __exit macb_remove(struct platform_device *pdev)
1675 clk_disable_unprepare(bp->pclk); 1913 clk_disable_unprepare(bp->pclk);
1676 clk_put(bp->pclk); 1914 clk_put(bp->pclk);
1677 free_netdev(dev); 1915 free_netdev(dev);
1678 platform_set_drvdata(pdev, NULL);
1679 } 1916 }
1680 1917
1681 return 0; 1918 return 0;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 548c0ecae869..f4076155bed7 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -545,12 +545,24 @@ struct gem_stats {
545 u32 rx_udp_checksum_errors; 545 u32 rx_udp_checksum_errors;
546}; 546};
547 547
548struct macb;
549
550struct macb_or_gem_ops {
551 int (*mog_alloc_rx_buffers)(struct macb *bp);
552 void (*mog_free_rx_buffers)(struct macb *bp);
553 void (*mog_init_rings)(struct macb *bp);
554 int (*mog_rx)(struct macb *bp, int budget);
555};
556
548struct macb { 557struct macb {
549 void __iomem *regs; 558 void __iomem *regs;
550 559
551 unsigned int rx_tail; 560 unsigned int rx_tail;
561 unsigned int rx_prepared_head;
552 struct macb_dma_desc *rx_ring; 562 struct macb_dma_desc *rx_ring;
563 struct sk_buff **rx_skbuff;
553 void *rx_buffers; 564 void *rx_buffers;
565 size_t rx_buffer_size;
554 566
555 unsigned int tx_head, tx_tail; 567 unsigned int tx_head, tx_tail;
556 struct macb_dma_desc *tx_ring; 568 struct macb_dma_desc *tx_ring;
@@ -573,6 +585,8 @@ struct macb {
573 dma_addr_t tx_ring_dma; 585 dma_addr_t tx_ring_dma;
574 dma_addr_t rx_buffers_dma; 586 dma_addr_t rx_buffers_dma;
575 587
588 struct macb_or_gem_ops macbgem_ops;
589
576 struct mii_bus *mii_bus; 590 struct mii_bus *mii_bus;
577 struct phy_device *phy_dev; 591 struct phy_device *phy_dev;
578 unsigned int link; 592 unsigned int link;
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 4a1f2fa812ab..7cb148c495c9 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1790,7 +1790,6 @@ err_io:
1790 free_netdev(ndev); 1790 free_netdev(ndev);
1791err_alloc: 1791err_alloc:
1792 release_mem_region(res->start, resource_size(res)); 1792 release_mem_region(res->start, resource_size(res));
1793 platform_set_drvdata(pdev, NULL);
1794 return ret; 1793 return ret;
1795} 1794}
1796 1795
@@ -1813,7 +1812,6 @@ static int xgmac_remove(struct platform_device *pdev)
1813 free_irq(ndev->irq, ndev); 1812 free_irq(ndev->irq, ndev);
1814 free_irq(priv->pmt_irq, ndev); 1813 free_irq(priv->pmt_irq, ndev);
1815 1814
1816 platform_set_drvdata(pdev, NULL);
1817 unregister_netdev(ndev); 1815 unregister_netdev(ndev);
1818 netif_napi_del(&priv->napi); 1816 netif_napi_del(&priv->napi);
1819 1817
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 9624cfe7df57..d7048db9863d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -1351,22 +1351,11 @@ static void remove_one(struct pci_dev *pdev)
1351 t1_sw_reset(pdev); 1351 t1_sw_reset(pdev);
1352} 1352}
1353 1353
1354static struct pci_driver driver = { 1354static struct pci_driver cxgb_pci_driver = {
1355 .name = DRV_NAME, 1355 .name = DRV_NAME,
1356 .id_table = t1_pci_tbl, 1356 .id_table = t1_pci_tbl,
1357 .probe = init_one, 1357 .probe = init_one,
1358 .remove = remove_one, 1358 .remove = remove_one,
1359}; 1359};
1360 1360
1361static int __init t1_init_module(void) 1361module_pci_driver(cxgb_pci_driver);
1362{
1363 return pci_register_driver(&driver);
1364}
1365
1366static void __exit t1_cleanup_module(void)
1367{
1368 pci_unregister_driver(&driver);
1369}
1370
1371module_init(t1_init_module);
1372module_exit(t1_cleanup_module);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 71497e835f42..b650951791dd 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3037,7 +3037,9 @@ static void t3_io_resume(struct pci_dev *pdev)
3037 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n", 3037 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3038 t3_read_reg(adapter, A_PCIE_PEX_ERR)); 3038 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3039 3039
3040 rtnl_lock();
3040 t3_resume_ports(adapter); 3041 t3_resume_ports(adapter);
3042 rtnl_unlock();
3041} 3043}
3042 3044
3043static const struct pci_error_handlers t3_err_handler = { 3045static const struct pci_error_handlers t3_err_handler = {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 0c96e5fe99cc..4058b856eb71 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1246,6 +1246,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1246 struct tid_range stid_range, tid_range; 1246 struct tid_range stid_range, tid_range;
1247 struct mtutab mtutab; 1247 struct mtutab mtutab;
1248 unsigned int l2t_capacity; 1248 unsigned int l2t_capacity;
1249 struct l2t_data *l2td;
1249 1250
1250 t = kzalloc(sizeof(*t), GFP_KERNEL); 1251 t = kzalloc(sizeof(*t), GFP_KERNEL);
1251 if (!t) 1252 if (!t)
@@ -1261,8 +1262,8 @@ int cxgb3_offload_activate(struct adapter *adapter)
1261 goto out_free; 1262 goto out_free;
1262 1263
1263 err = -ENOMEM; 1264 err = -ENOMEM;
1264 RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); 1265 l2td = t3_init_l2t(l2t_capacity);
1265 if (!L2DATA(dev)) 1266 if (!l2td)
1266 goto out_free; 1267 goto out_free;
1267 1268
1268 natids = min(tid_range.num / 2, MAX_ATIDS); 1269 natids = min(tid_range.num / 2, MAX_ATIDS);
@@ -1279,6 +1280,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1279 INIT_LIST_HEAD(&t->list_node); 1280 INIT_LIST_HEAD(&t->list_node);
1280 t->dev = dev; 1281 t->dev = dev;
1281 1282
1283 RCU_INIT_POINTER(dev->l2opt, l2td);
1282 T3C_DATA(dev) = t; 1284 T3C_DATA(dev) = t;
1283 dev->recv = process_rx; 1285 dev->recv = process_rx;
1284 dev->neigh_update = t3_l2t_update; 1286 dev->neigh_update = t3_l2t_update;
@@ -1294,8 +1296,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1294 return 0; 1296 return 0;
1295 1297
1296out_free_l2t: 1298out_free_l2t:
1297 t3_free_l2t(L2DATA(dev)); 1299 t3_free_l2t(l2td);
1298 RCU_INIT_POINTER(dev->l2opt, NULL);
1299out_free: 1300out_free:
1300 kfree(t); 1301 kfree(t);
1301 return err; 1302 return err;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index f12e6b85a653..687ec4a8bb48 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -455,6 +455,11 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
455 q->pg_chunk.offset = 0; 455 q->pg_chunk.offset = 0;
456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
457 0, q->alloc_size, PCI_DMA_FROMDEVICE); 457 0, q->alloc_size, PCI_DMA_FROMDEVICE);
458 if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
459 __free_pages(q->pg_chunk.page, order);
460 q->pg_chunk.page = NULL;
461 return -EIO;
462 }
458 q->pg_chunk.mapping = mapping; 463 q->pg_chunk.mapping = mapping;
459 } 464 }
460 sd->pg_chunk = q->pg_chunk; 465 sd->pg_chunk = q->pg_chunk;
@@ -949,40 +954,75 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
949 return flits_to_desc(flits); 954 return flits_to_desc(flits);
950} 955}
951 956
957
958/* map_skb - map a packet main body and its page fragments
959 * @pdev: the PCI device
960 * @skb: the packet
961 * @addr: placeholder to save the mapped addresses
962 *
963 * map the main body of an sk_buff and its page fragments, if any.
964 */
965static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
966 dma_addr_t *addr)
967{
968 const skb_frag_t *fp, *end;
969 const struct skb_shared_info *si;
970
971 *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
972 PCI_DMA_TODEVICE);
973 if (pci_dma_mapping_error(pdev, *addr))
974 goto out_err;
975
976 si = skb_shinfo(skb);
977 end = &si->frags[si->nr_frags];
978
979 for (fp = si->frags; fp < end; fp++) {
980 *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
981 DMA_TO_DEVICE);
982 if (pci_dma_mapping_error(pdev, *addr))
983 goto unwind;
984 }
985 return 0;
986
987unwind:
988 while (fp-- > si->frags)
989 dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
990 DMA_TO_DEVICE);
991
992 pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
993out_err:
994 return -ENOMEM;
995}
996
952/** 997/**
953 * make_sgl - populate a scatter/gather list for a packet 998 * write_sgl - populate a scatter/gather list for a packet
954 * @skb: the packet 999 * @skb: the packet
955 * @sgp: the SGL to populate 1000 * @sgp: the SGL to populate
956 * @start: start address of skb main body data to include in the SGL 1001 * @start: start address of skb main body data to include in the SGL
957 * @len: length of skb main body data to include in the SGL 1002 * @len: length of skb main body data to include in the SGL
958 * @pdev: the PCI device 1003 * @addr: the list of the mapped addresses
959 * 1004 *
960 * Generates a scatter/gather list for the buffers that make up a packet 1005 * Copies the scatter/gather list for the buffers that make up a packet
961 * and returns the SGL size in 8-byte words. The caller must size the SGL 1006 * and returns the SGL size in 8-byte words. The caller must size the SGL
962 * appropriately. 1007 * appropriately.
963 */ 1008 */
964static inline unsigned int make_sgl(const struct sk_buff *skb, 1009static inline unsigned int write_sgl(const struct sk_buff *skb,
965 struct sg_ent *sgp, unsigned char *start, 1010 struct sg_ent *sgp, unsigned char *start,
966 unsigned int len, struct pci_dev *pdev) 1011 unsigned int len, const dma_addr_t *addr)
967{ 1012{
968 dma_addr_t mapping; 1013 unsigned int i, j = 0, k = 0, nfrags;
969 unsigned int i, j = 0, nfrags;
970 1014
971 if (len) { 1015 if (len) {
972 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
973 sgp->len[0] = cpu_to_be32(len); 1016 sgp->len[0] = cpu_to_be32(len);
974 sgp->addr[0] = cpu_to_be64(mapping); 1017 sgp->addr[j++] = cpu_to_be64(addr[k++]);
975 j = 1;
976 } 1018 }
977 1019
978 nfrags = skb_shinfo(skb)->nr_frags; 1020 nfrags = skb_shinfo(skb)->nr_frags;
979 for (i = 0; i < nfrags; i++) { 1021 for (i = 0; i < nfrags; i++) {
980 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1022 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
981 1023
982 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
983 DMA_TO_DEVICE);
984 sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); 1024 sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
985 sgp->addr[j] = cpu_to_be64(mapping); 1025 sgp->addr[j] = cpu_to_be64(addr[k++]);
986 j ^= 1; 1026 j ^= 1;
987 if (j == 0) 1027 if (j == 0)
988 ++sgp; 1028 ++sgp;
@@ -1138,7 +1178,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1138 const struct port_info *pi, 1178 const struct port_info *pi,
1139 unsigned int pidx, unsigned int gen, 1179 unsigned int pidx, unsigned int gen,
1140 struct sge_txq *q, unsigned int ndesc, 1180 struct sge_txq *q, unsigned int ndesc,
1141 unsigned int compl) 1181 unsigned int compl, const dma_addr_t *addr)
1142{ 1182{
1143 unsigned int flits, sgl_flits, cntrl, tso_info; 1183 unsigned int flits, sgl_flits, cntrl, tso_info;
1144 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; 1184 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1196,7 +1236,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1196 } 1236 }
1197 1237
1198 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1238 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1199 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); 1239 sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
1200 1240
1201 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, 1241 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1202 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), 1242 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1227,6 +1267,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1227 struct netdev_queue *txq; 1267 struct netdev_queue *txq;
1228 struct sge_qset *qs; 1268 struct sge_qset *qs;
1229 struct sge_txq *q; 1269 struct sge_txq *q;
1270 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1230 1271
1231 /* 1272 /*
1232 * The chip min packet length is 9 octets but play safe and reject 1273 * The chip min packet length is 9 octets but play safe and reject
@@ -1255,6 +1296,11 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1255 return NETDEV_TX_BUSY; 1296 return NETDEV_TX_BUSY;
1256 } 1297 }
1257 1298
1299 if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1300 dev_kfree_skb(skb);
1301 return NETDEV_TX_OK;
1302 }
1303
1258 q->in_use += ndesc; 1304 q->in_use += ndesc;
1259 if (unlikely(credits - ndesc < q->stop_thres)) { 1305 if (unlikely(credits - ndesc < q->stop_thres)) {
1260 t3_stop_tx_queue(txq, qs, q); 1306 t3_stop_tx_queue(txq, qs, q);
@@ -1312,7 +1358,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1312 if (likely(!skb_shared(skb))) 1358 if (likely(!skb_shared(skb)))
1313 skb_orphan(skb); 1359 skb_orphan(skb);
1314 1360
1315 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); 1361 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1316 check_ring_tx_db(adap, q); 1362 check_ring_tx_db(adap, q);
1317 return NETDEV_TX_OK; 1363 return NETDEV_TX_OK;
1318} 1364}
@@ -1537,10 +1583,9 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
1537 dui = (struct deferred_unmap_info *)skb->head; 1583 dui = (struct deferred_unmap_info *)skb->head;
1538 p = dui->addr; 1584 p = dui->addr;
1539 1585
1540 if (skb->tail - skb->transport_header) 1586 if (skb_tail_pointer(skb) - skb_transport_header(skb))
1541 pci_unmap_single(dui->pdev, *p++, 1587 pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
1542 skb->tail - skb->transport_header, 1588 skb_transport_header(skb), PCI_DMA_TODEVICE);
1543 PCI_DMA_TODEVICE);
1544 1589
1545 si = skb_shinfo(skb); 1590 si = skb_shinfo(skb);
1546 for (i = 0; i < si->nr_frags; i++) 1591 for (i = 0; i < si->nr_frags; i++)
@@ -1578,7 +1623,8 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1578 */ 1623 */
1579static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, 1624static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1580 struct sge_txq *q, unsigned int pidx, 1625 struct sge_txq *q, unsigned int pidx,
1581 unsigned int gen, unsigned int ndesc) 1626 unsigned int gen, unsigned int ndesc,
1627 const dma_addr_t *addr)
1582{ 1628{
1583 unsigned int sgl_flits, flits; 1629 unsigned int sgl_flits, flits;
1584 struct work_request_hdr *from; 1630 struct work_request_hdr *from;
@@ -1599,9 +1645,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1599 1645
1600 flits = skb_transport_offset(skb) / 8; 1646 flits = skb_transport_offset(skb) / 8;
1601 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1647 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1602 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), 1648 sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
1603 skb->tail - skb->transport_header, 1649 skb_tail_pointer(skb) -
1604 adap->pdev); 1650 skb_transport_header(skb), addr);
1605 if (need_skb_unmap()) { 1651 if (need_skb_unmap()) {
1606 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); 1652 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1607 skb->destructor = deferred_unmap_destructor; 1653 skb->destructor = deferred_unmap_destructor;
@@ -1627,7 +1673,7 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1627 1673
1628 flits = skb_transport_offset(skb) / 8; /* headers */ 1674 flits = skb_transport_offset(skb) / 8; /* headers */
1629 cnt = skb_shinfo(skb)->nr_frags; 1675 cnt = skb_shinfo(skb)->nr_frags;
1630 if (skb->tail != skb->transport_header) 1676 if (skb_tail_pointer(skb) != skb_transport_header(skb))
1631 cnt++; 1677 cnt++;
1632 return flits_to_desc(flits + sgl_len(cnt)); 1678 return flits_to_desc(flits + sgl_len(cnt));
1633} 1679}
@@ -1659,6 +1705,11 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1659 goto again; 1705 goto again;
1660 } 1706 }
1661 1707
1708 if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1709 spin_unlock(&q->lock);
1710 return NET_XMIT_SUCCESS;
1711 }
1712
1662 gen = q->gen; 1713 gen = q->gen;
1663 q->in_use += ndesc; 1714 q->in_use += ndesc;
1664 pidx = q->pidx; 1715 pidx = q->pidx;
@@ -1669,7 +1720,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1669 } 1720 }
1670 spin_unlock(&q->lock); 1721 spin_unlock(&q->lock);
1671 1722
1672 write_ofld_wr(adap, skb, q, pidx, gen, ndesc); 1723 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1673 check_ring_tx_db(adap, q); 1724 check_ring_tx_db(adap, q);
1674 return NET_XMIT_SUCCESS; 1725 return NET_XMIT_SUCCESS;
1675} 1726}
@@ -1687,6 +1738,7 @@ static void restart_offloadq(unsigned long data)
1687 struct sge_txq *q = &qs->txq[TXQ_OFLD]; 1738 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1688 const struct port_info *pi = netdev_priv(qs->netdev); 1739 const struct port_info *pi = netdev_priv(qs->netdev);
1689 struct adapter *adap = pi->adapter; 1740 struct adapter *adap = pi->adapter;
1741 unsigned int written = 0;
1690 1742
1691 spin_lock(&q->lock); 1743 spin_lock(&q->lock);
1692again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); 1744again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1706,10 +1758,14 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1706 break; 1758 break;
1707 } 1759 }
1708 1760
1761 if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1762 break;
1763
1709 gen = q->gen; 1764 gen = q->gen;
1710 q->in_use += ndesc; 1765 q->in_use += ndesc;
1711 pidx = q->pidx; 1766 pidx = q->pidx;
1712 q->pidx += ndesc; 1767 q->pidx += ndesc;
1768 written += ndesc;
1713 if (q->pidx >= q->size) { 1769 if (q->pidx >= q->size) {
1714 q->pidx -= q->size; 1770 q->pidx -= q->size;
1715 q->gen ^= 1; 1771 q->gen ^= 1;
@@ -1717,7 +1773,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1717 __skb_unlink(skb, &q->sendq); 1773 __skb_unlink(skb, &q->sendq);
1718 spin_unlock(&q->lock); 1774 spin_unlock(&q->lock);
1719 1775
1720 write_ofld_wr(adap, skb, q, pidx, gen, ndesc); 1776 write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1777 (dma_addr_t *)skb->head);
1721 spin_lock(&q->lock); 1778 spin_lock(&q->lock);
1722 } 1779 }
1723 spin_unlock(&q->lock); 1780 spin_unlock(&q->lock);
@@ -1727,8 +1784,9 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1727 set_bit(TXQ_LAST_PKT_DB, &q->flags); 1784 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1728#endif 1785#endif
1729 wmb(); 1786 wmb();
1730 t3_write_reg(adap, A_SG_KDOORBELL, 1787 if (likely(written))
1731 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1788 t3_write_reg(adap, A_SG_KDOORBELL,
1789 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1732} 1790}
1733 1791
1734/** 1792/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 681804b30a3f..2aafb809e067 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -51,7 +51,7 @@
51#include "t4_hw.h" 51#include "t4_hw.h"
52 52
53#define FW_VERSION_MAJOR 1 53#define FW_VERSION_MAJOR 1
54#define FW_VERSION_MINOR 1 54#define FW_VERSION_MINOR 4
55#define FW_VERSION_MICRO 0 55#define FW_VERSION_MICRO 0
56 56
57#define FW_VERSION_MAJOR_T5 0 57#define FW_VERSION_MAJOR_T5 0
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3cd397d60434..5a3256b083f2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4842,8 +4842,17 @@ static int adap_init0(struct adapter *adap)
4842 * is excessively mismatched relative to the driver.) 4842 * is excessively mismatched relative to the driver.)
4843 */ 4843 */
4844 ret = t4_check_fw_version(adap); 4844 ret = t4_check_fw_version(adap);
4845
4846 /* The error code -EFAULT is returned by t4_check_fw_version() if
4847 * firmware on adapter < supported firmware. If firmware on adapter
4848 * is too old (not supported by driver) and we're the MASTER_PF set
4849 * adapter state to DEV_STATE_UNINIT to force firmware upgrade
4850 * and reinitialization.
4851 */
4852 if ((adap->flags & MASTER_PF) && ret == -EFAULT)
4853 state = DEV_STATE_UNINIT;
4845 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { 4854 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4846 if (ret == -EINVAL || ret > 0) { 4855 if (ret == -EINVAL || ret == -EFAULT || ret > 0) {
4847 if (upgrade_fw(adap) >= 0) { 4856 if (upgrade_fw(adap) >= 0) {
4848 /* 4857 /*
4849 * Note that the chip was reset as part of the 4858 * Note that the chip was reset as part of the
@@ -4852,7 +4861,21 @@ static int adap_init0(struct adapter *adap)
4852 */ 4861 */
4853 reset = 0; 4862 reset = 0;
4854 ret = t4_check_fw_version(adap); 4863 ret = t4_check_fw_version(adap);
4855 } 4864 } else
4865 if (ret == -EFAULT) {
4866 /*
4867 * Firmware is old but still might
4868 * work if we force reinitialization
4869 * of the adapter. Ignoring FW upgrade
4870 * failure.
4871 */
4872 dev_warn(adap->pdev_dev,
4873 "Ignoring firmware upgrade "
4874 "failure, and forcing driver "
4875 "to reinitialize the "
4876 "adapter.\n");
4877 ret = 0;
4878 }
4856 } 4879 }
4857 if (ret < 0) 4880 if (ret < 0)
4858 return ret; 4881 return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 2bfbb206b35a..ac311f5f3eb9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1294,7 +1294,7 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1294 1294
1295 flits = skb_transport_offset(skb) / 8U; /* headers */ 1295 flits = skb_transport_offset(skb) / 8U; /* headers */
1296 cnt = skb_shinfo(skb)->nr_frags; 1296 cnt = skb_shinfo(skb)->nr_frags;
1297 if (skb->tail != skb->transport_header) 1297 if (skb_tail_pointer(skb) != skb_transport_header(skb))
1298 cnt++; 1298 cnt++;
1299 return flits + sgl_len(cnt); 1299 return flits + sgl_len(cnt);
1300} 1300}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index d02d4e8c4417..4cbb2f9850be 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -938,6 +938,15 @@ int t4_check_fw_version(struct adapter *adapter)
938 memcpy(adapter->params.api_vers, api_vers, 938 memcpy(adapter->params.api_vers, api_vers,
939 sizeof(adapter->params.api_vers)); 939 sizeof(adapter->params.api_vers));
940 940
941 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
942 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
943 dev_err(adapter->pdev_dev,
944 "Card has firmware version %u.%u.%u, minimum "
945 "supported firmware is %u.%u.%u.\n", major, minor,
946 micro, exp_major, exp_minor, exp_micro);
947 return -EFAULT;
948 }
949
941 if (major != exp_major) { /* major mismatch - fail */ 950 if (major != exp_major) { /* major mismatch - fail */
942 dev_err(adapter->pdev_dev, 951 dev_err(adapter->pdev_dev,
943 "card FW has major version %u, driver wants %u\n", 952 "card FW has major version %u, driver wants %u\n",
@@ -3773,7 +3782,6 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3773 p->lport = j; 3782 p->lport = j;
3774 p->rss_size = rss_size; 3783 p->rss_size = rss_size;
3775 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); 3784 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3776 adap->port[i]->dev_id = j;
3777 3785
3778 ret = ntohl(c.u.info.lstatus_to_modtype); 3786 ret = ntohl(c.u.info.lstatus_to_modtype);
3779 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? 3787 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 8388e36cf08f..7403dff8f14a 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -44,7 +44,6 @@ config CS89x0_PLATFORM
44config EP93XX_ETH 44config EP93XX_ETH
45 tristate "EP93xx Ethernet support" 45 tristate "EP93xx Ethernet support"
46 depends on ARM && ARCH_EP93XX 46 depends on ARM && ARCH_EP93XX
47 select NET_CORE
48 select MII 47 select MII
49 help 48 help
50 This is a driver for the ethernet hardware included in EP93xx CPUs. 49 This is a driver for the ethernet hardware included in EP93xx CPUs.
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 67b0388b6e68..e3d4ec836f8b 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -783,7 +783,6 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
783 dev = platform_get_drvdata(pdev); 783 dev = platform_get_drvdata(pdev);
784 if (dev == NULL) 784 if (dev == NULL)
785 return 0; 785 return 0;
786 platform_set_drvdata(pdev, NULL);
787 786
788 ep = netdev_priv(dev); 787 ep = netdev_priv(dev);
789 788
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 635f55992d7e..992ec2ee64d9 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1761,6 +1761,7 @@ static void enic_change_mtu_work(struct work_struct *work)
1761 enic_synchronize_irqs(enic); 1761 enic_synchronize_irqs(enic);
1762 err = vnic_rq_disable(&enic->rq[0]); 1762 err = vnic_rq_disable(&enic->rq[0]);
1763 if (err) { 1763 if (err) {
1764 rtnl_unlock();
1764 netdev_err(netdev, "Unable to disable RQ.\n"); 1765 netdev_err(netdev, "Unable to disable RQ.\n");
1765 return; 1766 return;
1766 } 1767 }
@@ -1773,6 +1774,7 @@ static void enic_change_mtu_work(struct work_struct *work)
1773 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); 1774 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1774 /* Need at least one buffer on ring to get going */ 1775 /* Need at least one buffer on ring to get going */
1775 if (vnic_rq_desc_used(&enic->rq[0]) == 0) { 1776 if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
1777 rtnl_unlock();
1776 netdev_err(netdev, "Unable to alloc receive buffers.\n"); 1778 netdev_err(netdev, "Unable to alloc receive buffers.\n");
1777 return; 1779 return;
1778 } 1780 }
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 9745fe5e8039..316c5e5a92ad 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -6,7 +6,6 @@ config DM9000
6 tristate "DM9000 support" 6 tristate "DM9000 support"
7 depends on ARM || BLACKFIN || MIPS || COLDFIRE 7 depends on ARM || BLACKFIN || MIPS || COLDFIRE
8 select CRC32 8 select CRC32
9 select NET_CORE
10 select MII 9 select MII
11 ---help--- 10 ---help---
12 Support for DM9000 chipset. 11 Support for DM9000 chipset.
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 9105465b2a1a..a13b312b50f2 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -29,6 +29,8 @@
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/crc32.h> 30#include <linux/crc32.h>
31#include <linux/mii.h> 31#include <linux/mii.h>
32#include <linux/of.h>
33#include <linux/of_net.h>
32#include <linux/ethtool.h> 34#include <linux/ethtool.h>
33#include <linux/dm9000.h> 35#include <linux/dm9000.h>
34#include <linux/delay.h> 36#include <linux/delay.h>
@@ -827,7 +829,7 @@ dm9000_hash_table_unlocked(struct net_device *dev)
827 struct netdev_hw_addr *ha; 829 struct netdev_hw_addr *ha;
828 int i, oft; 830 int i, oft;
829 u32 hash_val; 831 u32 hash_val;
830 u16 hash_table[4]; 832 u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */
831 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN; 833 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
832 834
833 dm9000_dbg(db, 1, "entering %s\n", __func__); 835 dm9000_dbg(db, 1, "entering %s\n", __func__);
@@ -835,13 +837,6 @@ dm9000_hash_table_unlocked(struct net_device *dev)
835 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++) 837 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
836 iow(db, oft, dev->dev_addr[i]); 838 iow(db, oft, dev->dev_addr[i]);
837 839
838 /* Clear Hash Table */
839 for (i = 0; i < 4; i++)
840 hash_table[i] = 0x0;
841
842 /* broadcast address */
843 hash_table[3] = 0x8000;
844
845 if (dev->flags & IFF_PROMISC) 840 if (dev->flags & IFF_PROMISC)
846 rcr |= RCR_PRMSC; 841 rcr |= RCR_PRMSC;
847 842
@@ -1358,6 +1353,31 @@ static const struct net_device_ops dm9000_netdev_ops = {
1358#endif 1353#endif
1359}; 1354};
1360 1355
1356static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1357{
1358 struct dm9000_plat_data *pdata;
1359 struct device_node *np = dev->of_node;
1360 const void *mac_addr;
1361
1362 if (!IS_ENABLED(CONFIG_OF) || !np)
1363 return NULL;
1364
1365 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1366 if (!pdata)
1367 return ERR_PTR(-ENOMEM);
1368
1369 if (of_find_property(np, "davicom,ext-phy", NULL))
1370 pdata->flags |= DM9000_PLATF_EXT_PHY;
1371 if (of_find_property(np, "davicom,no-eeprom", NULL))
1372 pdata->flags |= DM9000_PLATF_NO_EEPROM;
1373
1374 mac_addr = of_get_mac_address(np);
1375 if (mac_addr)
1376 memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));
1377
1378 return pdata;
1379}
1380
1361/* 1381/*
1362 * Search DM9000 board, allocate space and register it 1382 * Search DM9000 board, allocate space and register it
1363 */ 1383 */
@@ -1373,6 +1393,12 @@ dm9000_probe(struct platform_device *pdev)
1373 int i; 1393 int i;
1374 u32 id_val; 1394 u32 id_val;
1375 1395
1396 if (!pdata) {
1397 pdata = dm9000_parse_dt(&pdev->dev);
1398 if (IS_ERR(pdata))
1399 return PTR_ERR(pdata);
1400 }
1401
1376 /* Init network device */ 1402 /* Init network device */
1377 ndev = alloc_etherdev(sizeof(struct board_info)); 1403 ndev = alloc_etherdev(sizeof(struct board_info));
1378 if (!ndev) 1404 if (!ndev)
@@ -1673,8 +1699,6 @@ dm9000_drv_remove(struct platform_device *pdev)
1673{ 1699{
1674 struct net_device *ndev = platform_get_drvdata(pdev); 1700 struct net_device *ndev = platform_get_drvdata(pdev);
1675 1701
1676 platform_set_drvdata(pdev, NULL);
1677
1678 unregister_netdev(ndev); 1702 unregister_netdev(ndev);
1679 dm9000_release_board(pdev, netdev_priv(ndev)); 1703 dm9000_release_board(pdev, netdev_priv(ndev));
1680 free_netdev(ndev); /* free device structure */ 1704 free_netdev(ndev); /* free device structure */
@@ -1683,11 +1707,20 @@ dm9000_drv_remove(struct platform_device *pdev)
1683 return 0; 1707 return 0;
1684} 1708}
1685 1709
1710#ifdef CONFIG_OF
1711static const struct of_device_id dm9000_of_matches[] = {
1712 { .compatible = "davicom,dm9000", },
1713 { /* sentinel */ }
1714};
1715MODULE_DEVICE_TABLE(of, dm9000_of_matches);
1716#endif
1717
1686static struct platform_driver dm9000_driver = { 1718static struct platform_driver dm9000_driver = {
1687 .driver = { 1719 .driver = {
1688 .name = "dm9000", 1720 .name = "dm9000",
1689 .owner = THIS_MODULE, 1721 .owner = THIS_MODULE,
1690 .pm = &dm9000_drv_pm_ops, 1722 .pm = &dm9000_drv_pm_ops,
1723 .of_match_table = of_match_ptr(dm9000_of_matches),
1691 }, 1724 },
1692 .probe = dm9000_probe, 1725 .probe = dm9000_probe,
1693 .remove = dm9000_drv_remove, 1726 .remove = dm9000_drv_remove,
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 1df33c799c00..eb9ba6e97d04 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -126,7 +126,6 @@ config WINBOND_840
126 tristate "Winbond W89c840 Ethernet support" 126 tristate "Winbond W89c840 Ethernet support"
127 depends on PCI 127 depends on PCI
128 select CRC32 128 select CRC32
129 select NET_CORE
130 select MII 129 select MII
131 ---help--- 130 ---help---
132 This driver is for the Winbond W89c840 chip. It also works with 131 This driver is for the Winbond W89c840 chip. It also works with
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 1e9443d9fb57..c94152f1c6be 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1410,12 +1410,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1410 return i; 1410 return i;
1411 } 1411 }
1412 1412
1413 /* The chip will fail to enter a low-power state later unless
1414 * first explicitly commanded into D0 */
1415 if (pci_set_power_state(pdev, PCI_D0)) {
1416 pr_notice("Failed to set power state to D0\n");
1417 }
1418
1419 irq = pdev->irq; 1413 irq = pdev->irq;
1420 1414
1421 /* alloc_etherdev ensures aligned and zeroed private structures */ 1415 /* alloc_etherdev ensures aligned and zeroed private structures */
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index cdbcd1643141..9b84cb04fe5f 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -1171,16 +1171,4 @@ investigate_write_descriptor(struct net_device *dev,
1171 } 1171 }
1172} 1172}
1173 1173
1174static int __init xircom_init(void) 1174module_pci_driver(xircom_ops);
1175{
1176 return pci_register_driver(&xircom_ops);
1177}
1178
1179static void __exit xircom_exit(void)
1180{
1181 pci_unregister_driver(&xircom_ops);
1182}
1183
1184module_init(xircom_init)
1185module_exit(xircom_exit)
1186
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index ee26ce78e270..c543ac11ce08 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -36,7 +36,6 @@ config SUNDANCE
36 tristate "Sundance Alta support" 36 tristate "Sundance Alta support"
37 depends on PCI 37 depends on PCI
38 select CRC32 38 select CRC32
39 select NET_CORE
40 select MII 39 select MII
41 ---help--- 40 ---help---
42 This driver is for the Sundance "Alta" chip. 41 This driver is for the Sundance "Alta" chip.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 0a510684e468..c827b1b6b1ce 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -333,6 +333,9 @@ enum vf_state {
333#define BE_VF_UC_PMAC_COUNT 2 333#define BE_VF_UC_PMAC_COUNT 2
334#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11) 334#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
335 335
336/* Ethtool set_dump flags */
337#define LANCER_INITIATE_FW_DUMP 0x1
338
336struct phy_info { 339struct phy_info {
337 u8 transceiver; 340 u8 transceiver;
338 u8 autoneg; 341 u8 autoneg;
@@ -398,6 +401,7 @@ struct be_adapter {
398 u32 cmd_privileges; 401 u32 cmd_privileges;
399 /* Ethtool knobs and info */ 402 /* Ethtool knobs and info */
400 char fw_ver[FW_VER_LEN]; 403 char fw_ver[FW_VER_LEN];
404 char fw_on_flash[FW_VER_LEN];
401 int if_handle; /* Used to configure filtering */ 405 int if_handle; /* Used to configure filtering */
402 u32 *pmac_id; /* MAC addr handle used by BE card */ 406 u32 *pmac_id; /* MAC addr handle used by BE card */
403 u32 beacon_state; /* for set_phys_id */ 407 u32 beacon_state; /* for set_phys_id */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1db2df61b8af..6e6e0a117ee2 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -3255,6 +3255,72 @@ err:
3255 return status; 3255 return status;
3256} 3256}
3257 3257
3258static int lancer_wait_idle(struct be_adapter *adapter)
3259{
3260#define SLIPORT_IDLE_TIMEOUT 30
3261 u32 reg_val;
3262 int status = 0, i;
3263
3264 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3265 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3266 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3267 break;
3268
3269 ssleep(1);
3270 }
3271
3272 if (i == SLIPORT_IDLE_TIMEOUT)
3273 status = -1;
3274
3275 return status;
3276}
3277
3278int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3279{
3280 int status = 0;
3281
3282 status = lancer_wait_idle(adapter);
3283 if (status)
3284 return status;
3285
3286 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3287
3288 return status;
3289}
3290
3291/* Routine to check whether dump image is present or not */
3292bool dump_present(struct be_adapter *adapter)
3293{
3294 u32 sliport_status = 0;
3295
3296 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3297 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3298}
3299
3300int lancer_initiate_dump(struct be_adapter *adapter)
3301{
3302 int status;
3303
3304 /* give firmware reset and diagnostic dump */
3305 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
3306 PHYSDEV_CONTROL_DD_MASK);
3307 if (status < 0) {
3308 dev_err(&adapter->pdev->dev, "Firmware reset failed\n");
3309 return status;
3310 }
3311
3312 status = lancer_wait_idle(adapter);
3313 if (status)
3314 return status;
3315
3316 if (!dump_present(adapter)) {
3317 dev_err(&adapter->pdev->dev, "Dump image not present\n");
3318 return -1;
3319 }
3320
3321 return 0;
3322}
3323
3258/* Uses sync mcc */ 3324/* Uses sync mcc */
3259int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) 3325int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3260{ 3326{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 025bdb0d1764..5228d88c5a02 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1937,6 +1937,9 @@ extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
1937 struct be_dma_mem *cmd, 1937 struct be_dma_mem *cmd,
1938 struct be_fat_conf_params *cfgs); 1938 struct be_fat_conf_params *cfgs);
1939extern int lancer_wait_ready(struct be_adapter *adapter); 1939extern int lancer_wait_ready(struct be_adapter *adapter);
1940extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
1941extern int lancer_initiate_dump(struct be_adapter *adapter);
1942extern bool dump_present(struct be_adapter *adapter);
1940extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter); 1943extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
1941extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); 1944extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
1942extern int be_cmd_get_func_config(struct be_adapter *adapter); 1945extern int be_cmd_get_func_config(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 3d4461adb3b4..4f8c941217cc 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -177,19 +177,15 @@ static void be_get_drvinfo(struct net_device *netdev,
177 struct ethtool_drvinfo *drvinfo) 177 struct ethtool_drvinfo *drvinfo)
178{ 178{
179 struct be_adapter *adapter = netdev_priv(netdev); 179 struct be_adapter *adapter = netdev_priv(netdev);
180 char fw_on_flash[FW_VER_LEN];
181
182 memset(fw_on_flash, 0 , sizeof(fw_on_flash));
183 be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
184 180
185 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); 181 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
186 strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version)); 182 strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
187 if (!memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN)) 183 if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
188 strlcpy(drvinfo->fw_version, adapter->fw_ver, 184 strlcpy(drvinfo->fw_version, adapter->fw_ver,
189 sizeof(drvinfo->fw_version)); 185 sizeof(drvinfo->fw_version));
190 else 186 else
191 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 187 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
192 "%s [%s]", adapter->fw_ver, fw_on_flash); 188 "%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
193 189
194 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 190 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
195 sizeof(drvinfo->bus_info)); 191 sizeof(drvinfo->bus_info));
@@ -673,6 +669,34 @@ be_set_phys_id(struct net_device *netdev,
673 return 0; 669 return 0;
674} 670}
675 671
672static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
673{
674 struct be_adapter *adapter = netdev_priv(netdev);
675 struct device *dev = &adapter->pdev->dev;
676 int status;
677
678 if (!lancer_chip(adapter)) {
679 dev_err(dev, "FW dump not supported\n");
680 return -EOPNOTSUPP;
681 }
682
683 if (dump_present(adapter)) {
684 dev_err(dev, "Previous dump not cleared, not forcing dump\n");
685 return 0;
686 }
687
688 switch (dump->flag) {
689 case LANCER_INITIATE_FW_DUMP:
690 status = lancer_initiate_dump(adapter);
691 if (!status)
692 dev_info(dev, "F/w dump initiated successfully\n");
693 break;
694 default:
695 dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag);
696 return -EINVAL;
697 }
698 return status;
699}
676 700
677static void 701static void
678be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 702be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -1110,6 +1134,7 @@ const struct ethtool_ops be_ethtool_ops = {
1110 .set_pauseparam = be_set_pauseparam, 1134 .set_pauseparam = be_set_pauseparam,
1111 .get_strings = be_get_stat_strings, 1135 .get_strings = be_get_stat_strings,
1112 .set_phys_id = be_set_phys_id, 1136 .set_phys_id = be_set_phys_id,
1137 .set_dump = be_set_dump,
1113 .get_msglevel = be_get_msg_level, 1138 .get_msglevel = be_get_msg_level,
1114 .set_msglevel = be_set_msg_level, 1139 .set_msglevel = be_set_msg_level,
1115 .get_sset_count = be_get_sset_count, 1140 .get_sset_count = be_get_sset_count,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 8780183c6d1c..3e2162121601 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -53,10 +53,12 @@
53#define PHYSDEV_CONTROL_OFFSET 0x414 53#define PHYSDEV_CONTROL_OFFSET 0x414
54 54
55#define SLIPORT_STATUS_ERR_MASK 0x80000000 55#define SLIPORT_STATUS_ERR_MASK 0x80000000
56#define SLIPORT_STATUS_DIP_MASK 0x02000000
56#define SLIPORT_STATUS_RN_MASK 0x01000000 57#define SLIPORT_STATUS_RN_MASK 0x01000000
57#define SLIPORT_STATUS_RDY_MASK 0x00800000 58#define SLIPORT_STATUS_RDY_MASK 0x00800000
58#define SLI_PORT_CONTROL_IP_MASK 0x08000000 59#define SLI_PORT_CONTROL_IP_MASK 0x08000000
59#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002 60#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002
61#define PHYSDEV_CONTROL_DD_MASK 0x00000004
60#define PHYSDEV_CONTROL_INP_MASK 0x40000000 62#define PHYSDEV_CONTROL_INP_MASK 0x40000000
61 63
62#define SLIPORT_ERROR_NO_RESOURCE1 0x2 64#define SLIPORT_ERROR_NO_RESOURCE1 0x2
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a0b4be51f0d1..2df48bb0f1ca 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -834,32 +834,39 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; 834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
835} 835}
836 836
837static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) 837static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
838 struct sk_buff *skb)
838{ 839{
839 return BE3_chip(adapter) && 840 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
840 be_ipv6_exthdr_check(skb);
841} 841}
842 842
843static netdev_tx_t be_xmit(struct sk_buff *skb, 843static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
844 struct net_device *netdev) 844 struct sk_buff *skb,
845 bool *skip_hw_vlan)
845{ 846{
846 struct be_adapter *adapter = netdev_priv(netdev);
847 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
848 struct be_queue_info *txq = &txo->q;
849 struct iphdr *ip = NULL;
850 u32 wrb_cnt = 0, copied = 0;
851 u32 start = txq->head, eth_hdr_len;
852 bool dummy_wrb, stopped = false;
853 bool skip_hw_vlan = false;
854 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 847 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
848 unsigned int eth_hdr_len;
849 struct iphdr *ip;
855 850
856 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? 851 /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
857 VLAN_ETH_HLEN : ETH_HLEN; 852 * may cause a transmit stall on that port. So the work-around is to
853 * pad such packets to a 36-byte length.
854 */
855 if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
856 if (skb_padto(skb, 36))
857 goto tx_drop;
858 skb->len = 36;
859 }
858 860
859 /* For padded packets, BE HW modifies tot_len field in IP header 861 /* For padded packets, BE HW modifies tot_len field in IP header
860 * incorrecly when VLAN tag is inserted by HW. 862 * incorrecly when VLAN tag is inserted by HW.
863 * For padded packets, Lancer computes incorrect checksum.
861 */ 864 */
862 if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) { 865 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
866 VLAN_ETH_HLEN : ETH_HLEN;
867 if (skb->len <= 60 &&
868 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
869 is_ipv4_pkt(skb)) {
863 ip = (struct iphdr *)ip_hdr(skb); 870 ip = (struct iphdr *)ip_hdr(skb);
864 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); 871 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
865 } 872 }
@@ -869,15 +876,15 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
869 */ 876 */
870 if ((adapter->function_mode & UMC_ENABLED) && 877 if ((adapter->function_mode & UMC_ENABLED) &&
871 veh->h_vlan_proto == htons(ETH_P_8021Q)) 878 veh->h_vlan_proto == htons(ETH_P_8021Q))
872 skip_hw_vlan = true; 879 *skip_hw_vlan = true;
873 880
874 /* HW has a bug wherein it will calculate CSUM for VLAN 881 /* HW has a bug wherein it will calculate CSUM for VLAN
875 * pkts even though it is disabled. 882 * pkts even though it is disabled.
876 * Manually insert VLAN in pkt. 883 * Manually insert VLAN in pkt.
877 */ 884 */
878 if (skb->ip_summed != CHECKSUM_PARTIAL && 885 if (skb->ip_summed != CHECKSUM_PARTIAL &&
879 vlan_tx_tag_present(skb)) { 886 vlan_tx_tag_present(skb)) {
880 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); 887 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
881 if (unlikely(!skb)) 888 if (unlikely(!skb))
882 goto tx_drop; 889 goto tx_drop;
883 } 890 }
@@ -887,8 +894,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
887 * skip HW tagging is not enabled by FW. 894 * skip HW tagging is not enabled by FW.
888 */ 895 */
889 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) && 896 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
890 (adapter->pvid || adapter->qnq_vid) && 897 (adapter->pvid || adapter->qnq_vid) &&
891 !qnq_async_evt_rcvd(adapter))) 898 !qnq_async_evt_rcvd(adapter)))
892 goto tx_drop; 899 goto tx_drop;
893 900
894 /* Manual VLAN tag insertion to prevent: 901 /* Manual VLAN tag insertion to prevent:
@@ -899,11 +906,31 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
899 */ 906 */
900 if (be_ipv6_tx_stall_chk(adapter, skb) && 907 if (be_ipv6_tx_stall_chk(adapter, skb) &&
901 be_vlan_tag_tx_chk(adapter, skb)) { 908 be_vlan_tag_tx_chk(adapter, skb)) {
902 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); 909 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
903 if (unlikely(!skb)) 910 if (unlikely(!skb))
904 goto tx_drop; 911 goto tx_drop;
905 } 912 }
906 913
914 return skb;
915tx_drop:
916 dev_kfree_skb_any(skb);
917 return NULL;
918}
919
920static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
921{
922 struct be_adapter *adapter = netdev_priv(netdev);
923 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
924 struct be_queue_info *txq = &txo->q;
925 bool dummy_wrb, stopped = false;
926 u32 wrb_cnt = 0, copied = 0;
927 bool skip_hw_vlan = false;
928 u32 start = txq->head;
929
930 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
931 if (!skb)
932 return NETDEV_TX_OK;
933
907 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); 934 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
908 935
909 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb, 936 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
@@ -933,7 +960,6 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
933 txq->head = start; 960 txq->head = start;
934 dev_kfree_skb_any(skb); 961 dev_kfree_skb_any(skb);
935 } 962 }
936tx_drop:
937 return NETDEV_TX_OK; 963 return NETDEV_TX_OK;
938} 964}
939 965
@@ -1236,30 +1262,6 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1236 return status; 1262 return status;
1237} 1263}
1238 1264
1239static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1240{
1241 struct pci_dev *dev, *pdev = adapter->pdev;
1242 int vfs = 0, assigned_vfs = 0, pos;
1243 u16 offset, stride;
1244
1245 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1246 if (!pos)
1247 return 0;
1248 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1249 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1250
1251 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1252 while (dev) {
1253 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1254 vfs++;
1255 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1256 assigned_vfs++;
1257 }
1258 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1259 }
1260 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1261}
1262
1263static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo) 1265static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1264{ 1266{
1265 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]); 1267 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
@@ -2771,7 +2773,7 @@ static void be_vf_clear(struct be_adapter *adapter)
2771 struct be_vf_cfg *vf_cfg; 2773 struct be_vf_cfg *vf_cfg;
2772 u32 vf; 2774 u32 vf;
2773 2775
2774 if (be_find_vfs(adapter, ASSIGNED)) { 2776 if (pci_vfs_assigned(adapter->pdev)) {
2775 dev_warn(&adapter->pdev->dev, 2777 dev_warn(&adapter->pdev->dev,
2776 "VFs are assigned to VMs: not disabling VFs\n"); 2778 "VFs are assigned to VMs: not disabling VFs\n");
2777 goto done; 2779 goto done;
@@ -2873,7 +2875,7 @@ static int be_vf_setup(struct be_adapter *adapter)
2873 int status, old_vfs, vf; 2875 int status, old_vfs, vf;
2874 struct device *dev = &adapter->pdev->dev; 2876 struct device *dev = &adapter->pdev->dev;
2875 2877
2876 old_vfs = be_find_vfs(adapter, ENABLED); 2878 old_vfs = pci_num_vf(adapter->pdev);
2877 if (old_vfs) { 2879 if (old_vfs) {
2878 dev_info(dev, "%d VFs are already enabled\n", old_vfs); 2880 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2879 if (old_vfs != num_vfs) 2881 if (old_vfs != num_vfs)
@@ -3184,7 +3186,7 @@ static int be_setup(struct be_adapter *adapter)
3184 if (status) 3186 if (status)
3185 goto err; 3187 goto err;
3186 3188
3187 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL); 3189 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3188 3190
3189 if (adapter->vlans_added) 3191 if (adapter->vlans_added)
3190 be_vid_config(adapter); 3192 be_vid_config(adapter);
@@ -3530,40 +3532,6 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
3530 return 0; 3532 return 0;
3531} 3533}
3532 3534
3533static int lancer_wait_idle(struct be_adapter *adapter)
3534{
3535#define SLIPORT_IDLE_TIMEOUT 30
3536 u32 reg_val;
3537 int status = 0, i;
3538
3539 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3540 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3541 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3542 break;
3543
3544 ssleep(1);
3545 }
3546
3547 if (i == SLIPORT_IDLE_TIMEOUT)
3548 status = -1;
3549
3550 return status;
3551}
3552
3553static int lancer_fw_reset(struct be_adapter *adapter)
3554{
3555 int status = 0;
3556
3557 status = lancer_wait_idle(adapter);
3558 if (status)
3559 return status;
3560
3561 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3562 PHYSDEV_CONTROL_OFFSET);
3563
3564 return status;
3565}
3566
3567static int lancer_fw_download(struct be_adapter *adapter, 3535static int lancer_fw_download(struct be_adapter *adapter,
3568 const struct firmware *fw) 3536 const struct firmware *fw)
3569{ 3537{
@@ -3641,7 +3609,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
3641 } 3609 }
3642 3610
3643 if (change_status == LANCER_FW_RESET_NEEDED) { 3611 if (change_status == LANCER_FW_RESET_NEEDED) {
3644 status = lancer_fw_reset(adapter); 3612 status = lancer_physdev_ctrl(adapter,
3613 PHYSDEV_CONTROL_FW_RESET_MASK);
3645 if (status) { 3614 if (status) {
3646 dev_err(&adapter->pdev->dev, 3615 dev_err(&adapter->pdev->dev,
3647 "Adapter busy for FW reset.\n" 3616 "Adapter busy for FW reset.\n"
@@ -3776,6 +3745,10 @@ int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3776 else 3745 else
3777 status = be_fw_download(adapter, fw); 3746 status = be_fw_download(adapter, fw);
3778 3747
3748 if (!status)
3749 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3750 adapter->fw_on_flash);
3751
3779fw_exit: 3752fw_exit:
3780 release_firmware(fw); 3753 release_firmware(fw);
3781 return status; 3754 return status;
@@ -4203,9 +4176,10 @@ reschedule:
4203 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 4176 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4204} 4177}
4205 4178
4179/* If any VFs are already enabled don't FLR the PF */
4206static bool be_reset_required(struct be_adapter *adapter) 4180static bool be_reset_required(struct be_adapter *adapter)
4207{ 4181{
4208 return be_find_vfs(adapter, ENABLED) > 0 ? false : true; 4182 return pci_num_vf(adapter->pdev) ? false : true;
4209} 4183}
4210 4184
4211static char *mc_name(struct be_adapter *adapter) 4185static char *mc_name(struct be_adapter *adapter)
@@ -4390,7 +4364,7 @@ static int be_resume(struct pci_dev *pdev)
4390 if (status) 4364 if (status)
4391 return status; 4365 return status;
4392 4366
4393 pci_set_power_state(pdev, 0); 4367 pci_set_power_state(pdev, PCI_D0);
4394 pci_restore_state(pdev); 4368 pci_restore_state(pdev);
4395 4369
4396 /* tell fw we're ready to fire cmds */ 4370 /* tell fw we're ready to fire cmds */
@@ -4486,7 +4460,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4486 return PCI_ERS_RESULT_DISCONNECT; 4460 return PCI_ERS_RESULT_DISCONNECT;
4487 4461
4488 pci_set_master(pdev); 4462 pci_set_master(pdev);
4489 pci_set_power_state(pdev, 0); 4463 pci_set_power_state(pdev, PCI_D0);
4490 pci_restore_state(pdev); 4464 pci_restore_state(pdev);
4491 4465
4492 /* Check if card is ok and fw is ready */ 4466 /* Check if card is ok and fw is ready */
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 5722bc61fa58..cf579fb39bc5 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1147,8 +1147,6 @@ static int ethoc_remove(struct platform_device *pdev)
1147 struct net_device *netdev = platform_get_drvdata(pdev); 1147 struct net_device *netdev = platform_get_drvdata(pdev);
1148 struct ethoc *priv = netdev_priv(netdev); 1148 struct ethoc *priv = netdev_priv(netdev);
1149 1149
1150 platform_set_drvdata(pdev, NULL);
1151
1152 if (netdev) { 1150 if (netdev) {
1153 netif_napi_del(&priv->napi); 1151 netif_napi_del(&priv->napi);
1154 phy_disconnect(priv->phy); 1152 phy_disconnect(priv->phy);
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index b8974b9e3b47..5918c6891694 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -21,7 +21,6 @@ if NET_VENDOR_FARADAY
21config FTMAC100 21config FTMAC100
22 tristate "Faraday FTMAC100 10/100 Ethernet support" 22 tristate "Faraday FTMAC100 10/100 Ethernet support"
23 depends on ARM 23 depends on ARM
24 select NET_CORE
25 select MII 24 select MII
26 ---help--- 25 ---help---
27 This driver supports the FTMAC100 10/100 Ethernet controller 26 This driver supports the FTMAC100 10/100 Ethernet controller
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 21b85fb7d05f..934e1ae279f0 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1311,7 +1311,6 @@ err_ioremap:
1311 release_resource(priv->res); 1311 release_resource(priv->res);
1312err_req_mem: 1312err_req_mem:
1313 netif_napi_del(&priv->napi); 1313 netif_napi_del(&priv->napi);
1314 platform_set_drvdata(pdev, NULL);
1315 free_netdev(netdev); 1314 free_netdev(netdev);
1316err_alloc_etherdev: 1315err_alloc_etherdev:
1317 return err; 1316 return err;
@@ -1335,7 +1334,6 @@ static int __exit ftgmac100_remove(struct platform_device *pdev)
1335 release_resource(priv->res); 1334 release_resource(priv->res);
1336 1335
1337 netif_napi_del(&priv->napi); 1336 netif_napi_del(&priv->napi);
1338 platform_set_drvdata(pdev, NULL);
1339 free_netdev(netdev); 1337 free_netdev(netdev);
1340 return 0; 1338 return 0;
1341} 1339}
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index a6eda8d83138..4658f4cc1969 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1149,7 +1149,6 @@ err_ioremap:
1149 release_resource(priv->res); 1149 release_resource(priv->res);
1150err_req_mem: 1150err_req_mem:
1151 netif_napi_del(&priv->napi); 1151 netif_napi_del(&priv->napi);
1152 platform_set_drvdata(pdev, NULL);
1153 free_netdev(netdev); 1152 free_netdev(netdev);
1154err_alloc_etherdev: 1153err_alloc_etherdev:
1155 return err; 1154 return err;
@@ -1169,7 +1168,6 @@ static int __exit ftmac100_remove(struct platform_device *pdev)
1169 release_resource(priv->res); 1168 release_resource(priv->res);
1170 1169
1171 netif_napi_del(&priv->napi); 1170 netif_napi_del(&priv->napi);
1172 platform_set_drvdata(pdev, NULL);
1173 free_netdev(netdev); 1171 free_netdev(netdev);
1174 return 0; 1172 return 0;
1175} 1173}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 9ce5b7185fda..2b0a0ea4f8e7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -60,6 +60,61 @@
60#define BM_MIIGSK_CFGR_RMII 0x01 60#define BM_MIIGSK_CFGR_RMII 0x01
61#define BM_MIIGSK_CFGR_FRCONT_10M 0x40 61#define BM_MIIGSK_CFGR_FRCONT_10M 0x40
62 62
63#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
64#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
65#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
66#define RMON_T_MC_PKT 0x20C /* RMON TX multicast pkts */
67#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
68#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
69#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
70#define RMON_T_FRAG 0x21C /* RMON TX pkts < 64 bytes, bad CRC */
71#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
72#define RMON_T_COL 0x224 /* RMON TX collision count */
73#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
74#define RMON_T_P65TO127 0x22C /* RMON TX 65 to 127 byte pkts */
75#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
76#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
77#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
78#define RMON_T_P1024TO2047 0x23C /* RMON TX 1024 to 2047 byte pkts */
79#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
80#define RMON_T_OCTETS 0x244 /* RMON TX octets */
81#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */
82#define IEEE_T_FRAME_OK 0x24C /* Frames tx'd OK */
83#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
84#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
85#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
86#define IEEE_T_LCOL 0x25C /* Frames tx'd with late collision */
87#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
88#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
89#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
90#define IEEE_T_SQE 0x26C /* Frames tx'd with SQE err */
91#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
92#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
93#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
94#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
95#define RMON_R_MC_PKT 0x28C /* RMON RX multicast pkts */
96#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
97#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
98#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
99#define RMON_R_FRAG 0x29C /* RMON RX pkts < 64 bytes, bad CRC */
100#define RMON_R_JAB 0x2A0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
101#define RMON_R_RESVD_O 0x2A4 /* Reserved */
102#define RMON_R_P64 0x2A8 /* RMON RX 64 byte pkts */
103#define RMON_R_P65TO127 0x2AC /* RMON RX 65 to 127 byte pkts */
104#define RMON_R_P128TO255 0x2B0 /* RMON RX 128 to 255 byte pkts */
105#define RMON_R_P256TO511 0x2B4 /* RMON RX 256 to 511 byte pkts */
106#define RMON_R_P512TO1023 0x2B8 /* RMON RX 512 to 1023 byte pkts */
107#define RMON_R_P1024TO2047 0x2BC /* RMON RX 1024 to 2047 byte pkts */
108#define RMON_R_P_GTE2048 0x2C0 /* RMON RX pkts > 2048 bytes */
109#define RMON_R_OCTETS 0x2C4 /* RMON RX octets */
110#define IEEE_R_DROP 0x2C8 /* Count frames not counted correctly */
111#define IEEE_R_FRAME_OK 0x2CC /* Frames rx'd OK */
112#define IEEE_R_CRC 0x2D0 /* Frames rx'd with CRC err */
113#define IEEE_R_ALIGN 0x2D4 /* Frames rx'd with alignment err */
114#define IEEE_R_MACERR 0x2D8 /* Receive FIFO overflow count */
115#define IEEE_R_FDXFC 0x2DC /* Flow control pause frames rx'd */
116#define IEEE_R_OCTETS_OK 0x2E0 /* Octet cnt for frames rx'd w/o err */
117
63#else 118#else
64 119
65#define FEC_ECNTRL 0x000 /* Ethernet control reg */ 120#define FEC_ECNTRL 0x000 /* Ethernet control reg */
@@ -148,6 +203,9 @@ struct bufdesc_ex {
148#define BD_ENET_RX_CL ((ushort)0x0001) 203#define BD_ENET_RX_CL ((ushort)0x0001)
149#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ 204#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
150 205
206/* Enhanced buffer descriptor control/status used by Ethernet receive */
207#define BD_ENET_RX_VLAN 0x00000004
208
151/* Buffer descriptor control/status used by Ethernet transmit. 209/* Buffer descriptor control/status used by Ethernet transmit.
152*/ 210*/
153#define BD_ENET_TX_READY ((ushort)0x8000) 211#define BD_ENET_TX_READY ((ushort)0x8000)
@@ -272,9 +330,10 @@ struct fec_enet_private {
272 int hwts_tx_en; 330 int hwts_tx_en;
273 struct timer_list time_keep; 331 struct timer_list time_keep;
274 struct fec_enet_delayed_work delay_work; 332 struct fec_enet_delayed_work delay_work;
333 struct regulator *reg_phy;
275}; 334};
276 335
277void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev); 336void fec_ptp_init(struct platform_device *pdev);
278void fec_ptp_start_cyclecounter(struct net_device *ndev); 337void fec_ptp_start_cyclecounter(struct net_device *ndev);
279int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); 338int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
280 339
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d48099f03b7f..d3ad5ea711d3 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -53,13 +53,15 @@
53#include <linux/of_device.h> 53#include <linux/of_device.h>
54#include <linux/of_gpio.h> 54#include <linux/of_gpio.h>
55#include <linux/of_net.h> 55#include <linux/of_net.h>
56#include <linux/pinctrl/consumer.h>
57#include <linux/regulator/consumer.h> 56#include <linux/regulator/consumer.h>
57#include <linux/if_vlan.h>
58 58
59#include <asm/cacheflush.h> 59#include <asm/cacheflush.h>
60 60
61#include "fec.h" 61#include "fec.h"
62 62
63static void set_multicast_list(struct net_device *ndev);
64
63#if defined(CONFIG_ARM) 65#if defined(CONFIG_ARM)
64#define FEC_ALIGNMENT 0xf 66#define FEC_ALIGNMENT 0xf
65#else 67#else
@@ -89,6 +91,8 @@
89#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) 91#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
90/* Controller has hardware checksum support */ 92/* Controller has hardware checksum support */
91#define FEC_QUIRK_HAS_CSUM (1 << 5) 93#define FEC_QUIRK_HAS_CSUM (1 << 5)
94/* Controller has hardware vlan support */
95#define FEC_QUIRK_HAS_VLAN (1 << 6)
92 96
93static struct platform_device_id fec_devtype[] = { 97static struct platform_device_id fec_devtype[] = {
94 { 98 {
@@ -107,7 +111,8 @@ static struct platform_device_id fec_devtype[] = {
107 }, { 111 }, {
108 .name = "imx6q-fec", 112 .name = "imx6q-fec",
109 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 113 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
110 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM, 114 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
115 FEC_QUIRK_HAS_VLAN,
111 }, { 116 }, {
112 .name = "mvf600-fec", 117 .name = "mvf600-fec",
113 .driver_data = FEC_QUIRK_ENET_MAC, 118 .driver_data = FEC_QUIRK_ENET_MAC,
@@ -178,11 +183,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
178#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) 183#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
179#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) 184#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
180 185
181/* The FEC stores dest/src/type, data, and checksum for receive packets. 186/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
182 */ 187 */
183#define PKT_MAXBUF_SIZE 1518 188#define PKT_MAXBUF_SIZE 1522
184#define PKT_MINBUF_SIZE 64 189#define PKT_MINBUF_SIZE 64
185#define PKT_MAXBLR_SIZE 1520 190#define PKT_MAXBLR_SIZE 1536
186 191
187/* FEC receive acceleration */ 192/* FEC receive acceleration */
188#define FEC_RACC_IPDIS (1 << 1) 193#define FEC_RACC_IPDIS (1 << 1)
@@ -243,7 +248,7 @@ static void *swap_buffer(void *bufaddr, int len)
243 int i; 248 int i;
244 unsigned int *buf = bufaddr; 249 unsigned int *buf = bufaddr;
245 250
246 for (i = 0; i < (len + 3) / 4; i++, buf++) 251 for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++)
247 *buf = cpu_to_be32(*buf); 252 *buf = cpu_to_be32(*buf);
248 253
249 return bufaddr; 254 return bufaddr;
@@ -471,9 +476,8 @@ fec_restart(struct net_device *ndev, int duplex)
471 /* Clear any outstanding interrupt. */ 476 /* Clear any outstanding interrupt. */
472 writel(0xffc00000, fep->hwp + FEC_IEVENT); 477 writel(0xffc00000, fep->hwp + FEC_IEVENT);
473 478
474 /* Reset all multicast. */ 479 /* Setup multicast filter. */
475 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 480 set_multicast_list(ndev);
476 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
477#ifndef CONFIG_M5272 481#ifndef CONFIG_M5272
478 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); 482 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
479 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); 483 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
@@ -609,6 +613,11 @@ fec_restart(struct net_device *ndev, int duplex)
609 if (fep->bufdesc_ex) 613 if (fep->bufdesc_ex)
610 ecntl |= (1 << 4); 614 ecntl |= (1 << 4);
611 615
616#ifndef CONFIG_M5272
617 /* Enable the MIB statistic event counters */
618 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
619#endif
620
612 /* And last, enable the transmit and receive processing */ 621 /* And last, enable the transmit and receive processing */
613 writel(ecntl, fep->hwp + FEC_ECNTRL); 622 writel(ecntl, fep->hwp + FEC_ECNTRL);
614 writel(0, fep->hwp + FEC_R_DES_ACTIVE); 623 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
@@ -735,6 +744,7 @@ fec_enet_tx(struct net_device *ndev)
735 ndev->stats.tx_carrier_errors++; 744 ndev->stats.tx_carrier_errors++;
736 } else { 745 } else {
737 ndev->stats.tx_packets++; 746 ndev->stats.tx_packets++;
747 ndev->stats.tx_bytes += bdp->cbd_datlen;
738 } 748 }
739 749
740 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && 750 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
@@ -800,6 +810,9 @@ fec_enet_rx(struct net_device *ndev, int budget)
800 ushort pkt_len; 810 ushort pkt_len;
801 __u8 *data; 811 __u8 *data;
802 int pkt_received = 0; 812 int pkt_received = 0;
813 struct bufdesc_ex *ebdp = NULL;
814 bool vlan_packet_rcvd = false;
815 u16 vlan_tag;
803 816
804#ifdef CONFIG_M532x 817#ifdef CONFIG_M532x
805 flush_cache_all(); 818 flush_cache_all();
@@ -863,6 +876,24 @@ fec_enet_rx(struct net_device *ndev, int budget)
863 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) 876 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
864 swap_buffer(data, pkt_len); 877 swap_buffer(data, pkt_len);
865 878
879 /* Extract the enhanced buffer descriptor */
880 ebdp = NULL;
881 if (fep->bufdesc_ex)
882 ebdp = (struct bufdesc_ex *)bdp;
883
884 /* If this is a VLAN packet remove the VLAN Tag */
885 vlan_packet_rcvd = false;
886 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
887 fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
888 /* Push and remove the vlan tag */
889 struct vlan_hdr *vlan_header =
890 (struct vlan_hdr *) (data + ETH_HLEN);
891 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
892 pkt_len -= VLAN_HLEN;
893
894 vlan_packet_rcvd = true;
895 }
896
866 /* This does 16 byte alignment, exactly what we need. 897 /* This does 16 byte alignment, exactly what we need.
867 * The packet length includes FCS, but we don't want to 898 * The packet length includes FCS, but we don't want to
868 * include that when passing upstream as it messes up 899 * include that when passing upstream as it messes up
@@ -873,9 +904,18 @@ fec_enet_rx(struct net_device *ndev, int budget)
873 if (unlikely(!skb)) { 904 if (unlikely(!skb)) {
874 ndev->stats.rx_dropped++; 905 ndev->stats.rx_dropped++;
875 } else { 906 } else {
907 int payload_offset = (2 * ETH_ALEN);
876 skb_reserve(skb, NET_IP_ALIGN); 908 skb_reserve(skb, NET_IP_ALIGN);
877 skb_put(skb, pkt_len - 4); /* Make room */ 909 skb_put(skb, pkt_len - 4); /* Make room */
878 skb_copy_to_linear_data(skb, data, pkt_len - 4); 910
911 /* Extract the frame data without the VLAN header. */
912 skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
913 if (vlan_packet_rcvd)
914 payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
915 skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
916 data + payload_offset,
917 pkt_len - 4 - (2 * ETH_ALEN));
918
879 skb->protocol = eth_type_trans(skb, ndev); 919 skb->protocol = eth_type_trans(skb, ndev);
880 920
881 /* Get receive timestamp from the skb */ 921 /* Get receive timestamp from the skb */
@@ -883,8 +923,6 @@ fec_enet_rx(struct net_device *ndev, int budget)
883 struct skb_shared_hwtstamps *shhwtstamps = 923 struct skb_shared_hwtstamps *shhwtstamps =
884 skb_hwtstamps(skb); 924 skb_hwtstamps(skb);
885 unsigned long flags; 925 unsigned long flags;
886 struct bufdesc_ex *ebdp =
887 (struct bufdesc_ex *)bdp;
888 926
889 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 927 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
890 928
@@ -895,9 +933,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
895 } 933 }
896 934
897 if (fep->bufdesc_ex && 935 if (fep->bufdesc_ex &&
898 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { 936 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
899 struct bufdesc_ex *ebdp =
900 (struct bufdesc_ex *)bdp;
901 if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { 937 if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
902 /* don't check it */ 938 /* don't check it */
903 skb->ip_summed = CHECKSUM_UNNECESSARY; 939 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -906,6 +942,12 @@ fec_enet_rx(struct net_device *ndev, int budget)
906 } 942 }
907 } 943 }
908 944
945 /* Handle received VLAN packets */
946 if (vlan_packet_rcvd)
947 __vlan_hwaccel_put_tag(skb,
948 htons(ETH_P_8021Q),
949 vlan_tag);
950
909 if (!skb_defer_rx_timestamp(skb)) 951 if (!skb_defer_rx_timestamp(skb))
910 napi_gro_receive(&fep->napi, skb); 952 napi_gro_receive(&fep->napi, skb);
911 } 953 }
@@ -1444,8 +1486,117 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1444 return 0; 1486 return 0;
1445} 1487}
1446 1488
1489static const struct fec_stat {
1490 char name[ETH_GSTRING_LEN];
1491 u16 offset;
1492} fec_stats[] = {
1493 /* RMON TX */
1494 { "tx_dropped", RMON_T_DROP },
1495 { "tx_packets", RMON_T_PACKETS },
1496 { "tx_broadcast", RMON_T_BC_PKT },
1497 { "tx_multicast", RMON_T_MC_PKT },
1498 { "tx_crc_errors", RMON_T_CRC_ALIGN },
1499 { "tx_undersize", RMON_T_UNDERSIZE },
1500 { "tx_oversize", RMON_T_OVERSIZE },
1501 { "tx_fragment", RMON_T_FRAG },
1502 { "tx_jabber", RMON_T_JAB },
1503 { "tx_collision", RMON_T_COL },
1504 { "tx_64byte", RMON_T_P64 },
1505 { "tx_65to127byte", RMON_T_P65TO127 },
1506 { "tx_128to255byte", RMON_T_P128TO255 },
1507 { "tx_256to511byte", RMON_T_P256TO511 },
1508 { "tx_512to1023byte", RMON_T_P512TO1023 },
1509 { "tx_1024to2047byte", RMON_T_P1024TO2047 },
1510 { "tx_GTE2048byte", RMON_T_P_GTE2048 },
1511 { "tx_octets", RMON_T_OCTETS },
1512
1513 /* IEEE TX */
1514 { "IEEE_tx_drop", IEEE_T_DROP },
1515 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
1516 { "IEEE_tx_1col", IEEE_T_1COL },
1517 { "IEEE_tx_mcol", IEEE_T_MCOL },
1518 { "IEEE_tx_def", IEEE_T_DEF },
1519 { "IEEE_tx_lcol", IEEE_T_LCOL },
1520 { "IEEE_tx_excol", IEEE_T_EXCOL },
1521 { "IEEE_tx_macerr", IEEE_T_MACERR },
1522 { "IEEE_tx_cserr", IEEE_T_CSERR },
1523 { "IEEE_tx_sqe", IEEE_T_SQE },
1524 { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
1525 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
1526
1527 /* RMON RX */
1528 { "rx_packets", RMON_R_PACKETS },
1529 { "rx_broadcast", RMON_R_BC_PKT },
1530 { "rx_multicast", RMON_R_MC_PKT },
1531 { "rx_crc_errors", RMON_R_CRC_ALIGN },
1532 { "rx_undersize", RMON_R_UNDERSIZE },
1533 { "rx_oversize", RMON_R_OVERSIZE },
1534 { "rx_fragment", RMON_R_FRAG },
1535 { "rx_jabber", RMON_R_JAB },
1536 { "rx_64byte", RMON_R_P64 },
1537 { "rx_65to127byte", RMON_R_P65TO127 },
1538 { "rx_128to255byte", RMON_R_P128TO255 },
1539 { "rx_256to511byte", RMON_R_P256TO511 },
1540 { "rx_512to1023byte", RMON_R_P512TO1023 },
1541 { "rx_1024to2047byte", RMON_R_P1024TO2047 },
1542 { "rx_GTE2048byte", RMON_R_P_GTE2048 },
1543 { "rx_octets", RMON_R_OCTETS },
1544
1545 /* IEEE RX */
1546 { "IEEE_rx_drop", IEEE_R_DROP },
1547 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
1548 { "IEEE_rx_crc", IEEE_R_CRC },
1549 { "IEEE_rx_align", IEEE_R_ALIGN },
1550 { "IEEE_rx_macerr", IEEE_R_MACERR },
1551 { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
1552 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
1553};
1554
1555static void fec_enet_get_ethtool_stats(struct net_device *dev,
1556 struct ethtool_stats *stats, u64 *data)
1557{
1558 struct fec_enet_private *fep = netdev_priv(dev);
1559 int i;
1560
1561 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
1562 data[i] = readl(fep->hwp + fec_stats[i].offset);
1563}
1564
1565static void fec_enet_get_strings(struct net_device *netdev,
1566 u32 stringset, u8 *data)
1567{
1568 int i;
1569 switch (stringset) {
1570 case ETH_SS_STATS:
1571 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
1572 memcpy(data + i * ETH_GSTRING_LEN,
1573 fec_stats[i].name, ETH_GSTRING_LEN);
1574 break;
1575 }
1576}
1577
1578static int fec_enet_get_sset_count(struct net_device *dev, int sset)
1579{
1580 switch (sset) {
1581 case ETH_SS_STATS:
1582 return ARRAY_SIZE(fec_stats);
1583 default:
1584 return -EOPNOTSUPP;
1585 }
1586}
1447#endif /* !defined(CONFIG_M5272) */ 1587#endif /* !defined(CONFIG_M5272) */
1448 1588
1589static int fec_enet_nway_reset(struct net_device *dev)
1590{
1591 struct fec_enet_private *fep = netdev_priv(dev);
1592 struct phy_device *phydev = fep->phy_dev;
1593
1594 if (!phydev)
1595 return -ENODEV;
1596
1597 return genphy_restart_aneg(phydev);
1598}
1599
1449static const struct ethtool_ops fec_enet_ethtool_ops = { 1600static const struct ethtool_ops fec_enet_ethtool_ops = {
1450#if !defined(CONFIG_M5272) 1601#if !defined(CONFIG_M5272)
1451 .get_pauseparam = fec_enet_get_pauseparam, 1602 .get_pauseparam = fec_enet_get_pauseparam,
@@ -1456,6 +1607,12 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
1456 .get_drvinfo = fec_enet_get_drvinfo, 1607 .get_drvinfo = fec_enet_get_drvinfo,
1457 .get_link = ethtool_op_get_link, 1608 .get_link = ethtool_op_get_link,
1458 .get_ts_info = fec_enet_get_ts_info, 1609 .get_ts_info = fec_enet_get_ts_info,
1610 .nway_reset = fec_enet_nway_reset,
1611#ifndef CONFIG_M5272
1612 .get_ethtool_stats = fec_enet_get_ethtool_stats,
1613 .get_strings = fec_enet_get_strings,
1614 .get_sset_count = fec_enet_get_sset_count,
1615#endif
1459}; 1616};
1460 1617
1461static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 1618static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -1803,6 +1960,12 @@ static int fec_enet_init(struct net_device *ndev)
1803 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 1960 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
1804 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); 1961 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
1805 1962
1963 if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
1964 /* enable hw VLAN support */
1965 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1966 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1967 }
1968
1806 if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { 1969 if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
1807 /* enable hw accelerator */ 1970 /* enable hw accelerator */
1808 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 1971 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
@@ -1865,8 +2028,6 @@ fec_probe(struct platform_device *pdev)
1865 struct resource *r; 2028 struct resource *r;
1866 const struct of_device_id *of_id; 2029 const struct of_device_id *of_id;
1867 static int dev_id; 2030 static int dev_id;
1868 struct pinctrl *pinctrl;
1869 struct regulator *reg_phy;
1870 2031
1871 of_id = of_match_device(fec_dt_ids, &pdev->dev); 2032 of_id = of_match_device(fec_dt_ids, &pdev->dev);
1872 if (of_id) 2033 if (of_id)
@@ -1893,17 +2054,17 @@ fec_probe(struct platform_device *pdev)
1893 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 2054 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
1894#endif 2055#endif
1895 2056
1896 fep->hwp = devm_request_and_ioremap(&pdev->dev, r); 2057 fep->hwp = devm_ioremap_resource(&pdev->dev, r);
2058 if (IS_ERR(fep->hwp)) {
2059 ret = PTR_ERR(fep->hwp);
2060 goto failed_ioremap;
2061 }
2062
1897 fep->pdev = pdev; 2063 fep->pdev = pdev;
1898 fep->dev_id = dev_id++; 2064 fep->dev_id = dev_id++;
1899 2065
1900 fep->bufdesc_ex = 0; 2066 fep->bufdesc_ex = 0;
1901 2067
1902 if (!fep->hwp) {
1903 ret = -ENOMEM;
1904 goto failed_ioremap;
1905 }
1906
1907 platform_set_drvdata(pdev, ndev); 2068 platform_set_drvdata(pdev, ndev);
1908 2069
1909 ret = of_get_phy_mode(pdev->dev.of_node); 2070 ret = of_get_phy_mode(pdev->dev.of_node);
@@ -1917,12 +2078,6 @@ fec_probe(struct platform_device *pdev)
1917 fep->phy_interface = ret; 2078 fep->phy_interface = ret;
1918 } 2079 }
1919 2080
1920 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
1921 if (IS_ERR(pinctrl)) {
1922 ret = PTR_ERR(pinctrl);
1923 goto failed_pin;
1924 }
1925
1926 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 2081 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1927 if (IS_ERR(fep->clk_ipg)) { 2082 if (IS_ERR(fep->clk_ipg)) {
1928 ret = PTR_ERR(fep->clk_ipg); 2083 ret = PTR_ERR(fep->clk_ipg);
@@ -1953,20 +2108,22 @@ fec_probe(struct platform_device *pdev)
1953 clk_prepare_enable(fep->clk_enet_out); 2108 clk_prepare_enable(fep->clk_enet_out);
1954 clk_prepare_enable(fep->clk_ptp); 2109 clk_prepare_enable(fep->clk_ptp);
1955 2110
1956 reg_phy = devm_regulator_get(&pdev->dev, "phy"); 2111 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
1957 if (!IS_ERR(reg_phy)) { 2112 if (!IS_ERR(fep->reg_phy)) {
1958 ret = regulator_enable(reg_phy); 2113 ret = regulator_enable(fep->reg_phy);
1959 if (ret) { 2114 if (ret) {
1960 dev_err(&pdev->dev, 2115 dev_err(&pdev->dev,
1961 "Failed to enable phy regulator: %d\n", ret); 2116 "Failed to enable phy regulator: %d\n", ret);
1962 goto failed_regulator; 2117 goto failed_regulator;
1963 } 2118 }
2119 } else {
2120 fep->reg_phy = NULL;
1964 } 2121 }
1965 2122
1966 fec_reset_phy(pdev); 2123 fec_reset_phy(pdev);
1967 2124
1968 if (fep->bufdesc_ex) 2125 if (fep->bufdesc_ex)
1969 fec_ptp_init(ndev, pdev); 2126 fec_ptp_init(pdev);
1970 2127
1971 ret = fec_enet_init(ndev); 2128 ret = fec_enet_init(ndev);
1972 if (ret) 2129 if (ret)
@@ -2010,19 +2167,20 @@ fec_probe(struct platform_device *pdev)
2010failed_register: 2167failed_register:
2011 fec_enet_mii_remove(fep); 2168 fec_enet_mii_remove(fep);
2012failed_mii_init: 2169failed_mii_init:
2013failed_init: 2170failed_irq:
2014 for (i = 0; i < FEC_IRQ_NUM; i++) { 2171 for (i = 0; i < FEC_IRQ_NUM; i++) {
2015 irq = platform_get_irq(pdev, i); 2172 irq = platform_get_irq(pdev, i);
2016 if (irq > 0) 2173 if (irq > 0)
2017 free_irq(irq, ndev); 2174 free_irq(irq, ndev);
2018 } 2175 }
2019failed_irq: 2176failed_init:
2177 if (fep->reg_phy)
2178 regulator_disable(fep->reg_phy);
2020failed_regulator: 2179failed_regulator:
2021 clk_disable_unprepare(fep->clk_ahb); 2180 clk_disable_unprepare(fep->clk_ahb);
2022 clk_disable_unprepare(fep->clk_ipg); 2181 clk_disable_unprepare(fep->clk_ipg);
2023 clk_disable_unprepare(fep->clk_enet_out); 2182 clk_disable_unprepare(fep->clk_enet_out);
2024 clk_disable_unprepare(fep->clk_ptp); 2183 clk_disable_unprepare(fep->clk_ptp);
2025failed_pin:
2026failed_clk: 2184failed_clk:
2027failed_ioremap: 2185failed_ioremap:
2028 free_netdev(ndev); 2186 free_netdev(ndev);
@@ -2041,21 +2199,21 @@ fec_drv_remove(struct platform_device *pdev)
2041 unregister_netdev(ndev); 2199 unregister_netdev(ndev);
2042 fec_enet_mii_remove(fep); 2200 fec_enet_mii_remove(fep);
2043 del_timer_sync(&fep->time_keep); 2201 del_timer_sync(&fep->time_keep);
2202 for (i = 0; i < FEC_IRQ_NUM; i++) {
2203 int irq = platform_get_irq(pdev, i);
2204 if (irq > 0)
2205 free_irq(irq, ndev);
2206 }
2207 if (fep->reg_phy)
2208 regulator_disable(fep->reg_phy);
2044 clk_disable_unprepare(fep->clk_ptp); 2209 clk_disable_unprepare(fep->clk_ptp);
2045 if (fep->ptp_clock) 2210 if (fep->ptp_clock)
2046 ptp_clock_unregister(fep->ptp_clock); 2211 ptp_clock_unregister(fep->ptp_clock);
2047 clk_disable_unprepare(fep->clk_enet_out); 2212 clk_disable_unprepare(fep->clk_enet_out);
2048 clk_disable_unprepare(fep->clk_ahb); 2213 clk_disable_unprepare(fep->clk_ahb);
2049 clk_disable_unprepare(fep->clk_ipg); 2214 clk_disable_unprepare(fep->clk_ipg);
2050 for (i = 0; i < FEC_IRQ_NUM; i++) {
2051 int irq = platform_get_irq(pdev, i);
2052 if (irq > 0)
2053 free_irq(irq, ndev);
2054 }
2055 free_netdev(ndev); 2215 free_netdev(ndev);
2056 2216
2057 platform_set_drvdata(pdev, NULL);
2058
2059 return 0; 2217 return 0;
2060} 2218}
2061 2219
@@ -2074,6 +2232,9 @@ fec_suspend(struct device *dev)
2074 clk_disable_unprepare(fep->clk_ahb); 2232 clk_disable_unprepare(fep->clk_ahb);
2075 clk_disable_unprepare(fep->clk_ipg); 2233 clk_disable_unprepare(fep->clk_ipg);
2076 2234
2235 if (fep->reg_phy)
2236 regulator_disable(fep->reg_phy);
2237
2077 return 0; 2238 return 0;
2078} 2239}
2079 2240
@@ -2082,6 +2243,13 @@ fec_resume(struct device *dev)
2082{ 2243{
2083 struct net_device *ndev = dev_get_drvdata(dev); 2244 struct net_device *ndev = dev_get_drvdata(dev);
2084 struct fec_enet_private *fep = netdev_priv(ndev); 2245 struct fec_enet_private *fep = netdev_priv(ndev);
2246 int ret;
2247
2248 if (fep->reg_phy) {
2249 ret = regulator_enable(fep->reg_phy);
2250 if (ret)
2251 return ret;
2252 }
2085 2253
2086 clk_prepare_enable(fep->clk_enet_out); 2254 clk_prepare_enable(fep->clk_enet_out);
2087 clk_prepare_enable(fep->clk_ahb); 2255 clk_prepare_enable(fep->clk_ahb);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 9bc15e2365bb..9947765e90c5 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -981,7 +981,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
981 goto err_node; 981 goto err_node;
982 982
983 /* We're done ! */ 983 /* We're done ! */
984 dev_set_drvdata(&op->dev, ndev); 984 platform_set_drvdata(op, ndev);
985 netdev_info(ndev, "%s MAC %pM\n", 985 netdev_info(ndev, "%s MAC %pM\n",
986 op->dev.of_node->full_name, ndev->dev_addr); 986 op->dev.of_node->full_name, ndev->dev_addr);
987 987
@@ -1010,7 +1010,7 @@ mpc52xx_fec_remove(struct platform_device *op)
1010 struct net_device *ndev; 1010 struct net_device *ndev;
1011 struct mpc52xx_fec_priv *priv; 1011 struct mpc52xx_fec_priv *priv;
1012 1012
1013 ndev = dev_get_drvdata(&op->dev); 1013 ndev = platform_get_drvdata(op);
1014 priv = netdev_priv(ndev); 1014 priv = netdev_priv(ndev);
1015 1015
1016 unregister_netdev(ndev); 1016 unregister_netdev(ndev);
@@ -1030,14 +1030,13 @@ mpc52xx_fec_remove(struct platform_device *op)
1030 1030
1031 free_netdev(ndev); 1031 free_netdev(ndev);
1032 1032
1033 dev_set_drvdata(&op->dev, NULL);
1034 return 0; 1033 return 0;
1035} 1034}
1036 1035
1037#ifdef CONFIG_PM 1036#ifdef CONFIG_PM
1038static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state) 1037static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state)
1039{ 1038{
1040 struct net_device *dev = dev_get_drvdata(&op->dev); 1039 struct net_device *dev = platform_get_drvdata(op);
1041 1040
1042 if (netif_running(dev)) 1041 if (netif_running(dev))
1043 mpc52xx_fec_close(dev); 1042 mpc52xx_fec_close(dev);
@@ -1047,7 +1046,7 @@ static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state
1047 1046
1048static int mpc52xx_fec_of_resume(struct platform_device *op) 1047static int mpc52xx_fec_of_resume(struct platform_device *op)
1049{ 1048{
1050 struct net_device *dev = dev_get_drvdata(&op->dev); 1049 struct net_device *dev = platform_get_drvdata(op);
1051 1050
1052 mpc52xx_fec_hw_init(dev); 1051 mpc52xx_fec_hw_init(dev);
1053 mpc52xx_fec_reset_stats(dev); 1052 mpc52xx_fec_reset_stats(dev);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 25fc960cbf0e..5007e4f9fff9 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -347,8 +347,9 @@ static void fec_time_keep(unsigned long _data)
347 * cyclecounter init routine and exits. 347 * cyclecounter init routine and exits.
348 */ 348 */
349 349
350void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev) 350void fec_ptp_init(struct platform_device *pdev)
351{ 351{
352 struct net_device *ndev = platform_get_drvdata(pdev);
352 struct fec_enet_private *fep = netdev_priv(ndev); 353 struct fec_enet_private *fep = netdev_priv(ndev);
353 354
354 fep->ptp_caps.owner = THIS_MODULE; 355 fep->ptp_caps.owner = THIS_MODULE;
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig
index 268414d9f2cb..be92229f2c2a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/Kconfig
+++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig
@@ -1,7 +1,6 @@
1config FS_ENET 1config FS_ENET
2 tristate "Freescale Ethernet Driver" 2 tristate "Freescale Ethernet Driver"
3 depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) 3 depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
4 select NET_CORE
5 select MII 4 select MII
6 select PHYLIB 5 select PHYLIB
7 6
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index edc120094c34..8de53a14a6f4 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1048,7 +1048,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
1048 } 1048 }
1049 1049
1050 SET_NETDEV_DEV(ndev, &ofdev->dev); 1050 SET_NETDEV_DEV(ndev, &ofdev->dev);
1051 dev_set_drvdata(&ofdev->dev, ndev); 1051 platform_set_drvdata(ofdev, ndev);
1052 1052
1053 fep = netdev_priv(ndev); 1053 fep = netdev_priv(ndev);
1054 fep->dev = &ofdev->dev; 1054 fep->dev = &ofdev->dev;
@@ -1106,7 +1106,6 @@ out_cleanup_data:
1106 fep->ops->cleanup_data(ndev); 1106 fep->ops->cleanup_data(ndev);
1107out_free_dev: 1107out_free_dev:
1108 free_netdev(ndev); 1108 free_netdev(ndev);
1109 dev_set_drvdata(&ofdev->dev, NULL);
1110out_put: 1109out_put:
1111 of_node_put(fpi->phy_node); 1110 of_node_put(fpi->phy_node);
1112out_free_fpi: 1111out_free_fpi:
@@ -1116,7 +1115,7 @@ out_free_fpi:
1116 1115
1117static int fs_enet_remove(struct platform_device *ofdev) 1116static int fs_enet_remove(struct platform_device *ofdev)
1118{ 1117{
1119 struct net_device *ndev = dev_get_drvdata(&ofdev->dev); 1118 struct net_device *ndev = platform_get_drvdata(ofdev);
1120 struct fs_enet_private *fep = netdev_priv(ndev); 1119 struct fs_enet_private *fep = netdev_priv(ndev);
1121 1120
1122 unregister_netdev(ndev); 1121 unregister_netdev(ndev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 2bafbd37c247..844ecfa84d17 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -179,7 +179,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
179 } 179 }
180 180
181 new_bus->parent = &ofdev->dev; 181 new_bus->parent = &ofdev->dev;
182 dev_set_drvdata(&ofdev->dev, new_bus); 182 platform_set_drvdata(ofdev, new_bus);
183 183
184 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); 184 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
185 if (ret) 185 if (ret)
@@ -188,7 +188,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
188 return 0; 188 return 0;
189 189
190out_free_irqs: 190out_free_irqs:
191 dev_set_drvdata(&ofdev->dev, NULL);
192 kfree(new_bus->irq); 191 kfree(new_bus->irq);
193out_unmap_regs: 192out_unmap_regs:
194 iounmap(bitbang->dir); 193 iounmap(bitbang->dir);
@@ -202,11 +201,10 @@ out:
202 201
203static int fs_enet_mdio_remove(struct platform_device *ofdev) 202static int fs_enet_mdio_remove(struct platform_device *ofdev)
204{ 203{
205 struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); 204 struct mii_bus *bus = platform_get_drvdata(ofdev);
206 struct bb_info *bitbang = bus->priv; 205 struct bb_info *bitbang = bus->priv;
207 206
208 mdiobus_unregister(bus); 207 mdiobus_unregister(bus);
209 dev_set_drvdata(&ofdev->dev, NULL);
210 kfree(bus->irq); 208 kfree(bus->irq);
211 free_mdio_bitbang(bus); 209 free_mdio_bitbang(bus);
212 iounmap(bitbang->dir); 210 iounmap(bitbang->dir);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 18e8ef203736..2f1c46a12f05 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -180,7 +180,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
180 } 180 }
181 181
182 new_bus->parent = &ofdev->dev; 182 new_bus->parent = &ofdev->dev;
183 dev_set_drvdata(&ofdev->dev, new_bus); 183 platform_set_drvdata(ofdev, new_bus);
184 184
185 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); 185 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
186 if (ret) 186 if (ret)
@@ -189,7 +189,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
189 return 0; 189 return 0;
190 190
191out_free_irqs: 191out_free_irqs:
192 dev_set_drvdata(&ofdev->dev, NULL);
193 kfree(new_bus->irq); 192 kfree(new_bus->irq);
194out_unmap_regs: 193out_unmap_regs:
195 iounmap(fec->fecp); 194 iounmap(fec->fecp);
@@ -204,11 +203,10 @@ out:
204 203
205static int fs_enet_mdio_remove(struct platform_device *ofdev) 204static int fs_enet_mdio_remove(struct platform_device *ofdev)
206{ 205{
207 struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); 206 struct mii_bus *bus = platform_get_drvdata(ofdev);
208 struct fec_info *fec = bus->priv; 207 struct fec_info *fec = bus->priv;
209 208
210 mdiobus_unregister(bus); 209 mdiobus_unregister(bus);
211 dev_set_drvdata(&ofdev->dev, NULL);
212 kfree(bus->irq); 210 kfree(bus->irq);
213 iounmap(fec->fecp); 211 iounmap(fec->fecp);
214 kfree(fec); 212 kfree(fec);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2375a01715a0..8d2db7b808b7 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -128,6 +128,7 @@ static void gfar_set_multi(struct net_device *dev);
128static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 128static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
129static void gfar_configure_serdes(struct net_device *dev); 129static void gfar_configure_serdes(struct net_device *dev);
130static int gfar_poll(struct napi_struct *napi, int budget); 130static int gfar_poll(struct napi_struct *napi, int budget);
131static int gfar_poll_sq(struct napi_struct *napi, int budget);
131#ifdef CONFIG_NET_POLL_CONTROLLER 132#ifdef CONFIG_NET_POLL_CONTROLLER
132static void gfar_netpoll(struct net_device *dev); 133static void gfar_netpoll(struct net_device *dev);
133#endif 134#endif
@@ -1000,7 +1001,7 @@ static int gfar_probe(struct platform_device *ofdev)
1000 spin_lock_init(&priv->bflock); 1001 spin_lock_init(&priv->bflock);
1001 INIT_WORK(&priv->reset_task, gfar_reset_task); 1002 INIT_WORK(&priv->reset_task, gfar_reset_task);
1002 1003
1003 dev_set_drvdata(&ofdev->dev, priv); 1004 platform_set_drvdata(ofdev, priv);
1004 regs = priv->gfargrp[0].regs; 1005 regs = priv->gfargrp[0].regs;
1005 1006
1006 gfar_detect_errata(priv); 1007 gfar_detect_errata(priv);
@@ -1038,9 +1039,13 @@ static int gfar_probe(struct platform_device *ofdev)
1038 dev->ethtool_ops = &gfar_ethtool_ops; 1039 dev->ethtool_ops = &gfar_ethtool_ops;
1039 1040
1040 /* Register for napi ...We are registering NAPI for each grp */ 1041 /* Register for napi ...We are registering NAPI for each grp */
1041 for (i = 0; i < priv->num_grps; i++) 1042 if (priv->mode == SQ_SG_MODE)
1042 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, 1043 netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
1043 GFAR_DEV_WEIGHT); 1044 GFAR_DEV_WEIGHT);
1045 else
1046 for (i = 0; i < priv->num_grps; i++)
1047 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
1048 GFAR_DEV_WEIGHT);
1044 1049
1045 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1050 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1046 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1051 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -1240,15 +1245,13 @@ register_fail:
1240 1245
1241static int gfar_remove(struct platform_device *ofdev) 1246static int gfar_remove(struct platform_device *ofdev)
1242{ 1247{
1243 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); 1248 struct gfar_private *priv = platform_get_drvdata(ofdev);
1244 1249
1245 if (priv->phy_node) 1250 if (priv->phy_node)
1246 of_node_put(priv->phy_node); 1251 of_node_put(priv->phy_node);
1247 if (priv->tbi_node) 1252 if (priv->tbi_node)
1248 of_node_put(priv->tbi_node); 1253 of_node_put(priv->tbi_node);
1249 1254
1250 dev_set_drvdata(&ofdev->dev, NULL);
1251
1252 unregister_netdev(priv->ndev); 1255 unregister_netdev(priv->ndev);
1253 unmap_group_regs(priv); 1256 unmap_group_regs(priv);
1254 free_gfar_dev(priv); 1257 free_gfar_dev(priv);
@@ -2825,6 +2828,48 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2825 return howmany; 2828 return howmany;
2826} 2829}
2827 2830
2831static int gfar_poll_sq(struct napi_struct *napi, int budget)
2832{
2833 struct gfar_priv_grp *gfargrp =
2834 container_of(napi, struct gfar_priv_grp, napi);
2835 struct gfar __iomem *regs = gfargrp->regs;
2836 struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
2837 struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
2838 int work_done = 0;
2839
2840 /* Clear IEVENT, so interrupts aren't called again
2841 * because of the packets that have already arrived
2842 */
2843 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2844
2845 /* run Tx cleanup to completion */
2846 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2847 gfar_clean_tx_ring(tx_queue);
2848
2849 work_done = gfar_clean_rx_ring(rx_queue, budget);
2850
2851 if (work_done < budget) {
2852 napi_complete(napi);
2853 /* Clear the halt bit in RSTAT */
2854 gfar_write(&regs->rstat, gfargrp->rstat);
2855
2856 gfar_write(&regs->imask, IMASK_DEFAULT);
2857
2858 /* If we are coalescing interrupts, update the timer
2859 * Otherwise, clear it
2860 */
2861 gfar_write(&regs->txic, 0);
2862 if (likely(tx_queue->txcoalescing))
2863 gfar_write(&regs->txic, tx_queue->txic);
2864
2865 gfar_write(&regs->rxic, 0);
2866 if (unlikely(rx_queue->rxcoalescing))
2867 gfar_write(&regs->rxic, rx_queue->rxic);
2868 }
2869
2870 return work_done;
2871}
2872
2828static int gfar_poll(struct napi_struct *napi, int budget) 2873static int gfar_poll(struct napi_struct *napi, int budget)
2829{ 2874{
2830 struct gfar_priv_grp *gfargrp = 2875 struct gfar_priv_grp *gfargrp =
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 083ea2b4d20a..098f133908ae 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -519,7 +519,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
519 } 519 }
520 gfar_phc_index = ptp_clock_index(etsects->clock); 520 gfar_phc_index = ptp_clock_index(etsects->clock);
521 521
522 dev_set_drvdata(&dev->dev, etsects); 522 platform_set_drvdata(dev, etsects);
523 523
524 return 0; 524 return 0;
525 525
@@ -537,7 +537,7 @@ no_memory:
537 537
538static int gianfar_ptp_remove(struct platform_device *dev) 538static int gianfar_ptp_remove(struct platform_device *dev)
539{ 539{
540 struct etsects *etsects = dev_get_drvdata(&dev->dev); 540 struct etsects *etsects = platform_get_drvdata(dev);
541 541
542 gfar_write(&etsects->regs->tmr_temask, 0); 542 gfar_write(&etsects->regs->tmr_temask, 0);
543 gfar_write(&etsects->regs->tmr_ctrl, 0); 543 gfar_write(&etsects->regs->tmr_ctrl, 0);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index e04c59818f60..3c43dac894ec 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3564,7 +3564,7 @@ static void ucc_geth_timeout(struct net_device *dev)
3564 3564
3565static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) 3565static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
3566{ 3566{
3567 struct net_device *ndev = dev_get_drvdata(&ofdev->dev); 3567 struct net_device *ndev = platform_get_drvdata(ofdev);
3568 struct ucc_geth_private *ugeth = netdev_priv(ndev); 3568 struct ucc_geth_private *ugeth = netdev_priv(ndev);
3569 3569
3570 if (!netif_running(ndev)) 3570 if (!netif_running(ndev))
@@ -3592,7 +3592,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
3592 3592
3593static int ucc_geth_resume(struct platform_device *ofdev) 3593static int ucc_geth_resume(struct platform_device *ofdev)
3594{ 3594{
3595 struct net_device *ndev = dev_get_drvdata(&ofdev->dev); 3595 struct net_device *ndev = platform_get_drvdata(ofdev);
3596 struct ucc_geth_private *ugeth = netdev_priv(ndev); 3596 struct ucc_geth_private *ugeth = netdev_priv(ndev);
3597 int err; 3597 int err;
3598 3598
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 418068b941b1..c1b6e7e31aac 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -227,7 +227,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
227 goto err_registration; 227 goto err_registration;
228 } 228 }
229 229
230 dev_set_drvdata(&pdev->dev, bus); 230 platform_set_drvdata(pdev, bus);
231 231
232 return 0; 232 return 0;
233 233
@@ -242,7 +242,7 @@ err_ioremap:
242 242
243static int xgmac_mdio_remove(struct platform_device *pdev) 243static int xgmac_mdio_remove(struct platform_device *pdev)
244{ 244{
245 struct mii_bus *bus = dev_get_drvdata(&pdev->dev); 245 struct mii_bus *bus = platform_get_drvdata(pdev);
246 246
247 mdiobus_unregister(bus); 247 mdiobus_unregister(bus);
248 iounmap(bus->priv); 248 iounmap(bus->priv);
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index 6529d31595a7..563a1ac71dbc 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -5,8 +5,7 @@
5config NET_VENDOR_IBM 5config NET_VENDOR_IBM
6 bool "IBM devices" 6 bool "IBM devices"
7 default y 7 default y
8 depends on MCA || PPC_PSERIES || PPC_PSERIES || PPC_DCR || \ 8 depends on PPC_PSERIES || PPC_DCR || (IBMEBUS && SPARSEMEM)
9 (IBMEBUS && SPARSEMEM)
10 ---help--- 9 ---help---
11 If you have a network (Ethernet) card belonging to this class, say Y 10 If you have a network (Ethernet) card belonging to this class, say Y
12 and read the Ethernet-HOWTO, available from 11 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 90ea0b1673ca..35853b43d66e 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -98,8 +98,7 @@ static struct ehea_fw_handle_array ehea_fw_handles;
98static struct ehea_bcmc_reg_array ehea_bcmc_regs; 98static struct ehea_bcmc_reg_array ehea_bcmc_regs;
99 99
100 100
101static int ehea_probe_adapter(struct platform_device *dev, 101static int ehea_probe_adapter(struct platform_device *dev);
102 const struct of_device_id *id);
103 102
104static int ehea_remove(struct platform_device *dev); 103static int ehea_remove(struct platform_device *dev);
105 104
@@ -112,7 +111,7 @@ static struct of_device_id ehea_device_table[] = {
112}; 111};
113MODULE_DEVICE_TABLE(of, ehea_device_table); 112MODULE_DEVICE_TABLE(of, ehea_device_table);
114 113
115static struct of_platform_driver ehea_driver = { 114static struct platform_driver ehea_driver = {
116 .driver = { 115 .driver = {
117 .name = "ehea", 116 .name = "ehea",
118 .owner = THIS_MODULE, 117 .owner = THIS_MODULE,
@@ -3251,8 +3250,7 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
3251 device_remove_file(&dev->dev, &dev_attr_remove_port); 3250 device_remove_file(&dev->dev, &dev_attr_remove_port);
3252} 3251}
3253 3252
3254static int ehea_probe_adapter(struct platform_device *dev, 3253static int ehea_probe_adapter(struct platform_device *dev)
3255 const struct of_device_id *id)
3256{ 3254{
3257 struct ehea_adapter *adapter; 3255 struct ehea_adapter *adapter;
3258 const u64 *adapter_handle; 3256 const u64 *adapter_handle;
@@ -3289,7 +3287,7 @@ static int ehea_probe_adapter(struct platform_device *dev,
3289 3287
3290 adapter->pd = EHEA_PD_ID; 3288 adapter->pd = EHEA_PD_ID;
3291 3289
3292 dev_set_drvdata(&dev->dev, adapter); 3290 platform_set_drvdata(dev, adapter);
3293 3291
3294 3292
3295 /* initialize adapter and ports */ 3293 /* initialize adapter and ports */
@@ -3360,7 +3358,7 @@ out:
3360 3358
3361static int ehea_remove(struct platform_device *dev) 3359static int ehea_remove(struct platform_device *dev)
3362{ 3360{
3363 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev); 3361 struct ehea_adapter *adapter = platform_get_drvdata(dev);
3364 int i; 3362 int i;
3365 3363
3366 for (i = 0; i < EHEA_MAX_PORTS; i++) 3364 for (i = 0; i < EHEA_MAX_PORTS; i++)
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 610ed223d1db..856ea66c9223 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -696,7 +696,7 @@ static int mal_probe(struct platform_device *ofdev)
696 696
697 /* Advertise this instance to the rest of the world */ 697 /* Advertise this instance to the rest of the world */
698 wmb(); 698 wmb();
699 dev_set_drvdata(&ofdev->dev, mal); 699 platform_set_drvdata(ofdev, mal);
700 700
701 mal_dbg_register(mal); 701 mal_dbg_register(mal);
702 702
@@ -722,7 +722,7 @@ static int mal_probe(struct platform_device *ofdev)
722 722
723static int mal_remove(struct platform_device *ofdev) 723static int mal_remove(struct platform_device *ofdev)
724{ 724{
725 struct mal_instance *mal = dev_get_drvdata(&ofdev->dev); 725 struct mal_instance *mal = platform_get_drvdata(ofdev);
726 726
727 MAL_DBG(mal, "remove" NL); 727 MAL_DBG(mal, "remove" NL);
728 728
@@ -735,8 +735,6 @@ static int mal_remove(struct platform_device *ofdev)
735 "mal%d: commac list is not empty on remove!\n", 735 "mal%d: commac list is not empty on remove!\n",
736 mal->index); 736 mal->index);
737 737
738 dev_set_drvdata(&ofdev->dev, NULL);
739
740 free_irq(mal->serr_irq, mal); 738 free_irq(mal->serr_irq, mal);
741 free_irq(mal->txde_irq, mal); 739 free_irq(mal->txde_irq, mal);
742 free_irq(mal->txeob_irq, mal); 740 free_irq(mal->txeob_irq, mal);
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 39251765b55d..c47e23d6eeaa 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -95,7 +95,7 @@ static inline u32 rgmii_mode_mask(int mode, int input)
95 95
96int rgmii_attach(struct platform_device *ofdev, int input, int mode) 96int rgmii_attach(struct platform_device *ofdev, int input, int mode)
97{ 97{
98 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); 98 struct rgmii_instance *dev = platform_get_drvdata(ofdev);
99 struct rgmii_regs __iomem *p = dev->base; 99 struct rgmii_regs __iomem *p = dev->base;
100 100
101 RGMII_DBG(dev, "attach(%d)" NL, input); 101 RGMII_DBG(dev, "attach(%d)" NL, input);
@@ -124,7 +124,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode)
124 124
125void rgmii_set_speed(struct platform_device *ofdev, int input, int speed) 125void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
126{ 126{
127 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); 127 struct rgmii_instance *dev = platform_get_drvdata(ofdev);
128 struct rgmii_regs __iomem *p = dev->base; 128 struct rgmii_regs __iomem *p = dev->base;
129 u32 ssr; 129 u32 ssr;
130 130
@@ -146,7 +146,7 @@ void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
146 146
147void rgmii_get_mdio(struct platform_device *ofdev, int input) 147void rgmii_get_mdio(struct platform_device *ofdev, int input)
148{ 148{
149 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); 149 struct rgmii_instance *dev = platform_get_drvdata(ofdev);
150 struct rgmii_regs __iomem *p = dev->base; 150 struct rgmii_regs __iomem *p = dev->base;
151 u32 fer; 151 u32 fer;
152 152
@@ -167,7 +167,7 @@ void rgmii_get_mdio(struct platform_device *ofdev, int input)
167 167
168void rgmii_put_mdio(struct platform_device *ofdev, int input) 168void rgmii_put_mdio(struct platform_device *ofdev, int input)
169{ 169{
170 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); 170 struct rgmii_instance *dev = platform_get_drvdata(ofdev);
171 struct rgmii_regs __iomem *p = dev->base; 171 struct rgmii_regs __iomem *p = dev->base;
172 u32 fer; 172 u32 fer;
173 173
@@ -188,7 +188,7 @@ void rgmii_put_mdio(struct platform_device *ofdev, int input)
188 188
189void rgmii_detach(struct platform_device *ofdev, int input) 189void rgmii_detach(struct platform_device *ofdev, int input)
190{ 190{
191 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); 191 struct rgmii_instance *dev = platform_get_drvdata(ofdev);
192 struct rgmii_regs __iomem *p; 192 struct rgmii_regs __iomem *p;
193 193
194 BUG_ON(!dev || dev->users == 0); 194 BUG_ON(!dev || dev->users == 0);
@@ -214,7 +214,7 @@ int rgmii_get_regs_len(struct platform_device *ofdev)
214 214
215void *rgmii_dump_regs(struct platform_device *ofdev, void *buf) 215void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
216{ 216{
217 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); 217 struct rgmii_instance *dev = platform_get_drvdata(ofdev);
218 struct emac_ethtool_regs_subhdr *hdr = buf; 218 struct emac_ethtool_regs_subhdr *hdr = buf;
219 struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1); 219 struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1);
220 220
@@ -279,7 +279,7 @@ static int rgmii_probe(struct platform_device *ofdev)
279 (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out"); 279 (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out");
280 280
281 wmb(); 281 wmb();
282 dev_set_drvdata(&ofdev->dev, dev); 282 platform_set_drvdata(ofdev, dev);
283 283
284 return 0; 284 return 0;
285 285
@@ -291,9 +291,7 @@ static int rgmii_probe(struct platform_device *ofdev)
291 291
292static int rgmii_remove(struct platform_device *ofdev) 292static int rgmii_remove(struct platform_device *ofdev)
293{ 293{
294 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); 294 struct rgmii_instance *dev = platform_get_drvdata(ofdev);
295
296 dev_set_drvdata(&ofdev->dev, NULL);
297 295
298 WARN_ON(dev->users != 0); 296 WARN_ON(dev->users != 0);
299 297
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index 795f1393e2b6..c231a4a32c4d 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -25,7 +25,7 @@
25 25
26int tah_attach(struct platform_device *ofdev, int channel) 26int tah_attach(struct platform_device *ofdev, int channel)
27{ 27{
28 struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); 28 struct tah_instance *dev = platform_get_drvdata(ofdev);
29 29
30 mutex_lock(&dev->lock); 30 mutex_lock(&dev->lock);
31 /* Reset has been done at probe() time... nothing else to do for now */ 31 /* Reset has been done at probe() time... nothing else to do for now */
@@ -37,7 +37,7 @@ int tah_attach(struct platform_device *ofdev, int channel)
37 37
38void tah_detach(struct platform_device *ofdev, int channel) 38void tah_detach(struct platform_device *ofdev, int channel)
39{ 39{
40 struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); 40 struct tah_instance *dev = platform_get_drvdata(ofdev);
41 41
42 mutex_lock(&dev->lock); 42 mutex_lock(&dev->lock);
43 --dev->users; 43 --dev->users;
@@ -46,7 +46,7 @@ void tah_detach(struct platform_device *ofdev, int channel)
46 46
47void tah_reset(struct platform_device *ofdev) 47void tah_reset(struct platform_device *ofdev)
48{ 48{
49 struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); 49 struct tah_instance *dev = platform_get_drvdata(ofdev);
50 struct tah_regs __iomem *p = dev->base; 50 struct tah_regs __iomem *p = dev->base;
51 int n; 51 int n;
52 52
@@ -74,7 +74,7 @@ int tah_get_regs_len(struct platform_device *ofdev)
74 74
75void *tah_dump_regs(struct platform_device *ofdev, void *buf) 75void *tah_dump_regs(struct platform_device *ofdev, void *buf)
76{ 76{
77 struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); 77 struct tah_instance *dev = platform_get_drvdata(ofdev);
78 struct emac_ethtool_regs_subhdr *hdr = buf; 78 struct emac_ethtool_regs_subhdr *hdr = buf;
79 struct tah_regs *regs = (struct tah_regs *)(hdr + 1); 79 struct tah_regs *regs = (struct tah_regs *)(hdr + 1);
80 80
@@ -118,7 +118,7 @@ static int tah_probe(struct platform_device *ofdev)
118 goto err_free; 118 goto err_free;
119 } 119 }
120 120
121 dev_set_drvdata(&ofdev->dev, dev); 121 platform_set_drvdata(ofdev, dev);
122 122
123 /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */ 123 /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */
124 tah_reset(ofdev); 124 tah_reset(ofdev);
@@ -137,9 +137,7 @@ static int tah_probe(struct platform_device *ofdev)
137 137
138static int tah_remove(struct platform_device *ofdev) 138static int tah_remove(struct platform_device *ofdev)
139{ 139{
140 struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); 140 struct tah_instance *dev = platform_get_drvdata(ofdev);
141
142 dev_set_drvdata(&ofdev->dev, NULL);
143 141
144 WARN_ON(dev->users != 0); 142 WARN_ON(dev->users != 0);
145 143
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index f91202f42125..4cdf286f7ee3 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -84,7 +84,7 @@ static inline u32 zmii_mode_mask(int mode, int input)
84 84
85int zmii_attach(struct platform_device *ofdev, int input, int *mode) 85int zmii_attach(struct platform_device *ofdev, int input, int *mode)
86{ 86{
87 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 87 struct zmii_instance *dev = platform_get_drvdata(ofdev);
88 struct zmii_regs __iomem *p = dev->base; 88 struct zmii_regs __iomem *p = dev->base;
89 89
90 ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode); 90 ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode);
@@ -150,7 +150,7 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode)
150 150
151void zmii_get_mdio(struct platform_device *ofdev, int input) 151void zmii_get_mdio(struct platform_device *ofdev, int input)
152{ 152{
153 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 153 struct zmii_instance *dev = platform_get_drvdata(ofdev);
154 u32 fer; 154 u32 fer;
155 155
156 ZMII_DBG2(dev, "get_mdio(%d)" NL, input); 156 ZMII_DBG2(dev, "get_mdio(%d)" NL, input);
@@ -163,7 +163,7 @@ void zmii_get_mdio(struct platform_device *ofdev, int input)
163 163
164void zmii_put_mdio(struct platform_device *ofdev, int input) 164void zmii_put_mdio(struct platform_device *ofdev, int input)
165{ 165{
166 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 166 struct zmii_instance *dev = platform_get_drvdata(ofdev);
167 167
168 ZMII_DBG2(dev, "put_mdio(%d)" NL, input); 168 ZMII_DBG2(dev, "put_mdio(%d)" NL, input);
169 mutex_unlock(&dev->lock); 169 mutex_unlock(&dev->lock);
@@ -172,7 +172,7 @@ void zmii_put_mdio(struct platform_device *ofdev, int input)
172 172
173void zmii_set_speed(struct platform_device *ofdev, int input, int speed) 173void zmii_set_speed(struct platform_device *ofdev, int input, int speed)
174{ 174{
175 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 175 struct zmii_instance *dev = platform_get_drvdata(ofdev);
176 u32 ssr; 176 u32 ssr;
177 177
178 mutex_lock(&dev->lock); 178 mutex_lock(&dev->lock);
@@ -193,7 +193,7 @@ void zmii_set_speed(struct platform_device *ofdev, int input, int speed)
193 193
194void zmii_detach(struct platform_device *ofdev, int input) 194void zmii_detach(struct platform_device *ofdev, int input)
195{ 195{
196 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 196 struct zmii_instance *dev = platform_get_drvdata(ofdev);
197 197
198 BUG_ON(!dev || dev->users == 0); 198 BUG_ON(!dev || dev->users == 0);
199 199
@@ -218,7 +218,7 @@ int zmii_get_regs_len(struct platform_device *ofdev)
218 218
219void *zmii_dump_regs(struct platform_device *ofdev, void *buf) 219void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
220{ 220{
221 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 221 struct zmii_instance *dev = platform_get_drvdata(ofdev);
222 struct emac_ethtool_regs_subhdr *hdr = buf; 222 struct emac_ethtool_regs_subhdr *hdr = buf;
223 struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1); 223 struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1);
224 224
@@ -272,7 +272,7 @@ static int zmii_probe(struct platform_device *ofdev)
272 printk(KERN_INFO 272 printk(KERN_INFO
273 "ZMII %s initialized\n", ofdev->dev.of_node->full_name); 273 "ZMII %s initialized\n", ofdev->dev.of_node->full_name);
274 wmb(); 274 wmb();
275 dev_set_drvdata(&ofdev->dev, dev); 275 platform_set_drvdata(ofdev, dev);
276 276
277 return 0; 277 return 0;
278 278
@@ -284,9 +284,7 @@ static int zmii_probe(struct platform_device *ofdev)
284 284
285static int zmii_remove(struct platform_device *ofdev) 285static int zmii_remove(struct platform_device *ofdev)
286{ 286{
287 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 287 struct zmii_instance *dev = platform_get_drvdata(ofdev);
288
289 dev_set_drvdata(&ofdev->dev, NULL);
290 288
291 WARN_ON(dev->users != 0); 289 WARN_ON(dev->users != 0);
292 290
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
index 5119ef18953b..14a66e9d2e26 100644
--- a/drivers/net/ethernet/icplus/Kconfig
+++ b/drivers/net/ethernet/icplus/Kconfig
@@ -5,7 +5,6 @@
5config IP1000 5config IP1000
6 tristate "IP1000 Gigabit Ethernet support" 6 tristate "IP1000 Gigabit Ethernet support"
7 depends on PCI 7 depends on PCI
8 select NET_CORE
9 select MII 8 select MII
10 ---help--- 9 ---help---
11 This driver supports IP1000 gigabit Ethernet cards. 10 This driver supports IP1000 gigabit Ethernet cards.
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 068d78151658..1fde90b96685 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2298,15 +2298,4 @@ static struct pci_driver ipg_pci_driver = {
2298 .remove = ipg_remove, 2298 .remove = ipg_remove,
2299}; 2299};
2300 2300
2301static int __init ipg_init_module(void) 2301module_pci_driver(ipg_pci_driver);
2302{
2303 return pci_register_driver(&ipg_pci_driver);
2304}
2305
2306static void __exit ipg_exit_module(void)
2307{
2308 pci_unregister_driver(&ipg_pci_driver);
2309}
2310
2311module_init(ipg_init_module);
2312module_exit(ipg_exit_module);
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 05f7264c51f7..f0e7ed20a750 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -20,7 +20,6 @@ if NET_VENDOR_INTEL
20config E100 20config E100
21 tristate "Intel(R) PRO/100+ support" 21 tristate "Intel(R) PRO/100+ support"
22 depends on PCI 22 depends on PCI
23 select NET_CORE
24 select MII 23 select MII
25 ---help--- 24 ---help---
26 This driver supports Intel(R) PRO/100 family of adapters. 25 This driver supports Intel(R) PRO/100 family of adapters.
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index d2bea3f07c73..5115ae76a5d1 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3069,7 +3069,7 @@ static int e100_resume(struct pci_dev *pdev)
3069 pci_set_power_state(pdev, PCI_D0); 3069 pci_set_power_state(pdev, PCI_D0);
3070 pci_restore_state(pdev); 3070 pci_restore_state(pdev);
3071 /* ack any pending wake events, disable PME */ 3071 /* ack any pending wake events, disable PME */
3072 pci_enable_wake(pdev, 0, 0); 3072 pci_enable_wake(pdev, PCI_D0, 0);
3073 3073
3074 /* disable reverse auto-negotiation */ 3074 /* disable reverse auto-negotiation */
3075 if (nic->phy == phy_82552_v) { 3075 if (nic->phy == phy_82552_v) {
@@ -3160,7 +3160,7 @@ static void e100_io_resume(struct pci_dev *pdev)
3160 struct nic *nic = netdev_priv(netdev); 3160 struct nic *nic = netdev_priv(netdev);
3161 3161
3162 /* ack any pending wake events, disable PME */ 3162 /* ack any pending wake events, disable PME */
3163 pci_enable_wake(pdev, 0, 0); 3163 pci_enable_wake(pdev, PCI_D0, 0);
3164 3164
3165 netif_device_attach(netdev); 3165 netif_device_attach(netdev);
3166 if (netif_running(netdev)) { 3166 if (netif_running(netdev)) {
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index b71c8502a2b3..895450e9bb3c 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -66,17 +66,17 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
66 s32 ret_val; 66 s32 ret_val;
67 67
68 if (hw->phy.media_type != e1000_media_type_copper) { 68 if (hw->phy.media_type != e1000_media_type_copper) {
69 phy->type = e1000_phy_none; 69 phy->type = e1000_phy_none;
70 return 0; 70 return 0;
71 } else { 71 } else {
72 phy->ops.power_up = e1000_power_up_phy_copper; 72 phy->ops.power_up = e1000_power_up_phy_copper;
73 phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; 73 phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
74 } 74 }
75 75
76 phy->addr = 1; 76 phy->addr = 1;
77 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 77 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
78 phy->reset_delay_us = 100; 78 phy->reset_delay_us = 100;
79 phy->type = e1000_phy_gg82563; 79 phy->type = e1000_phy_gg82563;
80 80
81 /* This can only be done after all function pointers are setup. */ 81 /* This can only be done after all function pointers are setup. */
82 ret_val = e1000e_get_phy_id(hw); 82 ret_val = e1000e_get_phy_id(hw);
@@ -98,19 +98,19 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
98 u32 eecd = er32(EECD); 98 u32 eecd = er32(EECD);
99 u16 size; 99 u16 size;
100 100
101 nvm->opcode_bits = 8; 101 nvm->opcode_bits = 8;
102 nvm->delay_usec = 1; 102 nvm->delay_usec = 1;
103 switch (nvm->override) { 103 switch (nvm->override) {
104 case e1000_nvm_override_spi_large: 104 case e1000_nvm_override_spi_large:
105 nvm->page_size = 32; 105 nvm->page_size = 32;
106 nvm->address_bits = 16; 106 nvm->address_bits = 16;
107 break; 107 break;
108 case e1000_nvm_override_spi_small: 108 case e1000_nvm_override_spi_small:
109 nvm->page_size = 8; 109 nvm->page_size = 8;
110 nvm->address_bits = 8; 110 nvm->address_bits = 8;
111 break; 111 break;
112 default: 112 default:
113 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; 113 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
114 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; 114 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
115 break; 115 break;
116 } 116 }
@@ -128,7 +128,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
128 /* EEPROM access above 16k is unsupported */ 128 /* EEPROM access above 16k is unsupported */
129 if (size > 14) 129 if (size > 14)
130 size = 14; 130 size = 14;
131 nvm->word_size = 1 << size; 131 nvm->word_size = 1 << size;
132 132
133 return 0; 133 return 0;
134} 134}
@@ -859,7 +859,7 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
859 859
860 /* Transmit Arbitration Control 0 */ 860 /* Transmit Arbitration Control 0 */
861 reg = er32(TARC(0)); 861 reg = er32(TARC(0));
862 reg &= ~(0xF << 27); /* 30:27 */ 862 reg &= ~(0xF << 27); /* 30:27 */
863 if (hw->phy.media_type != e1000_media_type_copper) 863 if (hw->phy.media_type != e1000_media_type_copper)
864 reg &= ~(1 << 20); 864 reg &= ~(1 << 20);
865 ew32(TARC(0), reg); 865 ew32(TARC(0), reg);
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 7380442a3829..4c303e2a7cb3 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -77,24 +77,24 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
77 return 0; 77 return 0;
78 } 78 }
79 79
80 phy->addr = 1; 80 phy->addr = 1;
81 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 81 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
82 phy->reset_delay_us = 100; 82 phy->reset_delay_us = 100;
83 83
84 phy->ops.power_up = e1000_power_up_phy_copper; 84 phy->ops.power_up = e1000_power_up_phy_copper;
85 phy->ops.power_down = e1000_power_down_phy_copper_82571; 85 phy->ops.power_down = e1000_power_down_phy_copper_82571;
86 86
87 switch (hw->mac.type) { 87 switch (hw->mac.type) {
88 case e1000_82571: 88 case e1000_82571:
89 case e1000_82572: 89 case e1000_82572:
90 phy->type = e1000_phy_igp_2; 90 phy->type = e1000_phy_igp_2;
91 break; 91 break;
92 case e1000_82573: 92 case e1000_82573:
93 phy->type = e1000_phy_m88; 93 phy->type = e1000_phy_m88;
94 break; 94 break;
95 case e1000_82574: 95 case e1000_82574:
96 case e1000_82583: 96 case e1000_82583:
97 phy->type = e1000_phy_bm; 97 phy->type = e1000_phy_bm;
98 phy->ops.acquire = e1000_get_hw_semaphore_82574; 98 phy->ops.acquire = e1000_get_hw_semaphore_82574;
99 phy->ops.release = e1000_put_hw_semaphore_82574; 99 phy->ops.release = e1000_put_hw_semaphore_82574;
100 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; 100 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
@@ -193,7 +193,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
193 /* EEPROM access above 16k is unsupported */ 193 /* EEPROM access above 16k is unsupported */
194 if (size > 14) 194 if (size > 14)
195 size = 14; 195 size = 14;
196 nvm->word_size = 1 << size; 196 nvm->word_size = 1 << size;
197 break; 197 break;
198 } 198 }
199 199
@@ -339,7 +339,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
339static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) 339static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
340{ 340{
341 struct e1000_hw *hw = &adapter->hw; 341 struct e1000_hw *hw = &adapter->hw;
342 static int global_quad_port_a; /* global port a indication */ 342 static int global_quad_port_a; /* global port a indication */
343 struct pci_dev *pdev = adapter->pdev; 343 struct pci_dev *pdev = adapter->pdev;
344 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; 344 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
345 s32 rc; 345 s32 rc;
@@ -1003,8 +1003,6 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1003 default: 1003 default:
1004 break; 1004 break;
1005 } 1005 }
1006 if (ret_val)
1007 e_dbg("Cannot acquire MDIO ownership\n");
1008 1006
1009 ctrl = er32(CTRL); 1007 ctrl = er32(CTRL);
1010 1008
@@ -1015,7 +1013,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1015 switch (hw->mac.type) { 1013 switch (hw->mac.type) {
1016 case e1000_82574: 1014 case e1000_82574:
1017 case e1000_82583: 1015 case e1000_82583:
1018 e1000_put_hw_semaphore_82574(hw); 1016 /* Release mutex only if the hw semaphore is acquired */
1017 if (!ret_val)
1018 e1000_put_hw_semaphore_82574(hw);
1019 break; 1019 break;
1020 default: 1020 default:
1021 break; 1021 break;
@@ -1178,7 +1178,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1178 1178
1179 /* Transmit Arbitration Control 0 */ 1179 /* Transmit Arbitration Control 0 */
1180 reg = er32(TARC(0)); 1180 reg = er32(TARC(0));
1181 reg &= ~(0xF << 27); /* 30:27 */ 1181 reg &= ~(0xF << 27); /* 30:27 */
1182 switch (hw->mac.type) { 1182 switch (hw->mac.type) {
1183 case e1000_82571: 1183 case e1000_82571:
1184 case e1000_82572: 1184 case e1000_82572:
@@ -1390,7 +1390,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
1390 ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); 1390 ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
1391 if (ret_val) 1391 if (ret_val)
1392 return false; 1392 return false;
1393 if (receive_errors == E1000_RECEIVE_ERROR_MAX) { 1393 if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
1394 ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); 1394 ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
1395 if (ret_val) 1395 if (ret_val)
1396 return false; 1396 return false;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 7c8ca658d553..59c22bf18701 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -244,7 +244,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
244 mac->autoneg = 1; 244 mac->autoneg = 1;
245 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 245 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
246 break; 246 break;
247 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 247 case SPEED_1000 + DUPLEX_HALF: /* not supported */
248 default: 248 default:
249 goto err_inval; 249 goto err_inval;
250 } 250 }
@@ -416,7 +416,7 @@ static void e1000_set_msglevel(struct net_device *netdev, u32 data)
416 416
417static int e1000_get_regs_len(struct net_device __always_unused *netdev) 417static int e1000_get_regs_len(struct net_device __always_unused *netdev)
418{ 418{
419#define E1000_REGS_LEN 32 /* overestimate */ 419#define E1000_REGS_LEN 32 /* overestimate */
420 return E1000_REGS_LEN * sizeof(u32); 420 return E1000_REGS_LEN * sizeof(u32);
421} 421}
422 422
@@ -433,22 +433,22 @@ static void e1000_get_regs(struct net_device *netdev,
433 regs->version = (1 << 24) | (adapter->pdev->revision << 16) | 433 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
434 adapter->pdev->device; 434 adapter->pdev->device;
435 435
436 regs_buff[0] = er32(CTRL); 436 regs_buff[0] = er32(CTRL);
437 regs_buff[1] = er32(STATUS); 437 regs_buff[1] = er32(STATUS);
438 438
439 regs_buff[2] = er32(RCTL); 439 regs_buff[2] = er32(RCTL);
440 regs_buff[3] = er32(RDLEN(0)); 440 regs_buff[3] = er32(RDLEN(0));
441 regs_buff[4] = er32(RDH(0)); 441 regs_buff[4] = er32(RDH(0));
442 regs_buff[5] = er32(RDT(0)); 442 regs_buff[5] = er32(RDT(0));
443 regs_buff[6] = er32(RDTR); 443 regs_buff[6] = er32(RDTR);
444 444
445 regs_buff[7] = er32(TCTL); 445 regs_buff[7] = er32(TCTL);
446 regs_buff[8] = er32(TDLEN(0)); 446 regs_buff[8] = er32(TDLEN(0));
447 regs_buff[9] = er32(TDH(0)); 447 regs_buff[9] = er32(TDH(0));
448 regs_buff[10] = er32(TDT(0)); 448 regs_buff[10] = er32(TDT(0));
449 regs_buff[11] = er32(TIDV); 449 regs_buff[11] = er32(TIDV);
450 450
451 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ 451 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
452 452
453 /* ethtool doesn't use anything past this point, so all this 453 /* ethtool doesn't use anything past this point, so all this
454 * code is likely legacy junk for apps that may or may not exist 454 * code is likely legacy junk for apps that may or may not exist
@@ -1379,7 +1379,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1379 1379
1380 if (hw->phy.media_type == e1000_media_type_copper && 1380 if (hw->phy.media_type == e1000_media_type_copper &&
1381 hw->phy.type == e1000_phy_m88) { 1381 hw->phy.type == e1000_phy_m88) {
1382 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1382 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1383 } else { 1383 } else {
1384 /* Set the ILOS bit on the fiber Nic if half duplex link is 1384 /* Set the ILOS bit on the fiber Nic if half duplex link is
1385 * detected. 1385 * detected.
@@ -1613,7 +1613,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1613 ew32(TDT(0), k); 1613 ew32(TDT(0), k);
1614 e1e_flush(); 1614 e1e_flush();
1615 msleep(200); 1615 msleep(200);
1616 time = jiffies; /* set the start time for the receive */ 1616 time = jiffies; /* set the start time for the receive */
1617 good_cnt = 0; 1617 good_cnt = 0;
1618 /* receive the sent packets */ 1618 /* receive the sent packets */
1619 do { 1619 do {
@@ -1636,11 +1636,11 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1636 */ 1636 */
1637 } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); 1637 } while ((good_cnt < 64) && !time_after(jiffies, time + 20));
1638 if (good_cnt != 64) { 1638 if (good_cnt != 64) {
1639 ret_val = 13; /* ret_val is the same as mis-compare */ 1639 ret_val = 13; /* ret_val is the same as mis-compare */
1640 break; 1640 break;
1641 } 1641 }
1642 if (jiffies >= (time + 20)) { 1642 if (jiffies >= (time + 20)) {
1643 ret_val = 14; /* error code for time out error */ 1643 ret_val = 14; /* error code for time out error */
1644 break; 1644 break;
1645 } 1645 }
1646 } 1646 }
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 84850f7a23e4..a6f903a9b773 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -402,13 +402,13 @@ struct e1000_phy_stats {
402 402
403struct e1000_host_mng_dhcp_cookie { 403struct e1000_host_mng_dhcp_cookie {
404 u32 signature; 404 u32 signature;
405 u8 status; 405 u8 status;
406 u8 reserved0; 406 u8 reserved0;
407 u16 vlan_id; 407 u16 vlan_id;
408 u32 reserved1; 408 u32 reserved1;
409 u16 reserved2; 409 u16 reserved2;
410 u8 reserved3; 410 u8 reserved3;
411 u8 checksum; 411 u8 checksum;
412}; 412};
413 413
414/* Host Interface "Rev 1" */ 414/* Host Interface "Rev 1" */
@@ -427,8 +427,8 @@ struct e1000_host_command_info {
427 427
428/* Host Interface "Rev 2" */ 428/* Host Interface "Rev 2" */
429struct e1000_host_mng_command_header { 429struct e1000_host_mng_command_header {
430 u8 command_id; 430 u8 command_id;
431 u8 checksum; 431 u8 checksum;
432 u16 reserved1; 432 u16 reserved1;
433 u16 reserved2; 433 u16 reserved2;
434 u16 command_length; 434 u16 command_length;
@@ -549,7 +549,7 @@ struct e1000_mac_info {
549 u32 mta_shadow[MAX_MTA_REG]; 549 u32 mta_shadow[MAX_MTA_REG];
550 u16 rar_entry_count; 550 u16 rar_entry_count;
551 551
552 u8 forced_speed_duplex; 552 u8 forced_speed_duplex;
553 553
554 bool adaptive_ifs; 554 bool adaptive_ifs;
555 bool has_fwsm; 555 bool has_fwsm;
@@ -577,7 +577,7 @@ struct e1000_phy_info {
577 577
578 u32 addr; 578 u32 addr;
579 u32 id; 579 u32 id;
580 u32 reset_delay_us; /* in usec */ 580 u32 reset_delay_us; /* in usec */
581 u32 revision; 581 u32 revision;
582 582
583 enum e1000_media_type media_type; 583 enum e1000_media_type media_type;
@@ -636,11 +636,11 @@ struct e1000_dev_spec_82571 {
636}; 636};
637 637
638struct e1000_dev_spec_80003es2lan { 638struct e1000_dev_spec_80003es2lan {
639 bool mdic_wa_enable; 639 bool mdic_wa_enable;
640}; 640};
641 641
642struct e1000_shadow_ram { 642struct e1000_shadow_ram {
643 u16 value; 643 u16 value;
644 bool modified; 644 bool modified;
645}; 645};
646 646
@@ -660,17 +660,17 @@ struct e1000_hw {
660 void __iomem *hw_addr; 660 void __iomem *hw_addr;
661 void __iomem *flash_address; 661 void __iomem *flash_address;
662 662
663 struct e1000_mac_info mac; 663 struct e1000_mac_info mac;
664 struct e1000_fc_info fc; 664 struct e1000_fc_info fc;
665 struct e1000_phy_info phy; 665 struct e1000_phy_info phy;
666 struct e1000_nvm_info nvm; 666 struct e1000_nvm_info nvm;
667 struct e1000_bus_info bus; 667 struct e1000_bus_info bus;
668 struct e1000_host_mng_dhcp_cookie mng_cookie; 668 struct e1000_host_mng_dhcp_cookie mng_cookie;
669 669
670 union { 670 union {
671 struct e1000_dev_spec_82571 e82571; 671 struct e1000_dev_spec_82571 e82571;
672 struct e1000_dev_spec_80003es2lan e80003es2lan; 672 struct e1000_dev_spec_80003es2lan e80003es2lan;
673 struct e1000_dev_spec_ich8lan ich8lan; 673 struct e1000_dev_spec_ich8lan ich8lan;
674 } dev_spec; 674 } dev_spec;
675}; 675};
676 676
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index ad9d8f2dd868..9dde390f7e71 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -101,12 +101,12 @@ union ich8_hws_flash_regacc {
101/* ICH Flash Protected Region */ 101/* ICH Flash Protected Region */
102union ich8_flash_protected_range { 102union ich8_flash_protected_range {
103 struct ich8_pr { 103 struct ich8_pr {
104 u32 base:13; /* 0:12 Protected Range Base */ 104 u32 base:13; /* 0:12 Protected Range Base */
105 u32 reserved1:2; /* 13:14 Reserved */ 105 u32 reserved1:2; /* 13:14 Reserved */
106 u32 rpe:1; /* 15 Read Protection Enable */ 106 u32 rpe:1; /* 15 Read Protection Enable */
107 u32 limit:13; /* 16:28 Protected Range Limit */ 107 u32 limit:13; /* 16:28 Protected Range Limit */
108 u32 reserved2:2; /* 29:30 Reserved */ 108 u32 reserved2:2; /* 29:30 Reserved */
109 u32 wpe:1; /* 31 Write Protection Enable */ 109 u32 wpe:1; /* 31 Write Protection Enable */
110 } range; 110 } range;
111 u32 regval; 111 u32 regval;
112}; 112};
@@ -362,21 +362,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
362 struct e1000_phy_info *phy = &hw->phy; 362 struct e1000_phy_info *phy = &hw->phy;
363 s32 ret_val; 363 s32 ret_val;
364 364
365 phy->addr = 1; 365 phy->addr = 1;
366 phy->reset_delay_us = 100; 366 phy->reset_delay_us = 100;
367 367
368 phy->ops.set_page = e1000_set_page_igp; 368 phy->ops.set_page = e1000_set_page_igp;
369 phy->ops.read_reg = e1000_read_phy_reg_hv; 369 phy->ops.read_reg = e1000_read_phy_reg_hv;
370 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; 370 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
371 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; 371 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
372 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; 372 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
373 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; 373 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
374 phy->ops.write_reg = e1000_write_phy_reg_hv; 374 phy->ops.write_reg = e1000_write_phy_reg_hv;
375 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; 375 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
376 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; 376 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
377 phy->ops.power_up = e1000_power_up_phy_copper; 377 phy->ops.power_up = e1000_power_up_phy_copper;
378 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 378 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
379 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 379 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
380 380
381 phy->id = e1000_phy_unknown; 381 phy->id = e1000_phy_unknown;
382 382
@@ -445,11 +445,11 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
445 s32 ret_val; 445 s32 ret_val;
446 u16 i = 0; 446 u16 i = 0;
447 447
448 phy->addr = 1; 448 phy->addr = 1;
449 phy->reset_delay_us = 100; 449 phy->reset_delay_us = 100;
450 450
451 phy->ops.power_up = e1000_power_up_phy_copper; 451 phy->ops.power_up = e1000_power_up_phy_copper;
452 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 452 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
453 453
454 /* We may need to do this twice - once for IGP and if that fails, 454 /* We may need to do this twice - once for IGP and if that fails,
455 * we'll set BM func pointers and try again 455 * we'll set BM func pointers and try again
@@ -457,7 +457,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
457 ret_val = e1000e_determine_phy_address(hw); 457 ret_val = e1000e_determine_phy_address(hw);
458 if (ret_val) { 458 if (ret_val) {
459 phy->ops.write_reg = e1000e_write_phy_reg_bm; 459 phy->ops.write_reg = e1000e_write_phy_reg_bm;
460 phy->ops.read_reg = e1000e_read_phy_reg_bm; 460 phy->ops.read_reg = e1000e_read_phy_reg_bm;
461 ret_val = e1000e_determine_phy_address(hw); 461 ret_val = e1000e_determine_phy_address(hw);
462 if (ret_val) { 462 if (ret_val) {
463 e_dbg("Cannot determine PHY addr. Erroring out\n"); 463 e_dbg("Cannot determine PHY addr. Erroring out\n");
@@ -560,7 +560,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
560 /* Clear shadow ram */ 560 /* Clear shadow ram */
561 for (i = 0; i < nvm->word_size; i++) { 561 for (i = 0; i < nvm->word_size; i++) {
562 dev_spec->shadow_ram[i].modified = false; 562 dev_spec->shadow_ram[i].modified = false;
563 dev_spec->shadow_ram[i].value = 0xFFFF; 563 dev_spec->shadow_ram[i].value = 0xFFFF;
564 } 564 }
565 565
566 return 0; 566 return 0;
@@ -1012,7 +1012,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1012 hw->dev_spec.ich8lan.eee_lp_ability = 0; 1012 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1013 1013
1014 if (!link) 1014 if (!link)
1015 return 0; /* No link detected */ 1015 return 0; /* No link detected */
1016 1016
1017 mac->get_link_status = false; 1017 mac->get_link_status = false;
1018 1018
@@ -2816,7 +2816,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2816 s32 ret_val = -E1000_ERR_NVM; 2816 s32 ret_val = -E1000_ERR_NVM;
2817 u8 count = 0; 2817 u8 count = 0;
2818 2818
2819 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) 2819 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2820 return -E1000_ERR_NVM; 2820 return -E1000_ERR_NVM;
2821 2821
2822 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + 2822 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
@@ -2939,7 +2939,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2939 * write to bank 0 etc. We also need to erase the segment that 2939 * write to bank 0 etc. We also need to erase the segment that
2940 * is going to be written 2940 * is going to be written
2941 */ 2941 */
2942 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 2942 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2943 if (ret_val) { 2943 if (ret_val) {
2944 e_dbg("Could not detect valid bank, assuming bank 0\n"); 2944 e_dbg("Could not detect valid bank, assuming bank 0\n");
2945 bank = 0; 2945 bank = 0;
@@ -4073,7 +4073,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4073{ 4073{
4074 u32 reg; 4074 u32 reg;
4075 u16 data; 4075 u16 data;
4076 u8 retry = 0; 4076 u8 retry = 0;
4077 4077
4078 if (hw->phy.type != e1000_phy_igp_3) 4078 if (hw->phy.type != e1000_phy_igp_3)
4079 return; 4079 return;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a27e3bcc3249..77f81cbb601a 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1196,7 +1196,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
1196 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 1196 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1197 (count < tx_ring->count)) { 1197 (count < tx_ring->count)) {
1198 bool cleaned = false; 1198 bool cleaned = false;
1199 rmb(); /* read buffer_info after eop_desc */ 1199 rmb(); /* read buffer_info after eop_desc */
1200 for (; !cleaned; count++) { 1200 for (; !cleaned; count++) {
1201 tx_desc = E1000_TX_DESC(*tx_ring, i); 1201 tx_desc = E1000_TX_DESC(*tx_ring, i);
1202 buffer_info = &tx_ring->buffer_info[i]; 1202 buffer_info = &tx_ring->buffer_info[i];
@@ -1385,7 +1385,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
1385 1385
1386 skb_put(skb, l1); 1386 skb_put(skb, l1);
1387 goto copydone; 1387 goto copydone;
1388 } /* if */ 1388 } /* if */
1389 } 1389 }
1390 1390
1391 for (j = 0; j < PS_PAGE_BUFFERS; j++) { 1391 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
@@ -1800,7 +1800,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
1800 u32 rctl, icr = er32(ICR); 1800 u32 rctl, icr = er32(ICR);
1801 1801
1802 if (!icr || test_bit(__E1000_DOWN, &adapter->state)) 1802 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1803 return IRQ_NONE; /* Not our interrupt */ 1803 return IRQ_NONE; /* Not our interrupt */
1804 1804
1805 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 1805 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1806 * not set, then the adapter didn't send an interrupt 1806 * not set, then the adapter didn't send an interrupt
@@ -2487,7 +2487,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2487 else if ((packets < 5) && (bytes > 512)) 2487 else if ((packets < 5) && (bytes > 512))
2488 retval = low_latency; 2488 retval = low_latency;
2489 break; 2489 break;
2490 case low_latency: /* 50 usec aka 20000 ints/s */ 2490 case low_latency: /* 50 usec aka 20000 ints/s */
2491 if (bytes > 10000) { 2491 if (bytes > 10000) {
2492 /* this if handles the TSO accounting */ 2492 /* this if handles the TSO accounting */
2493 if (bytes / packets > 8000) 2493 if (bytes / packets > 8000)
@@ -2502,7 +2502,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2502 retval = lowest_latency; 2502 retval = lowest_latency;
2503 } 2503 }
2504 break; 2504 break;
2505 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2505 case bulk_latency: /* 250 usec aka 4000 ints/s */
2506 if (bytes > 25000) { 2506 if (bytes > 25000) {
2507 if (packets > 35) 2507 if (packets > 35)
2508 retval = low_latency; 2508 retval = low_latency;
@@ -2554,7 +2554,7 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
2554 new_itr = 70000; 2554 new_itr = 70000;
2555 break; 2555 break;
2556 case low_latency: 2556 case low_latency:
2557 new_itr = 20000; /* aka hwitr = ~200 */ 2557 new_itr = 20000; /* aka hwitr = ~200 */
2558 break; 2558 break;
2559 case bulk_latency: 2559 case bulk_latency:
2560 new_itr = 4000; 2560 new_itr = 4000;
@@ -2673,7 +2673,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
2673} 2673}
2674 2674
2675static int e1000_vlan_rx_add_vid(struct net_device *netdev, 2675static int e1000_vlan_rx_add_vid(struct net_device *netdev,
2676 __be16 proto, u16 vid) 2676 __always_unused __be16 proto, u16 vid)
2677{ 2677{
2678 struct e1000_adapter *adapter = netdev_priv(netdev); 2678 struct e1000_adapter *adapter = netdev_priv(netdev);
2679 struct e1000_hw *hw = &adapter->hw; 2679 struct e1000_hw *hw = &adapter->hw;
@@ -2699,7 +2699,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev,
2699} 2699}
2700 2700
2701static int e1000_vlan_rx_kill_vid(struct net_device *netdev, 2701static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
2702 __be16 proto, u16 vid) 2702 __always_unused __be16 proto, u16 vid)
2703{ 2703{
2704 struct e1000_adapter *adapter = netdev_priv(netdev); 2704 struct e1000_adapter *adapter = netdev_priv(netdev);
2705 struct e1000_hw *hw = &adapter->hw; 2705 struct e1000_hw *hw = &adapter->hw;
@@ -3104,13 +3104,13 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
3104 /* UPE and MPE will be handled by normal PROMISC logic 3104 /* UPE and MPE will be handled by normal PROMISC logic
3105 * in e1000e_set_rx_mode 3105 * in e1000e_set_rx_mode
3106 */ 3106 */
3107 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 3107 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3108 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 3108 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3109 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 3109 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3110 3110
3111 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ 3111 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3112 E1000_RCTL_DPF | /* Allow filtered pause */ 3112 E1000_RCTL_DPF | /* Allow filtered pause */
3113 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ 3113 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3114 /* Do not mess with E1000_CTRL_VME, it affects transmit as well, 3114 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3115 * and that breaks VLANs. 3115 * and that breaks VLANs.
3116 */ 3116 */
@@ -3799,7 +3799,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3799 hwm = min(((pba << 10) * 9 / 10), 3799 hwm = min(((pba << 10) * 9 / 10),
3800 ((pba << 10) - adapter->max_frame_size)); 3800 ((pba << 10) - adapter->max_frame_size));
3801 3801
3802 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ 3802 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3803 fc->low_water = fc->high_water - 8; 3803 fc->low_water = fc->high_water - 8;
3804 break; 3804 break;
3805 case e1000_pchlan: 3805 case e1000_pchlan:
@@ -3808,10 +3808,10 @@ void e1000e_reset(struct e1000_adapter *adapter)
3808 */ 3808 */
3809 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3809 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3810 fc->high_water = 0x3500; 3810 fc->high_water = 0x3500;
3811 fc->low_water = 0x1500; 3811 fc->low_water = 0x1500;
3812 } else { 3812 } else {
3813 fc->high_water = 0x5000; 3813 fc->high_water = 0x5000;
3814 fc->low_water = 0x3000; 3814 fc->low_water = 0x3000;
3815 } 3815 }
3816 fc->refresh_time = 0x1000; 3816 fc->refresh_time = 0x1000;
3817 break; 3817 break;
@@ -4581,7 +4581,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4581 adapter->stats.crcerrs += er32(CRCERRS); 4581 adapter->stats.crcerrs += er32(CRCERRS);
4582 adapter->stats.gprc += er32(GPRC); 4582 adapter->stats.gprc += er32(GPRC);
4583 adapter->stats.gorc += er32(GORCL); 4583 adapter->stats.gorc += er32(GORCL);
4584 er32(GORCH); /* Clear gorc */ 4584 er32(GORCH); /* Clear gorc */
4585 adapter->stats.bprc += er32(BPRC); 4585 adapter->stats.bprc += er32(BPRC);
4586 adapter->stats.mprc += er32(MPRC); 4586 adapter->stats.mprc += er32(MPRC);
4587 adapter->stats.roc += er32(ROC); 4587 adapter->stats.roc += er32(ROC);
@@ -4614,7 +4614,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4614 adapter->stats.xofftxc += er32(XOFFTXC); 4614 adapter->stats.xofftxc += er32(XOFFTXC);
4615 adapter->stats.gptc += er32(GPTC); 4615 adapter->stats.gptc += er32(GPTC);
4616 adapter->stats.gotc += er32(GOTCL); 4616 adapter->stats.gotc += er32(GOTCL);
4617 er32(GOTCH); /* Clear gotc */ 4617 er32(GOTCH); /* Clear gotc */
4618 adapter->stats.rnbc += er32(RNBC); 4618 adapter->stats.rnbc += er32(RNBC);
4619 adapter->stats.ruc += er32(RUC); 4619 adapter->stats.ruc += er32(RUC);
4620 4620
@@ -5106,13 +5106,13 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5106 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 5106 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5107 buffer_info = &tx_ring->buffer_info[i]; 5107 buffer_info = &tx_ring->buffer_info[i];
5108 5108
5109 context_desc->lower_setup.ip_fields.ipcss = ipcss; 5109 context_desc->lower_setup.ip_fields.ipcss = ipcss;
5110 context_desc->lower_setup.ip_fields.ipcso = ipcso; 5110 context_desc->lower_setup.ip_fields.ipcso = ipcso;
5111 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 5111 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
5112 context_desc->upper_setup.tcp_fields.tucss = tucss; 5112 context_desc->upper_setup.tcp_fields.tucss = tucss;
5113 context_desc->upper_setup.tcp_fields.tucso = tucso; 5113 context_desc->upper_setup.tcp_fields.tucso = tucso;
5114 context_desc->upper_setup.tcp_fields.tucse = 0; 5114 context_desc->upper_setup.tcp_fields.tucse = 0;
5115 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 5115 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
5116 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 5116 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
5117 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 5117 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
5118 5118
@@ -5363,7 +5363,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5363static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, 5363static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5364 struct sk_buff *skb) 5364 struct sk_buff *skb)
5365{ 5365{
5366 struct e1000_hw *hw = &adapter->hw; 5366 struct e1000_hw *hw = &adapter->hw;
5367 u16 length, offset; 5367 u16 length, offset;
5368 5368
5369 if (vlan_tx_tag_present(skb) && 5369 if (vlan_tx_tag_present(skb) &&
@@ -6259,7 +6259,7 @@ static void e1000_netpoll(struct net_device *netdev)
6259 e1000_intr_msi(adapter->pdev->irq, netdev); 6259 e1000_intr_msi(adapter->pdev->irq, netdev);
6260 enable_irq(adapter->pdev->irq); 6260 enable_irq(adapter->pdev->irq);
6261 break; 6261 break;
6262 default: /* E1000E_INT_MODE_LEGACY */ 6262 default: /* E1000E_INT_MODE_LEGACY */
6263 disable_irq(adapter->pdev->irq); 6263 disable_irq(adapter->pdev->irq);
6264 e1000_intr(adapter->pdev->irq, netdev); 6264 e1000_intr(adapter->pdev->irq, netdev);
6265 enable_irq(adapter->pdev->irq); 6265 enable_irq(adapter->pdev->irq);
@@ -6589,9 +6589,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6589 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; 6589 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
6590 6590
6591 /* construct the net_device struct */ 6591 /* construct the net_device struct */
6592 netdev->netdev_ops = &e1000e_netdev_ops; 6592 netdev->netdev_ops = &e1000e_netdev_ops;
6593 e1000e_set_ethtool_ops(netdev); 6593 e1000e_set_ethtool_ops(netdev);
6594 netdev->watchdog_timeo = 5 * HZ; 6594 netdev->watchdog_timeo = 5 * HZ;
6595 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); 6595 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
6596 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); 6596 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6597 6597
@@ -7034,7 +7034,6 @@ static void __exit e1000_exit_module(void)
7034} 7034}
7035module_exit(e1000_exit_module); 7035module_exit(e1000_exit_module);
7036 7036
7037
7038MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 7037MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
7039MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); 7038MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
7040MODULE_LICENSE("GPL"); 7039MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 44ddc0a0ee0e..d70a03906ac0 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -117,7 +117,6 @@ static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
117 u16 data; 117 u16 data;
118 118
119 eecd = er32(EECD); 119 eecd = er32(EECD);
120
121 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); 120 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
122 data = 0; 121 data = 0;
123 122
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 59c76a6815a0..da2be59505c0 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1583,13 +1583,13 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
1583 case e1000_phy_gg82563: 1583 case e1000_phy_gg82563:
1584 case e1000_phy_bm: 1584 case e1000_phy_bm:
1585 case e1000_phy_82578: 1585 case e1000_phy_82578:
1586 offset = M88E1000_PHY_SPEC_STATUS; 1586 offset = M88E1000_PHY_SPEC_STATUS;
1587 mask = M88E1000_PSSR_DOWNSHIFT; 1587 mask = M88E1000_PSSR_DOWNSHIFT;
1588 break; 1588 break;
1589 case e1000_phy_igp_2: 1589 case e1000_phy_igp_2:
1590 case e1000_phy_igp_3: 1590 case e1000_phy_igp_3:
1591 offset = IGP01E1000_PHY_LINK_HEALTH; 1591 offset = IGP01E1000_PHY_LINK_HEALTH;
1592 mask = IGP01E1000_PLHR_SS_DOWNGRADE; 1592 mask = IGP01E1000_PLHR_SS_DOWNGRADE;
1593 break; 1593 break;
1594 default: 1594 default:
1595 /* speed downshift not supported */ 1595 /* speed downshift not supported */
@@ -1653,14 +1653,14 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1653 1653
1654 if ((data & IGP01E1000_PSSR_SPEED_MASK) == 1654 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
1655 IGP01E1000_PSSR_SPEED_1000MBPS) { 1655 IGP01E1000_PSSR_SPEED_1000MBPS) {
1656 offset = IGP01E1000_PHY_PCS_INIT_REG; 1656 offset = IGP01E1000_PHY_PCS_INIT_REG;
1657 mask = IGP01E1000_PHY_POLARITY_MASK; 1657 mask = IGP01E1000_PHY_POLARITY_MASK;
1658 } else { 1658 } else {
1659 /* This really only applies to 10Mbps since 1659 /* This really only applies to 10Mbps since
1660 * there is no polarity for 100Mbps (always 0). 1660 * there is no polarity for 100Mbps (always 0).
1661 */ 1661 */
1662 offset = IGP01E1000_PHY_PORT_STATUS; 1662 offset = IGP01E1000_PHY_PORT_STATUS;
1663 mask = IGP01E1000_PSSR_POLARITY_REVERSED; 1663 mask = IGP01E1000_PSSR_POLARITY_REVERSED;
1664 } 1664 }
1665 1665
1666 ret_val = e1e_rphy(hw, offset, &data); 1666 ret_val = e1e_rphy(hw, offset, &data);
@@ -1900,7 +1900,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1900s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) 1900s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
1901{ 1901{
1902 struct e1000_phy_info *phy = &hw->phy; 1902 struct e1000_phy_info *phy = &hw->phy;
1903 s32 ret_val; 1903 s32 ret_val;
1904 u16 phy_data; 1904 u16 phy_data;
1905 bool link; 1905 bool link;
1906 1906
@@ -2253,7 +2253,7 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
2253 case M88E1011_I_PHY_ID: 2253 case M88E1011_I_PHY_ID:
2254 phy_type = e1000_phy_m88; 2254 phy_type = e1000_phy_m88;
2255 break; 2255 break;
2256 case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ 2256 case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
2257 phy_type = e1000_phy_igp_2; 2257 phy_type = e1000_phy_igp_2;
2258 break; 2258 break;
2259 case GG82563_E_PHY_ID: 2259 case GG82563_E_PHY_ID:
@@ -2317,7 +2317,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
2317 /* If phy_type is valid, break - we found our 2317 /* If phy_type is valid, break - we found our
2318 * PHY address 2318 * PHY address
2319 */ 2319 */
2320 if (phy_type != e1000_phy_unknown) 2320 if (phy_type != e1000_phy_unknown)
2321 return 0; 2321 return 0;
2322 2322
2323 usleep_range(1000, 2000); 2323 usleep_range(1000, 2000);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ff6a17cb1362..f21a91a299a2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -401,12 +401,82 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
401 return 0; 401 return 0;
402} 402}
403 403
404/**
405 * igb_set_sfp_media_type_82575 - derives SFP module media type.
406 * @hw: pointer to the HW structure
407 *
408 * The media type is chosen based on SFP module.
409 * compatibility flags retrieved from SFP ID EEPROM.
410 **/
411static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
412{
413 s32 ret_val = E1000_ERR_CONFIG;
414 u32 ctrl_ext = 0;
415 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
416 struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
417 u8 tranceiver_type = 0;
418 s32 timeout = 3;
419
420 /* Turn I2C interface ON and power on sfp cage */
421 ctrl_ext = rd32(E1000_CTRL_EXT);
422 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
423 wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
424
425 wrfl();
426
427 /* Read SFP module data */
428 while (timeout) {
429 ret_val = igb_read_sfp_data_byte(hw,
430 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
431 &tranceiver_type);
432 if (ret_val == 0)
433 break;
434 msleep(100);
435 timeout--;
436 }
437 if (ret_val != 0)
438 goto out;
439
440 ret_val = igb_read_sfp_data_byte(hw,
441 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
442 (u8 *)eth_flags);
443 if (ret_val != 0)
444 goto out;
445
446 /* Check if there is some SFP module plugged and powered */
447 if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
448 (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
449 dev_spec->module_plugged = true;
450 if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
451 hw->phy.media_type = e1000_media_type_internal_serdes;
452 } else if (eth_flags->e100_base_fx) {
453 dev_spec->sgmii_active = true;
454 hw->phy.media_type = e1000_media_type_internal_serdes;
455 } else if (eth_flags->e1000_base_t) {
456 dev_spec->sgmii_active = true;
457 hw->phy.media_type = e1000_media_type_copper;
458 } else {
459 hw->phy.media_type = e1000_media_type_unknown;
460 hw_dbg("PHY module has not been recognized\n");
461 goto out;
462 }
463 } else {
464 hw->phy.media_type = e1000_media_type_unknown;
465 }
466 ret_val = 0;
467out:
468 /* Restore I2C interface setting */
469 wr32(E1000_CTRL_EXT, ctrl_ext);
470 return ret_val;
471}
472
404static s32 igb_get_invariants_82575(struct e1000_hw *hw) 473static s32 igb_get_invariants_82575(struct e1000_hw *hw)
405{ 474{
406 struct e1000_mac_info *mac = &hw->mac; 475 struct e1000_mac_info *mac = &hw->mac;
407 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; 476 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
408 s32 ret_val; 477 s32 ret_val;
409 u32 ctrl_ext = 0; 478 u32 ctrl_ext = 0;
479 u32 link_mode = 0;
410 480
411 switch (hw->device_id) { 481 switch (hw->device_id) {
412 case E1000_DEV_ID_82575EB_COPPER: 482 case E1000_DEV_ID_82575EB_COPPER:
@@ -470,16 +540,56 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
470 */ 540 */
471 hw->phy.media_type = e1000_media_type_copper; 541 hw->phy.media_type = e1000_media_type_copper;
472 dev_spec->sgmii_active = false; 542 dev_spec->sgmii_active = false;
543 dev_spec->module_plugged = false;
473 544
474 ctrl_ext = rd32(E1000_CTRL_EXT); 545 ctrl_ext = rd32(E1000_CTRL_EXT);
475 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 546
476 case E1000_CTRL_EXT_LINK_MODE_SGMII: 547 link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
477 dev_spec->sgmii_active = true; 548 switch (link_mode) {
478 break;
479 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 549 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
480 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
481 hw->phy.media_type = e1000_media_type_internal_serdes; 550 hw->phy.media_type = e1000_media_type_internal_serdes;
482 break; 551 break;
552 case E1000_CTRL_EXT_LINK_MODE_SGMII:
553 /* Get phy control interface type set (MDIO vs. I2C)*/
554 if (igb_sgmii_uses_mdio_82575(hw)) {
555 hw->phy.media_type = e1000_media_type_copper;
556 dev_spec->sgmii_active = true;
557 break;
558 }
559 /* fall through for I2C based SGMII */
560 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
561 /* read media type from SFP EEPROM */
562 ret_val = igb_set_sfp_media_type_82575(hw);
563 if ((ret_val != 0) ||
564 (hw->phy.media_type == e1000_media_type_unknown)) {
565 /* If media type was not identified then return media
566 * type defined by the CTRL_EXT settings.
567 */
568 hw->phy.media_type = e1000_media_type_internal_serdes;
569
570 if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
571 hw->phy.media_type = e1000_media_type_copper;
572 dev_spec->sgmii_active = true;
573 }
574
575 break;
576 }
577
578 /* do not change link mode for 100BaseFX */
579 if (dev_spec->eth_flags.e100_base_fx)
580 break;
581
582 /* change current link mode setting */
583 ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
584
585 if (hw->phy.media_type == e1000_media_type_copper)
586 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
587 else
588 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
589
590 wr32(E1000_CTRL_EXT, ctrl_ext);
591
592 break;
483 default: 593 default:
484 break; 594 break;
485 } 595 }
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 31a0f82cc650..aa201abb8ad2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -61,20 +61,22 @@
61/* Clear Interrupt timers after IMS clear */ 61/* Clear Interrupt timers after IMS clear */
62/* packet buffer parity error detection enabled */ 62/* packet buffer parity error detection enabled */
63/* descriptor FIFO parity error detection enable */ 63/* descriptor FIFO parity error detection enable */
64#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ 64#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
65#define E1000_I2CCMD_REG_ADDR_SHIFT 16 65#define E1000_I2CCMD_REG_ADDR_SHIFT 16
66#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 66#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
67#define E1000_I2CCMD_OPCODE_READ 0x08000000 67#define E1000_I2CCMD_OPCODE_READ 0x08000000
68#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 68#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
69#define E1000_I2CCMD_READY 0x20000000 69#define E1000_I2CCMD_READY 0x20000000
70#define E1000_I2CCMD_ERROR 0x80000000 70#define E1000_I2CCMD_ERROR 0x80000000
71#define E1000_MAX_SGMII_PHY_REG_ADDR 255 71#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a))
72#define E1000_I2CCMD_PHY_TIMEOUT 200 72#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a))
73#define E1000_IVAR_VALID 0x80 73#define E1000_MAX_SGMII_PHY_REG_ADDR 255
74#define E1000_GPIE_NSICR 0x00000001 74#define E1000_I2CCMD_PHY_TIMEOUT 200
75#define E1000_GPIE_MSIX_MODE 0x00000010 75#define E1000_IVAR_VALID 0x80
76#define E1000_GPIE_EIAME 0x40000000 76#define E1000_GPIE_NSICR 0x00000001
77#define E1000_GPIE_PBA 0x80000000 77#define E1000_GPIE_MSIX_MODE 0x00000010
78#define E1000_GPIE_EIAME 0x40000000
79#define E1000_GPIE_PBA 0x80000000
78 80
79/* Receive Descriptor bit definitions */ 81/* Receive Descriptor bit definitions */
80#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 82#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
@@ -270,8 +272,10 @@
270#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX 272#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
271 273
272/* LED Control */ 274/* LED Control */
273#define E1000_LEDCTL_LED0_MODE_SHIFT 0 275#define E1000_LEDCTL_LED0_MODE_SHIFT 0
274#define E1000_LEDCTL_LED0_BLINK 0x00000080 276#define E1000_LEDCTL_LED0_BLINK 0x00000080
277#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
278#define E1000_LEDCTL_LED0_IVRT 0x00000040
275 279
276#define E1000_LEDCTL_MODE_LED_ON 0xE 280#define E1000_LEDCTL_MODE_LED_ON 0xE
277#define E1000_LEDCTL_MODE_LED_OFF 0xF 281#define E1000_LEDCTL_MODE_LED_OFF 0xF
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 488abb24a54f..94d7866b9c20 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -528,6 +528,8 @@ struct e1000_dev_spec_82575 {
528 bool global_device_reset; 528 bool global_device_reset;
529 bool eee_disable; 529 bool eee_disable;
530 bool clear_semaphore_once; 530 bool clear_semaphore_once;
531 struct e1000_sfp_flags eth_flags;
532 bool module_plugged;
531}; 533};
532 534
533struct e1000_hw { 535struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index bfc08e05c907..5caa332e7556 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -82,11 +82,11 @@ enum E1000_INVM_STRUCTURE_TYPE {
82#define E1000_INVM_MAJOR_SHIFT 4 82#define E1000_INVM_MAJOR_SHIFT 4
83 83
84#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ 84#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
85 (ID_LED_OFF1_OFF2 << 4) | \ 85 (ID_LED_DEF1_DEF2 << 4) | \
86 (ID_LED_DEF1_DEF2)) 86 (ID_LED_OFF1_OFF2))
87#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ 87#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
88 (ID_LED_DEF1_DEF2 << 4) | \ 88 (ID_LED_DEF1_DEF2 << 4) | \
89 (ID_LED_DEF1_DEF2)) 89 (ID_LED_OFF1_ON2))
90 90
91/* NVM offset defaults for i211 device */ 91/* NVM offset defaults for i211 device */
92#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 92#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 2559d70a2321..bab556a47fcc 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1332,7 +1332,13 @@ s32 igb_id_led_init(struct e1000_hw *hw)
1332 u16 data, i, temp; 1332 u16 data, i, temp;
1333 const u16 led_mask = 0x0F; 1333 const u16 led_mask = 0x0F;
1334 1334
1335 ret_val = igb_valid_led_default(hw, &data); 1335 /* i210 and i211 devices have different LED mechanism */
1336 if ((hw->mac.type == e1000_i210) ||
1337 (hw->mac.type == e1000_i211))
1338 ret_val = igb_valid_led_default_i210(hw, &data);
1339 else
1340 ret_val = igb_valid_led_default(hw, &data);
1341
1336 if (ret_val) 1342 if (ret_val)
1337 goto out; 1343 goto out;
1338 1344
@@ -1406,15 +1412,34 @@ s32 igb_blink_led(struct e1000_hw *hw)
1406 u32 ledctl_blink = 0; 1412 u32 ledctl_blink = 0;
1407 u32 i; 1413 u32 i;
1408 1414
1409 /* set the blink bit for each LED that's "on" (0x0E) 1415 if (hw->phy.media_type == e1000_media_type_fiber) {
1410 * in ledctl_mode2 1416 /* always blink LED0 for PCI-E fiber */
1411 */ 1417 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1412 ledctl_blink = hw->mac.ledctl_mode2; 1418 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1413 for (i = 0; i < 4; i++) 1419 } else {
1414 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == 1420 /* Set the blink bit for each LED that's "on" (0x0E)
1415 E1000_LEDCTL_MODE_LED_ON) 1421 * (or "off" if inverted) in ledctl_mode2. The blink
1416 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << 1422 * logic in hardware only works when mode is set to "on"
1417 (i * 8)); 1423 * so it must be changed accordingly when the mode is
1424 * "off" and inverted.
1425 */
1426 ledctl_blink = hw->mac.ledctl_mode2;
1427 for (i = 0; i < 32; i += 8) {
1428 u32 mode = (hw->mac.ledctl_mode2 >> i) &
1429 E1000_LEDCTL_LED0_MODE_MASK;
1430 u32 led_default = hw->mac.ledctl_default >> i;
1431
1432 if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
1433 (mode == E1000_LEDCTL_MODE_LED_ON)) ||
1434 ((led_default & E1000_LEDCTL_LED0_IVRT) &&
1435 (mode == E1000_LEDCTL_MODE_LED_OFF))) {
1436 ledctl_blink &=
1437 ~(E1000_LEDCTL_LED0_MODE_MASK << i);
1438 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
1439 E1000_LEDCTL_MODE_LED_ON) << i;
1440 }
1441 }
1442 }
1418 1443
1419 wr32(E1000_LEDCTL, ledctl_blink); 1444 wr32(E1000_LEDCTL, ledctl_blink);
1420 1445
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 115b0da6e013..60461946f98c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -341,6 +341,130 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
341} 341}
342 342
343/** 343/**
344 * igb_read_sfp_data_byte - Reads SFP module data.
345 * @hw: pointer to the HW structure
346 * @offset: byte location offset to be read
347 * @data: read data buffer pointer
348 *
349 * Reads one byte from SFP module data stored
350 * in SFP resided EEPROM memory or SFP diagnostic area.
351 * Function should be called with
352 * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
353 * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
354 * access
355 **/
356s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
357{
358 u32 i = 0;
359 u32 i2ccmd = 0;
360 u32 data_local = 0;
361
362 if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
363 hw_dbg("I2CCMD command address exceeds upper limit\n");
364 return -E1000_ERR_PHY;
365 }
366
367 /* Set up Op-code, EEPROM Address,in the I2CCMD
368 * register. The MAC will take care of interfacing with the
369 * EEPROM to retrieve the desired data.
370 */
371 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
372 E1000_I2CCMD_OPCODE_READ);
373
374 wr32(E1000_I2CCMD, i2ccmd);
375
376 /* Poll the ready bit to see if the I2C read completed */
377 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
378 udelay(50);
379 data_local = rd32(E1000_I2CCMD);
380 if (data_local & E1000_I2CCMD_READY)
381 break;
382 }
383 if (!(data_local & E1000_I2CCMD_READY)) {
384 hw_dbg("I2CCMD Read did not complete\n");
385 return -E1000_ERR_PHY;
386 }
387 if (data_local & E1000_I2CCMD_ERROR) {
388 hw_dbg("I2CCMD Error bit set\n");
389 return -E1000_ERR_PHY;
390 }
391 *data = (u8) data_local & 0xFF;
392
393 return 0;
394}
395
396/**
397 * e1000_write_sfp_data_byte - Writes SFP module data.
398 * @hw: pointer to the HW structure
399 * @offset: byte location offset to write to
400 * @data: data to write
401 *
402 * Writes one byte to SFP module data stored
403 * in SFP resided EEPROM memory or SFP diagnostic area.
404 * Function should be called with
405 * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
406 * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
407 * access
408 **/
409s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
410{
411 u32 i = 0;
412 u32 i2ccmd = 0;
413 u32 data_local = 0;
414
415 if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
416 hw_dbg("I2CCMD command address exceeds upper limit\n");
417 return -E1000_ERR_PHY;
418 }
419 /* The programming interface is 16 bits wide
420 * so we need to read the whole word first
421 * then update appropriate byte lane and write
422 * the updated word back.
423 */
424 /* Set up Op-code, EEPROM Address,in the I2CCMD
425 * register. The MAC will take care of interfacing
426 * with an EEPROM to write the data given.
427 */
428 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
429 E1000_I2CCMD_OPCODE_READ);
430 /* Set a command to read single word */
431 wr32(E1000_I2CCMD, i2ccmd);
432 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
433 udelay(50);
434 /* Poll the ready bit to see if lastly
435 * launched I2C operation completed
436 */
437 i2ccmd = rd32(E1000_I2CCMD);
438 if (i2ccmd & E1000_I2CCMD_READY) {
439 /* Check if this is READ or WRITE phase */
440 if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
441 E1000_I2CCMD_OPCODE_READ) {
442 /* Write the selected byte
443 * lane and update whole word
444 */
445 data_local = i2ccmd & 0xFF00;
446 data_local |= data;
447 i2ccmd = ((offset <<
448 E1000_I2CCMD_REG_ADDR_SHIFT) |
449 E1000_I2CCMD_OPCODE_WRITE | data_local);
450 wr32(E1000_I2CCMD, i2ccmd);
451 } else {
452 break;
453 }
454 }
455 }
456 if (!(i2ccmd & E1000_I2CCMD_READY)) {
457 hw_dbg("I2CCMD Write did not complete\n");
458 return -E1000_ERR_PHY;
459 }
460 if (i2ccmd & E1000_I2CCMD_ERROR) {
461 hw_dbg("I2CCMD Error bit set\n");
462 return -E1000_ERR_PHY;
463 }
464 return 0;
465}
466
467/**
344 * igb_read_phy_reg_igp - Read igp PHY register 468 * igb_read_phy_reg_igp - Read igp PHY register
345 * @hw: pointer to the HW structure 469 * @hw: pointer to the HW structure
346 * @offset: register offset to be read 470 * @offset: register offset to be read
@@ -2014,7 +2138,7 @@ out:
2014 * Verify the reset block is not blocking us from resetting. Acquire 2138 * Verify the reset block is not blocking us from resetting. Acquire
2015 * semaphore (if necessary) and read/set/write the device control reset 2139 * semaphore (if necessary) and read/set/write the device control reset
2016 * bit in the PHY. Wait the appropriate delay time for the device to 2140 * bit in the PHY. Wait the appropriate delay time for the device to
2017 * reset and relase the semaphore (if necessary). 2141 * reset and release the semaphore (if necessary).
2018 **/ 2142 **/
2019s32 igb_phy_hw_reset(struct e1000_hw *hw) 2143s32 igb_phy_hw_reset(struct e1000_hw *hw)
2020{ 2144{
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 784fd1c40989..6a0873f2095a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -69,6 +69,8 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
69s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); 69s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
70s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); 70s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
71s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); 71s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
72s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
73s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
72s32 igb_copper_link_setup_82580(struct e1000_hw *hw); 74s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
73s32 igb_get_phy_info_82580(struct e1000_hw *hw); 75s32 igb_get_phy_info_82580(struct e1000_hw *hw);
74s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); 76s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
@@ -157,4 +159,22 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
157#define GS40G_CS_POWER_DOWN 0x0002 159#define GS40G_CS_POWER_DOWN 0x0002
158#define GS40G_LINE_LB 0x4000 160#define GS40G_LINE_LB 0x4000
159 161
162/* SFP modules ID memory locations */
163#define E1000_SFF_IDENTIFIER_OFFSET 0x00
164#define E1000_SFF_IDENTIFIER_SFF 0x02
165#define E1000_SFF_IDENTIFIER_SFP 0x03
166
167#define E1000_SFF_ETH_FLAGS_OFFSET 0x06
168/* Flags for SFP modules compatible with ETH up to 1Gb */
169struct e1000_sfp_flags {
170 u8 e1000_base_sx:1;
171 u8 e1000_base_lx:1;
172 u8 e1000_base_cx:1;
173 u8 e1000_base_t:1;
174 u8 e100_base_lx:1;
175 u8 e100_base_fx:1;
176 u8 e10_base_bx10:1;
177 u8 e10_base_px:1;
178};
179
160#endif 180#endif
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9d6c075e232d..15ea8dc9dad3 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -322,11 +322,6 @@ static inline int igb_desc_unused(struct igb_ring *ring)
322 return ring->count + ring->next_to_clean - ring->next_to_use - 1; 322 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
323} 323}
324 324
325struct igb_i2c_client_list {
326 struct i2c_client *client;
327 struct igb_i2c_client_list *next;
328};
329
330#ifdef CONFIG_IGB_HWMON 325#ifdef CONFIG_IGB_HWMON
331 326
332#define IGB_HWMON_TYPE_LOC 0 327#define IGB_HWMON_TYPE_LOC 0
@@ -514,13 +509,18 @@ extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
514extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, 509extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
515 unsigned char *va, 510 unsigned char *va,
516 struct sk_buff *skb); 511 struct sk_buff *skb);
517static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, 512static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
518 union e1000_adv_rx_desc *rx_desc, 513 union e1000_adv_rx_desc *rx_desc,
519 struct sk_buff *skb) 514 struct sk_buff *skb)
520{ 515{
521 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && 516 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
522 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) 517 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
523 igb_ptp_rx_rgtstamp(q_vector, skb); 518 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
519
520 /* Update the last_rx_timestamp timer in order to enable watchdog check
521 * for error case of latched timestamp on a dropped packet.
522 */
523 rx_ring->last_rx_timestamp = jiffies;
524} 524}
525 525
526extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, 526extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7876240fa74e..85fe7b52f435 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -142,6 +142,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
142{ 142{
143 struct igb_adapter *adapter = netdev_priv(netdev); 143 struct igb_adapter *adapter = netdev_priv(netdev);
144 struct e1000_hw *hw = &adapter->hw; 144 struct e1000_hw *hw = &adapter->hw;
145 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
146 struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
145 u32 status; 147 u32 status;
146 148
147 if (hw->phy.media_type == e1000_media_type_copper) { 149 if (hw->phy.media_type == e1000_media_type_copper) {
@@ -162,49 +164,26 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
162 ecmd->advertising |= hw->phy.autoneg_advertised; 164 ecmd->advertising |= hw->phy.autoneg_advertised;
163 } 165 }
164 166
165 if (hw->mac.autoneg != 1)
166 ecmd->advertising &= ~(ADVERTISED_Pause |
167 ADVERTISED_Asym_Pause);
168
169 if (hw->fc.requested_mode == e1000_fc_full)
170 ecmd->advertising |= ADVERTISED_Pause;
171 else if (hw->fc.requested_mode == e1000_fc_rx_pause)
172 ecmd->advertising |= (ADVERTISED_Pause |
173 ADVERTISED_Asym_Pause);
174 else if (hw->fc.requested_mode == e1000_fc_tx_pause)
175 ecmd->advertising |= ADVERTISED_Asym_Pause;
176 else
177 ecmd->advertising &= ~(ADVERTISED_Pause |
178 ADVERTISED_Asym_Pause);
179
180 ecmd->port = PORT_TP; 167 ecmd->port = PORT_TP;
181 ecmd->phy_address = hw->phy.addr; 168 ecmd->phy_address = hw->phy.addr;
182 ecmd->transceiver = XCVR_INTERNAL; 169 ecmd->transceiver = XCVR_INTERNAL;
183 } else { 170 } else {
184 ecmd->supported = (SUPPORTED_1000baseT_Full | 171 ecmd->supported = (SUPPORTED_FIBRE |
185 SUPPORTED_100baseT_Full |
186 SUPPORTED_FIBRE |
187 SUPPORTED_Autoneg | 172 SUPPORTED_Autoneg |
188 SUPPORTED_Pause); 173 SUPPORTED_Pause);
189 if (hw->mac.type == e1000_i354)
190 ecmd->supported |= SUPPORTED_2500baseX_Full;
191
192 ecmd->advertising = ADVERTISED_FIBRE; 174 ecmd->advertising = ADVERTISED_FIBRE;
193 175 if (hw->mac.type == e1000_i354) {
194 switch (adapter->link_speed) { 176 ecmd->supported |= SUPPORTED_2500baseX_Full;
195 case SPEED_2500: 177 ecmd->advertising |= ADVERTISED_2500baseX_Full;
196 ecmd->advertising = ADVERTISED_2500baseX_Full; 178 }
197 break; 179 if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
198 case SPEED_1000: 180 ecmd->supported |= SUPPORTED_1000baseT_Full;
199 ecmd->advertising = ADVERTISED_1000baseT_Full; 181 ecmd->advertising |= ADVERTISED_1000baseT_Full;
200 break; 182 }
201 case SPEED_100: 183 if (eth_flags->e100_base_fx) {
202 ecmd->advertising = ADVERTISED_100baseT_Full; 184 ecmd->supported |= SUPPORTED_100baseT_Full;
203 break; 185 ecmd->advertising |= ADVERTISED_100baseT_Full;
204 default:
205 break;
206 } 186 }
207
208 if (hw->mac.autoneg == 1) 187 if (hw->mac.autoneg == 1)
209 ecmd->advertising |= ADVERTISED_Autoneg; 188 ecmd->advertising |= ADVERTISED_Autoneg;
210 189
@@ -212,6 +191,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
212 ecmd->transceiver = XCVR_EXTERNAL; 191 ecmd->transceiver = XCVR_EXTERNAL;
213 } 192 }
214 193
194 if (hw->mac.autoneg != 1)
195 ecmd->advertising &= ~(ADVERTISED_Pause |
196 ADVERTISED_Asym_Pause);
197
198 if (hw->fc.requested_mode == e1000_fc_full)
199 ecmd->advertising |= ADVERTISED_Pause;
200 else if (hw->fc.requested_mode == e1000_fc_rx_pause)
201 ecmd->advertising |= (ADVERTISED_Pause |
202 ADVERTISED_Asym_Pause);
203 else if (hw->fc.requested_mode == e1000_fc_tx_pause)
204 ecmd->advertising |= ADVERTISED_Asym_Pause;
205 else
206 ecmd->advertising &= ~(ADVERTISED_Pause |
207 ADVERTISED_Asym_Pause);
208
215 status = rd32(E1000_STATUS); 209 status = rd32(E1000_STATUS);
216 210
217 if (status & E1000_STATUS_LU) { 211 if (status & E1000_STATUS_LU) {
@@ -392,6 +386,10 @@ static int igb_set_pauseparam(struct net_device *netdev,
392 struct e1000_hw *hw = &adapter->hw; 386 struct e1000_hw *hw = &adapter->hw;
393 int retval = 0; 387 int retval = 0;
394 388
389 /* 100basefx does not support setting link flow control */
390 if (hw->dev_spec._82575.eth_flags.e100_base_fx)
391 return -EINVAL;
392
395 adapter->fc_autoneg = pause->autoneg; 393 adapter->fc_autoneg = pause->autoneg;
396 394
397 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 395 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
@@ -813,10 +811,8 @@ static int igb_set_eeprom(struct net_device *netdev,
813 ret_val = hw->nvm.ops.write(hw, first_word, 811 ret_val = hw->nvm.ops.write(hw, first_word,
814 last_word - first_word + 1, eeprom_buff); 812 last_word - first_word + 1, eeprom_buff);
815 813
816 /* Update the checksum over the first part of the EEPROM if needed 814 /* Update the checksum if nvm write succeeded */
817 * and flush shadow RAM for 82573 controllers 815 if (ret_val == 0)
818 */
819 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
820 hw->nvm.ops.update(hw); 816 hw->nvm.ops.update(hw);
821 817
822 igb_set_fw_version(adapter); 818 igb_set_fw_version(adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 64cbe0dfe043..6a0c1b66ce54 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1667,10 +1667,13 @@ void igb_down(struct igb_adapter *adapter)
1667 wrfl(); 1667 wrfl();
1668 msleep(10); 1668 msleep(10);
1669 1669
1670 for (i = 0; i < adapter->num_q_vectors; i++) 1670 igb_irq_disable(adapter);
1671
1672 for (i = 0; i < adapter->num_q_vectors; i++) {
1673 napi_synchronize(&(adapter->q_vector[i]->napi));
1671 napi_disable(&(adapter->q_vector[i]->napi)); 1674 napi_disable(&(adapter->q_vector[i]->napi));
1675 }
1672 1676
1673 igb_irq_disable(adapter);
1674 1677
1675 del_timer_sync(&adapter->watchdog_timer); 1678 del_timer_sync(&adapter->watchdog_timer);
1676 del_timer_sync(&adapter->phy_info_timer); 1679 del_timer_sync(&adapter->phy_info_timer);
@@ -6622,7 +6625,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
6622 6625
6623 igb_rx_checksum(rx_ring, rx_desc, skb); 6626 igb_rx_checksum(rx_ring, rx_desc, skb);
6624 6627
6625 igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); 6628 igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
6626 6629
6627 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 6630 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
6628 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { 6631 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index ca932387a80f..fb098b46c6a6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -52,6 +52,11 @@
52#include <linux/dca.h> 52#include <linux/dca.h>
53#endif 53#endif
54 54
55#include <net/ll_poll.h>
56
57#ifdef CONFIG_NET_LL_RX_POLL
58#define LL_EXTENDED_STATS
59#endif
55/* common prefix used by pr_<> macros */ 60/* common prefix used by pr_<> macros */
56#undef pr_fmt 61#undef pr_fmt
57#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 62#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -182,6 +187,11 @@ struct ixgbe_rx_buffer {
182struct ixgbe_queue_stats { 187struct ixgbe_queue_stats {
183 u64 packets; 188 u64 packets;
184 u64 bytes; 189 u64 bytes;
190#ifdef LL_EXTENDED_STATS
191 u64 yields;
192 u64 misses;
193 u64 cleaned;
194#endif /* LL_EXTENDED_STATS */
185}; 195};
186 196
187struct ixgbe_tx_queue_stats { 197struct ixgbe_tx_queue_stats {
@@ -356,9 +366,133 @@ struct ixgbe_q_vector {
356 struct rcu_head rcu; /* to avoid race with update stats on free */ 366 struct rcu_head rcu; /* to avoid race with update stats on free */
357 char name[IFNAMSIZ + 9]; 367 char name[IFNAMSIZ + 9];
358 368
369#ifdef CONFIG_NET_LL_RX_POLL
370 unsigned int state;
371#define IXGBE_QV_STATE_IDLE 0
372#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
373#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
374#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
375#define IXGBE_QV_STATE_NAPI_YIELD 4 /* NAPI yielded this QV */
376#define IXGBE_QV_STATE_POLL_YIELD 8 /* poll yielded this QV */
377#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
378#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
379 spinlock_t lock;
380#endif /* CONFIG_NET_LL_RX_POLL */
381
359 /* for dynamic allocation of rings associated with this q_vector */ 382 /* for dynamic allocation of rings associated with this q_vector */
360 struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; 383 struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
361}; 384};
385#ifdef CONFIG_NET_LL_RX_POLL
386static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
387{
388
389 spin_lock_init(&q_vector->lock);
390 q_vector->state = IXGBE_QV_STATE_IDLE;
391}
392
393/* called from the device poll routine to get ownership of a q_vector */
394static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
395{
396 int rc = true;
397 spin_lock(&q_vector->lock);
398 if (q_vector->state & IXGBE_QV_LOCKED) {
399 WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
400 q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
401 rc = false;
402#ifdef LL_EXTENDED_STATS
403 q_vector->tx.ring->stats.yields++;
404#endif
405 } else
406 /* we don't care if someone yielded */
407 q_vector->state = IXGBE_QV_STATE_NAPI;
408 spin_unlock(&q_vector->lock);
409 return rc;
410}
411
412/* returns true if someone tried to get the qv while napi had it */
413static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
414{
415 int rc = false;
416 spin_lock(&q_vector->lock);
417 WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
418 IXGBE_QV_STATE_NAPI_YIELD));
419
420 if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
421 rc = true;
422 q_vector->state = IXGBE_QV_STATE_IDLE;
423 spin_unlock(&q_vector->lock);
424 return rc;
425}
426
427/* called from ixgbe_low_latency_poll() */
428static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
429{
430 int rc = true;
431 spin_lock_bh(&q_vector->lock);
432 if ((q_vector->state & IXGBE_QV_LOCKED)) {
433 q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
434 rc = false;
435#ifdef LL_EXTENDED_STATS
436 q_vector->rx.ring->stats.yields++;
437#endif
438 } else
439 /* preserve yield marks */
440 q_vector->state |= IXGBE_QV_STATE_POLL;
441 spin_unlock_bh(&q_vector->lock);
442 return rc;
443}
444
445/* returns true if someone tried to get the qv while it was locked */
446static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
447{
448 int rc = false;
449 spin_lock_bh(&q_vector->lock);
450 WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI));
451
452 if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
453 rc = true;
454 q_vector->state = IXGBE_QV_STATE_IDLE;
455 spin_unlock_bh(&q_vector->lock);
456 return rc;
457}
458
459/* true if a socket is polling, even if it did not get the lock */
460static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
461{
462 WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
463 return q_vector->state & IXGBE_QV_USER_PEND;
464}
465#else /* CONFIG_NET_LL_RX_POLL */
466static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
467{
468}
469
470static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
471{
472 return true;
473}
474
475static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
476{
477 return false;
478}
479
480static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
481{
482 return false;
483}
484
485static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
486{
487 return false;
488}
489
490static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
491{
492 return false;
493}
494#endif /* CONFIG_NET_LL_RX_POLL */
495
362#ifdef CONFIG_IXGBE_HWMON 496#ifdef CONFIG_IXGBE_HWMON
363 497
364#define IXGBE_HWMON_TYPE_LOC 0 498#define IXGBE_HWMON_TYPE_LOC 0
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 1f2c805684dd..e055e000131b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -380,3 +380,26 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
380 } 380 }
381 return 0; 381 return 0;
382} 382}
383
384static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
385{
386 u32 reg, i;
387
388 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
389 for (i = 0; i < MAX_USER_PRIORITY; i++)
390 map[i] = IXGBE_RTRUP2TC_UP_MASK &
391 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
392 return;
393}
394
395void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
396{
397 switch (hw->mac.type) {
398 case ixgbe_mac_82599EB:
399 case ixgbe_mac_X540:
400 ixgbe_dcb_read_rtrup2tc_82599(hw, map);
401 break;
402 default:
403 break;
404 }
405}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 1634de8b627f..fc0a2dd52499 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -159,6 +159,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
159s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio); 159s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
160s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); 160s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
161 161
162void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
163
162/* DCB definitions for credit calculation */ 164/* DCB definitions for credit calculation */
163#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ 165#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */
164#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ 166#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index a4ef07631d1e..d71d9ce3e394 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -45,6 +45,7 @@
45 45
46/* Receive UP2TC mapping */ 46/* Receive UP2TC mapping */
47#define IXGBE_RTRUP2TC_UP_SHIFT 3 47#define IXGBE_RTRUP2TC_UP_SHIFT 3
48#define IXGBE_RTRUP2TC_UP_MASK 7
48/* Transmit UP2TC mapping */ 49/* Transmit UP2TC mapping */
49#define IXGBE_RTTUP2TC_UP_SHIFT 3 50#define IXGBE_RTTUP2TC_UP_SHIFT 3
50 51
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index f3d68f9696ba..edd89a1ef27f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -554,6 +554,9 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
554 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 554 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
555 adapter->ixgbe_ieee_ets->prio_tc[i] = 555 adapter->ixgbe_ieee_ets->prio_tc[i] =
556 IEEE_8021QAZ_MAX_TCS; 556 IEEE_8021QAZ_MAX_TCS;
557 /* if possible update UP2TC mappings from HW */
558 ixgbe_dcb_read_rtrup2tc(&adapter->hw,
559 adapter->ixgbe_ieee_ets->prio_tc);
557 } 560 }
558 561
559 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 562 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index d3754722adb4..24e2e7aafda2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1054,6 +1054,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1054 data[i] = 0; 1054 data[i] = 0;
1055 data[i+1] = 0; 1055 data[i+1] = 0;
1056 i += 2; 1056 i += 2;
1057#ifdef LL_EXTENDED_STATS
1058 data[i] = 0;
1059 data[i+1] = 0;
1060 data[i+2] = 0;
1061 i += 3;
1062#endif
1057 continue; 1063 continue;
1058 } 1064 }
1059 1065
@@ -1063,6 +1069,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1063 data[i+1] = ring->stats.bytes; 1069 data[i+1] = ring->stats.bytes;
1064 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1070 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1065 i += 2; 1071 i += 2;
1072#ifdef LL_EXTENDED_STATS
1073 data[i] = ring->stats.yields;
1074 data[i+1] = ring->stats.misses;
1075 data[i+2] = ring->stats.cleaned;
1076 i += 3;
1077#endif
1066 } 1078 }
1067 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { 1079 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1068 ring = adapter->rx_ring[j]; 1080 ring = adapter->rx_ring[j];
@@ -1070,6 +1082,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1070 data[i] = 0; 1082 data[i] = 0;
1071 data[i+1] = 0; 1083 data[i+1] = 0;
1072 i += 2; 1084 i += 2;
1085#ifdef LL_EXTENDED_STATS
1086 data[i] = 0;
1087 data[i+1] = 0;
1088 data[i+2] = 0;
1089 i += 3;
1090#endif
1073 continue; 1091 continue;
1074 } 1092 }
1075 1093
@@ -1079,6 +1097,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1079 data[i+1] = ring->stats.bytes; 1097 data[i+1] = ring->stats.bytes;
1080 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1098 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1081 i += 2; 1099 i += 2;
1100#ifdef LL_EXTENDED_STATS
1101 data[i] = ring->stats.yields;
1102 data[i+1] = ring->stats.misses;
1103 data[i+2] = ring->stats.cleaned;
1104 i += 3;
1105#endif
1082 } 1106 }
1083 1107
1084 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { 1108 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
@@ -1115,12 +1139,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1115 p += ETH_GSTRING_LEN; 1139 p += ETH_GSTRING_LEN;
1116 sprintf(p, "tx_queue_%u_bytes", i); 1140 sprintf(p, "tx_queue_%u_bytes", i);
1117 p += ETH_GSTRING_LEN; 1141 p += ETH_GSTRING_LEN;
1142#ifdef LL_EXTENDED_STATS
1143 sprintf(p, "tx_q_%u_napi_yield", i);
1144 p += ETH_GSTRING_LEN;
1145 sprintf(p, "tx_q_%u_misses", i);
1146 p += ETH_GSTRING_LEN;
1147 sprintf(p, "tx_q_%u_cleaned", i);
1148 p += ETH_GSTRING_LEN;
1149#endif /* LL_EXTENDED_STATS */
1118 } 1150 }
1119 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { 1151 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1120 sprintf(p, "rx_queue_%u_packets", i); 1152 sprintf(p, "rx_queue_%u_packets", i);
1121 p += ETH_GSTRING_LEN; 1153 p += ETH_GSTRING_LEN;
1122 sprintf(p, "rx_queue_%u_bytes", i); 1154 sprintf(p, "rx_queue_%u_bytes", i);
1123 p += ETH_GSTRING_LEN; 1155 p += ETH_GSTRING_LEN;
1156#ifdef LL_EXTENDED_STATS
1157 sprintf(p, "rx_q_%u_ll_poll_yield", i);
1158 p += ETH_GSTRING_LEN;
1159 sprintf(p, "rx_q_%u_misses", i);
1160 p += ETH_GSTRING_LEN;
1161 sprintf(p, "rx_q_%u_cleaned", i);
1162 p += ETH_GSTRING_LEN;
1163#endif /* LL_EXTENDED_STATS */
1124 } 1164 }
1125 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { 1165 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1126 sprintf(p, "tx_pb_%u_pxon", i); 1166 sprintf(p, "tx_pb_%u_pxon", i);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index ef5f7a678ce1..90b4e1089ecc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -811,6 +811,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
811 /* initialize NAPI */ 811 /* initialize NAPI */
812 netif_napi_add(adapter->netdev, &q_vector->napi, 812 netif_napi_add(adapter->netdev, &q_vector->napi,
813 ixgbe_poll, 64); 813 ixgbe_poll, 64);
814 napi_hash_add(&q_vector->napi);
814 815
815 /* tie q_vector and adapter together */ 816 /* tie q_vector and adapter together */
816 adapter->q_vector[v_idx] = q_vector; 817 adapter->q_vector[v_idx] = q_vector;
@@ -931,6 +932,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
931 adapter->rx_ring[ring->queue_index] = NULL; 932 adapter->rx_ring[ring->queue_index] = NULL;
932 933
933 adapter->q_vector[v_idx] = NULL; 934 adapter->q_vector[v_idx] = NULL;
935 napi_hash_del(&q_vector->napi);
934 netif_napi_del(&q_vector->napi); 936 netif_napi_del(&q_vector->napi);
935 937
936 /* 938 /*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d30fbdd81fca..047ebaaf0141 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1504,7 +1504,9 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1504{ 1504{
1505 struct ixgbe_adapter *adapter = q_vector->adapter; 1505 struct ixgbe_adapter *adapter = q_vector->adapter;
1506 1506
1507 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 1507 if (ixgbe_qv_ll_polling(q_vector))
1508 netif_receive_skb(skb);
1509 else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1508 napi_gro_receive(&q_vector->napi, skb); 1510 napi_gro_receive(&q_vector->napi, skb);
1509 else 1511 else
1510 netif_rx(skb); 1512 netif_rx(skb);
@@ -1892,9 +1894,9 @@ dma_sync:
1892 * expensive overhead for IOMMU access this provides a means of avoiding 1894 * expensive overhead for IOMMU access this provides a means of avoiding
1893 * it by maintaining the mapping of the page to the system. 1895 * it by maintaining the mapping of the page to the system.
1894 * 1896 *
1895 * Returns true if all work is completed without reaching budget 1897 * Returns amount of work completed
1896 **/ 1898 **/
1897static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, 1899static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1898 struct ixgbe_ring *rx_ring, 1900 struct ixgbe_ring *rx_ring,
1899 const int budget) 1901 const int budget)
1900{ 1902{
@@ -1976,6 +1978,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1976 } 1978 }
1977 1979
1978#endif /* IXGBE_FCOE */ 1980#endif /* IXGBE_FCOE */
1981 skb_mark_ll(skb, &q_vector->napi);
1979 ixgbe_rx_skb(q_vector, skb); 1982 ixgbe_rx_skb(q_vector, skb);
1980 1983
1981 /* update budget accounting */ 1984 /* update budget accounting */
@@ -1992,9 +1995,43 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1992 if (cleaned_count) 1995 if (cleaned_count)
1993 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); 1996 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1994 1997
1995 return (total_rx_packets < budget); 1998 return total_rx_packets;
1996} 1999}
1997 2000
2001#ifdef CONFIG_NET_LL_RX_POLL
2002/* must be called with local_bh_disable()d */
2003static int ixgbe_low_latency_recv(struct napi_struct *napi)
2004{
2005 struct ixgbe_q_vector *q_vector =
2006 container_of(napi, struct ixgbe_q_vector, napi);
2007 struct ixgbe_adapter *adapter = q_vector->adapter;
2008 struct ixgbe_ring *ring;
2009 int found = 0;
2010
2011 if (test_bit(__IXGBE_DOWN, &adapter->state))
2012 return LL_FLUSH_FAILED;
2013
2014 if (!ixgbe_qv_lock_poll(q_vector))
2015 return LL_FLUSH_BUSY;
2016
2017 ixgbe_for_each_ring(ring, q_vector->rx) {
2018 found = ixgbe_clean_rx_irq(q_vector, ring, 4);
2019#ifdef LL_EXTENDED_STATS
2020 if (found)
2021 ring->stats.cleaned += found;
2022 else
2023 ring->stats.misses++;
2024#endif
2025 if (found)
2026 break;
2027 }
2028
2029 ixgbe_qv_unlock_poll(q_vector);
2030
2031 return found;
2032}
2033#endif /* CONFIG_NET_LL_RX_POLL */
2034
1998/** 2035/**
1999 * ixgbe_configure_msix - Configure MSI-X hardware 2036 * ixgbe_configure_msix - Configure MSI-X hardware
2000 * @adapter: board private structure 2037 * @adapter: board private structure
@@ -2550,6 +2587,9 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
2550 ixgbe_for_each_ring(ring, q_vector->tx) 2587 ixgbe_for_each_ring(ring, q_vector->tx)
2551 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); 2588 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
2552 2589
2590 if (!ixgbe_qv_lock_napi(q_vector))
2591 return budget;
2592
2553 /* attempt to distribute budget to each queue fairly, but don't allow 2593 /* attempt to distribute budget to each queue fairly, but don't allow
2554 * the budget to go below 1 because we'll exit polling */ 2594 * the budget to go below 1 because we'll exit polling */
2555 if (q_vector->rx.count > 1) 2595 if (q_vector->rx.count > 1)
@@ -2558,9 +2598,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
2558 per_ring_budget = budget; 2598 per_ring_budget = budget;
2559 2599
2560 ixgbe_for_each_ring(ring, q_vector->rx) 2600 ixgbe_for_each_ring(ring, q_vector->rx)
2561 clean_complete &= ixgbe_clean_rx_irq(q_vector, ring, 2601 clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
2562 per_ring_budget); 2602 per_ring_budget) < per_ring_budget);
2563 2603
2604 ixgbe_qv_unlock_napi(q_vector);
2564 /* If all work not completed, return budget and keep polling */ 2605 /* If all work not completed, return budget and keep polling */
2565 if (!clean_complete) 2606 if (!clean_complete)
2566 return budget; 2607 return budget;
@@ -3747,16 +3788,25 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3747{ 3788{
3748 int q_idx; 3789 int q_idx;
3749 3790
3750 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) 3791 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
3792 ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
3751 napi_enable(&adapter->q_vector[q_idx]->napi); 3793 napi_enable(&adapter->q_vector[q_idx]->napi);
3794 }
3752} 3795}
3753 3796
3754static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) 3797static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3755{ 3798{
3756 int q_idx; 3799 int q_idx;
3757 3800
3758 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) 3801 local_bh_disable(); /* for ixgbe_qv_lock_napi() */
3802 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
3759 napi_disable(&adapter->q_vector[q_idx]->napi); 3803 napi_disable(&adapter->q_vector[q_idx]->napi);
3804 while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
3805 pr_info("QV %d locked\n", q_idx);
3806 mdelay(1);
3807 }
3808 }
3809 local_bh_enable();
3760} 3810}
3761 3811
3762#ifdef CONFIG_IXGBE_DCB 3812#ifdef CONFIG_IXGBE_DCB
@@ -7177,6 +7227,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7177#ifdef CONFIG_NET_POLL_CONTROLLER 7227#ifdef CONFIG_NET_POLL_CONTROLLER
7178 .ndo_poll_controller = ixgbe_netpoll, 7228 .ndo_poll_controller = ixgbe_netpoll,
7179#endif 7229#endif
7230#ifdef CONFIG_NET_LL_RX_POLL
7231 .ndo_ll_poll = ixgbe_low_latency_recv,
7232#endif
7180#ifdef IXGBE_FCOE 7233#ifdef IXGBE_FCOE
7181 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, 7234 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
7182 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, 7235 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 070a6f1a0577..7fbe6abf6054 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -3148,7 +3148,6 @@ jme_init_one(struct pci_dev *pdev,
3148 jme->mii_if.mdio_write = jme_mdio_write; 3148 jme->mii_if.mdio_write = jme_mdio_write;
3149 3149
3150 jme_clear_pm(jme); 3150 jme_clear_pm(jme);
3151 pci_set_power_state(jme->pdev, PCI_D0);
3152 device_set_wakeup_enable(&pdev->dev, true); 3151 device_set_wakeup_enable(&pdev->dev, true);
3153 3152
3154 jme_set_phyfifo_5level(jme); 3153 jme_set_phyfifo_5level(jme);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 5409fe876a44..270e65f21102 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -483,7 +483,6 @@ static void korina_multicast_list(struct net_device *dev)
483 unsigned long flags; 483 unsigned long flags;
484 struct netdev_hw_addr *ha; 484 struct netdev_hw_addr *ha;
485 u32 recognise = ETH_ARC_AB; /* always accept broadcasts */ 485 u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
486 int i;
487 486
488 /* Set promiscuous mode */ 487 /* Set promiscuous mode */
489 if (dev->flags & IFF_PROMISC) 488 if (dev->flags & IFF_PROMISC)
@@ -495,12 +494,9 @@ static void korina_multicast_list(struct net_device *dev)
495 494
496 /* Build the hash table */ 495 /* Build the hash table */
497 if (netdev_mc_count(dev) > 4) { 496 if (netdev_mc_count(dev) > 4) {
498 u16 hash_table[4]; 497 u16 hash_table[4] = { 0 };
499 u32 crc; 498 u32 crc;
500 499
501 for (i = 0; i < 4; i++)
502 hash_table[i] = 0;
503
504 netdev_for_each_mc_addr(ha, dev) { 500 netdev_for_each_mc_addr(ha, dev) {
505 crc = ether_crc_le(6, ha->addr); 501 crc = ether_crc_le(6, ha->addr);
506 crc >>= 26; 502 crc >>= 26;
@@ -1214,7 +1210,6 @@ static int korina_remove(struct platform_device *pdev)
1214 iounmap(lp->rx_dma_regs); 1210 iounmap(lp->rx_dma_regs);
1215 iounmap(lp->tx_dma_regs); 1211 iounmap(lp->tx_dma_regs);
1216 1212
1217 platform_set_drvdata(pdev, NULL);
1218 unregister_netdev(bif->dev); 1213 unregister_netdev(bif->dev);
1219 free_netdev(bif->dev); 1214 free_netdev(bif->dev);
1220 1215
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index d1cbfb12c1ca..c35db735958f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -60,6 +60,10 @@
60#include <linux/types.h> 60#include <linux/types.h>
61#include <linux/slab.h> 61#include <linux/slab.h>
62#include <linux/clk.h> 62#include <linux/clk.h>
63#include <linux/of.h>
64#include <linux/of_irq.h>
65#include <linux/of_net.h>
66#include <linux/of_mdio.h>
63 67
64static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 68static char mv643xx_eth_driver_name[] = "mv643xx_eth";
65static char mv643xx_eth_driver_version[] = "1.4"; 69static char mv643xx_eth_driver_version[] = "1.4";
@@ -115,6 +119,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
115#define LINK_UP 0x00000002 119#define LINK_UP 0x00000002
116#define TXQ_COMMAND 0x0048 120#define TXQ_COMMAND 0x0048
117#define TXQ_FIX_PRIO_CONF 0x004c 121#define TXQ_FIX_PRIO_CONF 0x004c
122#define PORT_SERIAL_CONTROL1 0x004c
123#define CLK125_BYPASS_EN 0x00000010
118#define TX_BW_RATE 0x0050 124#define TX_BW_RATE 0x0050
119#define TX_BW_MTU 0x0058 125#define TX_BW_MTU 0x0058
120#define TX_BW_BURST 0x005c 126#define TX_BW_BURST 0x005c
@@ -615,7 +621,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
615 621
616 rx_desc = rxq->rx_desc_area + rx; 622 rx_desc = rxq->rx_desc_area + rx;
617 623
618 size = skb->end - skb->data; 624 size = skb_end_pointer(skb) - skb->data;
619 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, 625 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
620 skb->data, size, 626 skb->data, size,
621 DMA_FROM_DEVICE); 627 DMA_FROM_DEVICE);
@@ -2450,13 +2456,159 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2450 } 2456 }
2451} 2457}
2452 2458
2459#if defined(CONFIG_OF)
2460static const struct of_device_id mv643xx_eth_shared_ids[] = {
2461 { .compatible = "marvell,orion-eth", },
2462 { .compatible = "marvell,kirkwood-eth", },
2463 { }
2464};
2465MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
2466#endif
2467
2468#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60)
2469#define mv643xx_eth_property(_np, _name, _v) \
2470 do { \
2471 u32 tmp; \
2472 if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \
2473 _v = tmp; \
2474 } while (0)
2475
2476static struct platform_device *port_platdev[3];
2477
2478static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
2479 struct device_node *pnp)
2480{
2481 struct platform_device *ppdev;
2482 struct mv643xx_eth_platform_data ppd;
2483 struct resource res;
2484 const char *mac_addr;
2485 int ret;
2486 int dev_num = 0;
2487
2488 memset(&ppd, 0, sizeof(ppd));
2489 ppd.shared = pdev;
2490
2491 memset(&res, 0, sizeof(res));
2492 if (!of_irq_to_resource(pnp, 0, &res)) {
2493 dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
2494 return -EINVAL;
2495 }
2496
2497 if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
2498 dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
2499 return -EINVAL;
2500 }
2501
2502 if (ppd.port_number >= 3) {
2503 dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
2504 return -EINVAL;
2505 }
2506
2507 while (dev_num < 3 && port_platdev[dev_num])
2508 dev_num++;
2509
2510 if (dev_num == 3) {
2511 dev_err(&pdev->dev, "too many ports registered\n");
2512 return -EINVAL;
2513 }
2514
2515 mac_addr = of_get_mac_address(pnp);
2516 if (mac_addr)
2517 memcpy(ppd.mac_addr, mac_addr, 6);
2518
2519 mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
2520 mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
2521 mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
2522 mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
2523 mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
2524 mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
2525
2526 ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
2527 if (!ppd.phy_node) {
2528 ppd.phy_addr = MV643XX_ETH_PHY_NONE;
2529 of_property_read_u32(pnp, "speed", &ppd.speed);
2530 of_property_read_u32(pnp, "duplex", &ppd.duplex);
2531 }
2532
2533 ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
2534 if (!ppdev)
2535 return -ENOMEM;
2536 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2537
2538 ret = platform_device_add_resources(ppdev, &res, 1);
2539 if (ret)
2540 goto port_err;
2541
2542 ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
2543 if (ret)
2544 goto port_err;
2545
2546 ret = platform_device_add(ppdev);
2547 if (ret)
2548 goto port_err;
2549
2550 port_platdev[dev_num] = ppdev;
2551
2552 return 0;
2553
2554port_err:
2555 platform_device_put(ppdev);
2556 return ret;
2557}
2558
2559static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2560{
2561 struct mv643xx_eth_shared_platform_data *pd;
2562 struct device_node *pnp, *np = pdev->dev.of_node;
2563 int ret;
2564
2565 /* bail out if not registered from DT */
2566 if (!np)
2567 return 0;
2568
2569 pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
2570 if (!pd)
2571 return -ENOMEM;
2572 pdev->dev.platform_data = pd;
2573
2574 mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
2575
2576 for_each_available_child_of_node(np, pnp) {
2577 ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
2578 if (ret)
2579 return ret;
2580 }
2581 return 0;
2582}
2583
2584static void mv643xx_eth_shared_of_remove(void)
2585{
2586 int n;
2587
2588 for (n = 0; n < 3; n++) {
2589 platform_device_del(port_platdev[n]);
2590 port_platdev[n] = NULL;
2591 }
2592}
2593#else
2594static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2595{
2596 return 0;
2597}
2598
2599static inline void mv643xx_eth_shared_of_remove(void)
2600{
2601}
2602#endif
2603
2453static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2604static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2454{ 2605{
2455 static int mv643xx_eth_version_printed; 2606 static int mv643xx_eth_version_printed;
2456 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2607 struct mv643xx_eth_shared_platform_data *pd;
2457 struct mv643xx_eth_shared_private *msp; 2608 struct mv643xx_eth_shared_private *msp;
2458 const struct mbus_dram_target_info *dram; 2609 const struct mbus_dram_target_info *dram;
2459 struct resource *res; 2610 struct resource *res;
2611 int ret;
2460 2612
2461 if (!mv643xx_eth_version_printed++) 2613 if (!mv643xx_eth_version_printed++)
2462 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", 2614 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
@@ -2469,8 +2621,9 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2469 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); 2621 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
2470 if (msp == NULL) 2622 if (msp == NULL)
2471 return -ENOMEM; 2623 return -ENOMEM;
2624 platform_set_drvdata(pdev, msp);
2472 2625
2473 msp->base = ioremap(res->start, resource_size(res)); 2626 msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
2474 if (msp->base == NULL) 2627 if (msp->base == NULL)
2475 return -ENOMEM; 2628 return -ENOMEM;
2476 2629
@@ -2485,12 +2638,15 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2485 if (dram) 2638 if (dram)
2486 mv643xx_eth_conf_mbus_windows(msp, dram); 2639 mv643xx_eth_conf_mbus_windows(msp, dram);
2487 2640
2641 ret = mv643xx_eth_shared_of_probe(pdev);
2642 if (ret)
2643 return ret;
2644 pd = pdev->dev.platform_data;
2645
2488 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 2646 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
2489 pd->tx_csum_limit : 9 * 1024; 2647 pd->tx_csum_limit : 9 * 1024;
2490 infer_hw_params(msp); 2648 infer_hw_params(msp);
2491 2649
2492 platform_set_drvdata(pdev, msp);
2493
2494 return 0; 2650 return 0;
2495} 2651}
2496 2652
@@ -2498,10 +2654,9 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2498{ 2654{
2499 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); 2655 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
2500 2656
2501 iounmap(msp->base); 2657 mv643xx_eth_shared_of_remove();
2502 if (!IS_ERR(msp->clk)) 2658 if (!IS_ERR(msp->clk))
2503 clk_disable_unprepare(msp->clk); 2659 clk_disable_unprepare(msp->clk);
2504
2505 return 0; 2660 return 0;
2506} 2661}
2507 2662
@@ -2511,6 +2666,7 @@ static struct platform_driver mv643xx_eth_shared_driver = {
2511 .driver = { 2666 .driver = {
2512 .name = MV643XX_ETH_SHARED_NAME, 2667 .name = MV643XX_ETH_SHARED_NAME,
2513 .owner = THIS_MODULE, 2668 .owner = THIS_MODULE,
2669 .of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
2514 }, 2670 },
2515}; 2671};
2516 2672
@@ -2701,6 +2857,15 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2701 2857
2702 mp->dev = dev; 2858 mp->dev = dev;
2703 2859
2860 /* Kirkwood resets some registers on gated clocks. Especially
2861 * CLK125_BYPASS_EN must be cleared but is not available on
2862 * all other SoCs/System Controllers using this driver.
2863 */
2864 if (of_device_is_compatible(pdev->dev.of_node,
2865 "marvell,kirkwood-eth-port"))
2866 wrlp(mp, PORT_SERIAL_CONTROL1,
2867 rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
2868
2704 /* 2869 /*
2705 * Start with a default rate, and if there is a clock, allow 2870 * Start with a default rate, and if there is a clock, allow
2706 * it to override the default. 2871 * it to override the default.
@@ -2710,23 +2875,35 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2710 if (!IS_ERR(mp->clk)) { 2875 if (!IS_ERR(mp->clk)) {
2711 clk_prepare_enable(mp->clk); 2876 clk_prepare_enable(mp->clk);
2712 mp->t_clk = clk_get_rate(mp->clk); 2877 mp->t_clk = clk_get_rate(mp->clk);
2878 } else if (!IS_ERR(mp->shared->clk)) {
2879 mp->t_clk = clk_get_rate(mp->shared->clk);
2713 } 2880 }
2714 2881
2715 set_params(mp, pd); 2882 set_params(mp, pd);
2716 netif_set_real_num_tx_queues(dev, mp->txq_count); 2883 netif_set_real_num_tx_queues(dev, mp->txq_count);
2717 netif_set_real_num_rx_queues(dev, mp->rxq_count); 2884 netif_set_real_num_rx_queues(dev, mp->rxq_count);
2718 2885
2719 if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { 2886 err = 0;
2887 if (pd->phy_node) {
2888 mp->phy = of_phy_connect(mp->dev, pd->phy_node,
2889 mv643xx_eth_adjust_link, 0,
2890 PHY_INTERFACE_MODE_GMII);
2891 if (!mp->phy)
2892 err = -ENODEV;
2893 } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
2720 mp->phy = phy_scan(mp, pd->phy_addr); 2894 mp->phy = phy_scan(mp, pd->phy_addr);
2721 2895
2722 if (IS_ERR(mp->phy)) { 2896 if (IS_ERR(mp->phy))
2723 err = PTR_ERR(mp->phy); 2897 err = PTR_ERR(mp->phy);
2724 if (err == -ENODEV) 2898 else
2725 err = -EPROBE_DEFER; 2899 phy_init(mp, pd->speed, pd->duplex);
2726 goto out;
2727 }
2728 phy_init(mp, pd->speed, pd->duplex);
2729 } 2900 }
2901 if (err == -ENODEV) {
2902 err = -EPROBE_DEFER;
2903 goto out;
2904 }
2905 if (err)
2906 goto out;
2730 2907
2731 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 2908 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
2732 2909
@@ -2805,7 +2982,7 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2805 2982
2806 unregister_netdev(mp->dev); 2983 unregister_netdev(mp->dev);
2807 if (mp->phy != NULL) 2984 if (mp->phy != NULL)
2808 phy_detach(mp->phy); 2985 phy_disconnect(mp->phy);
2809 cancel_work_sync(&mp->tx_timeout_task); 2986 cancel_work_sync(&mp->tx_timeout_task);
2810 2987
2811 if (!IS_ERR(mp->clk)) 2988 if (!IS_ERR(mp->clk))
@@ -2813,8 +2990,6 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2813 2990
2814 free_netdev(mp->dev); 2991 free_netdev(mp->dev);
2815 2992
2816 platform_set_drvdata(pdev, NULL);
2817
2818 return 0; 2993 return 0;
2819} 2994}
2820 2995
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index c96678555233..712779fb12b7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2251,6 +2251,21 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
2251 return 0; 2251 return 0;
2252} 2252}
2253 2253
2254/* Get mac address */
2255static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2256{
2257 u32 mac_addr_l, mac_addr_h;
2258
2259 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
2260 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
2261 addr[0] = (mac_addr_h >> 24) & 0xFF;
2262 addr[1] = (mac_addr_h >> 16) & 0xFF;
2263 addr[2] = (mac_addr_h >> 8) & 0xFF;
2264 addr[3] = mac_addr_h & 0xFF;
2265 addr[4] = (mac_addr_l >> 8) & 0xFF;
2266 addr[5] = mac_addr_l & 0xFF;
2267}
2268
2254/* Handle setting mac address */ 2269/* Handle setting mac address */
2255static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 2270static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2256{ 2271{
@@ -2667,7 +2682,9 @@ static int mvneta_probe(struct platform_device *pdev)
2667 u32 phy_addr; 2682 u32 phy_addr;
2668 struct mvneta_port *pp; 2683 struct mvneta_port *pp;
2669 struct net_device *dev; 2684 struct net_device *dev;
2670 const char *mac_addr; 2685 const char *dt_mac_addr;
2686 char hw_mac_addr[ETH_ALEN];
2687 const char *mac_from;
2671 int phy_mode; 2688 int phy_mode;
2672 int err; 2689 int err;
2673 2690
@@ -2703,13 +2720,6 @@ static int mvneta_probe(struct platform_device *pdev)
2703 goto err_free_irq; 2720 goto err_free_irq;
2704 } 2721 }
2705 2722
2706 mac_addr = of_get_mac_address(dn);
2707
2708 if (!mac_addr || !is_valid_ether_addr(mac_addr))
2709 eth_hw_addr_random(dev);
2710 else
2711 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
2712
2713 dev->tx_queue_len = MVNETA_MAX_TXD; 2723 dev->tx_queue_len = MVNETA_MAX_TXD;
2714 dev->watchdog_timeo = 5 * HZ; 2724 dev->watchdog_timeo = 5 * HZ;
2715 dev->netdev_ops = &mvneta_netdev_ops; 2725 dev->netdev_ops = &mvneta_netdev_ops;
@@ -2740,6 +2750,21 @@ static int mvneta_probe(struct platform_device *pdev)
2740 2750
2741 clk_prepare_enable(pp->clk); 2751 clk_prepare_enable(pp->clk);
2742 2752
2753 dt_mac_addr = of_get_mac_address(dn);
2754 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
2755 mac_from = "device tree";
2756 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
2757 } else {
2758 mvneta_get_mac_addr(pp, hw_mac_addr);
2759 if (is_valid_ether_addr(hw_mac_addr)) {
2760 mac_from = "hardware";
2761 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
2762 } else {
2763 mac_from = "random";
2764 eth_hw_addr_random(dev);
2765 }
2766 }
2767
2743 pp->tx_done_timer.data = (unsigned long)dev; 2768 pp->tx_done_timer.data = (unsigned long)dev;
2744 2769
2745 pp->tx_ring_size = MVNETA_MAX_TXD; 2770 pp->tx_ring_size = MVNETA_MAX_TXD;
@@ -2772,7 +2797,8 @@ static int mvneta_probe(struct platform_device *pdev)
2772 goto err_deinit; 2797 goto err_deinit;
2773 } 2798 }
2774 2799
2775 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 2800 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
2801 dev->dev_addr);
2776 2802
2777 platform_set_drvdata(pdev, pp->dev); 2803 platform_set_drvdata(pdev, pp->dev);
2778 2804
@@ -2804,8 +2830,6 @@ static int mvneta_remove(struct platform_device *pdev)
2804 irq_dispose_mapping(dev->irq); 2830 irq_dispose_mapping(dev->irq);
2805 free_netdev(dev); 2831 free_netdev(dev);
2806 2832
2807 platform_set_drvdata(pdev, NULL);
2808
2809 return 0; 2833 return 0;
2810} 2834}
2811 2835
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1c8af8ba08d9..db481477bcc5 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -357,7 +357,7 @@ static void rxq_refill(struct net_device *dev)
357 /* Get 'used' Rx descriptor */ 357 /* Get 'used' Rx descriptor */
358 used_rx_desc = pep->rx_used_desc_q; 358 used_rx_desc = pep->rx_used_desc_q;
359 p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; 359 p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
360 size = skb->end - skb->data; 360 size = skb_end_pointer(skb) - skb->data;
361 p_used_rx_desc->buf_ptr = dma_map_single(NULL, 361 p_used_rx_desc->buf_ptr = dma_map_single(NULL,
362 skb->data, 362 skb->data,
363 size, 363 size,
@@ -1602,7 +1602,6 @@ static int pxa168_eth_remove(struct platform_device *pdev)
1602 unregister_netdev(dev); 1602 unregister_netdev(dev);
1603 cancel_work_sync(&pep->tx_timeout_task); 1603 cancel_work_sync(&pep->tx_timeout_task);
1604 free_netdev(dev); 1604 free_netdev(dev);
1605 platform_set_drvdata(pdev, NULL);
1606 return 0; 1605 return 0;
1607} 1606}
1608 1607
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 171f4b3dda07..c896079728e1 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3706,7 +3706,7 @@ static const struct file_operations skge_debug_fops = {
3706static int skge_device_event(struct notifier_block *unused, 3706static int skge_device_event(struct notifier_block *unused,
3707 unsigned long event, void *ptr) 3707 unsigned long event, void *ptr)
3708{ 3708{
3709 struct net_device *dev = ptr; 3709 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3710 struct skge_port *skge; 3710 struct skge_port *skge;
3711 struct dentry *d; 3711 struct dentry *d;
3712 3712
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index d175bbd3ffd3..e09a8c6f8536 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4642,7 +4642,7 @@ static const struct file_operations sky2_debug_fops = {
4642static int sky2_device_event(struct notifier_block *unused, 4642static int sky2_device_event(struct notifier_block *unused,
4643 unsigned long event, void *ptr) 4643 unsigned long event, void *ptr)
4644{ 4644{
4645 struct net_device *dev = ptr; 4645 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4646 struct sky2_port *sky2 = netdev_priv(dev); 4646 struct sky2_port *sky2 = netdev_priv(dev);
4647 4647
4648 if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug) 4648 if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 0e572a527154..299d0184f983 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -39,6 +39,7 @@
39#include <linux/errno.h> 39#include <linux/errno.h>
40 40
41#include <linux/mlx4/cmd.h> 41#include <linux/mlx4/cmd.h>
42#include <linux/mlx4/device.h>
42#include <linux/semaphore.h> 43#include <linux/semaphore.h>
43#include <rdma/ib_smi.h> 44#include <rdma/ib_smi.h>
44 45
@@ -111,6 +112,14 @@ enum {
111 GO_BIT_TIMEOUT_MSECS = 10000 112 GO_BIT_TIMEOUT_MSECS = 10000
112}; 113};
113 114
115enum mlx4_vlan_transition {
116 MLX4_VLAN_TRANSITION_VST_VST = 0,
117 MLX4_VLAN_TRANSITION_VST_VGT = 1,
118 MLX4_VLAN_TRANSITION_VGT_VST = 2,
119 MLX4_VLAN_TRANSITION_VGT_VGT = 3,
120};
121
122
114struct mlx4_cmd_context { 123struct mlx4_cmd_context {
115 struct completion done; 124 struct completion done;
116 int result; 125 int result;
@@ -256,6 +265,8 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
256 265
257 if (!wait_for_completion_timeout(&context->done, 266 if (!wait_for_completion_timeout(&context->done,
258 msecs_to_jiffies(timeout))) { 267 msecs_to_jiffies(timeout))) {
268 mlx4_warn(dev, "communication channel command 0x%x timed out\n",
269 op);
259 err = -EBUSY; 270 err = -EBUSY;
260 goto out; 271 goto out;
261 } 272 }
@@ -485,6 +496,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
485 } 496 }
486 497
487 if (cmd_pending(dev)) { 498 if (cmd_pending(dev)) {
499 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
500 op);
488 err = -ETIMEDOUT; 501 err = -ETIMEDOUT;
489 goto out; 502 goto out;
490 } 503 }
@@ -548,6 +561,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
548 561
549 if (!wait_for_completion_timeout(&context->done, 562 if (!wait_for_completion_timeout(&context->done,
550 msecs_to_jiffies(timeout))) { 563 msecs_to_jiffies(timeout))) {
564 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
565 op);
551 err = -EBUSY; 566 err = -EBUSY;
552 goto out; 567 goto out;
553 } 568 }
@@ -785,6 +800,15 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
785 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 800 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
786} 801}
787 802
803int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
804 struct mlx4_vhcr *vhcr,
805 struct mlx4_cmd_mailbox *inbox,
806 struct mlx4_cmd_mailbox *outbox,
807 struct mlx4_cmd_info *cmd)
808{
809 return -EPERM;
810}
811
788int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, 812int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
789 struct mlx4_vhcr *vhcr, 813 struct mlx4_vhcr *vhcr,
790 struct mlx4_cmd_mailbox *inbox, 814 struct mlx4_cmd_mailbox *inbox,
@@ -1219,6 +1243,15 @@ static struct mlx4_cmd_info cmd_info[] = {
1219 .wrapper = mlx4_GEN_QP_wrapper 1243 .wrapper = mlx4_GEN_QP_wrapper
1220 }, 1244 },
1221 { 1245 {
1246 .opcode = MLX4_CMD_UPDATE_QP,
1247 .has_inbox = false,
1248 .has_outbox = false,
1249 .out_is_imm = false,
1250 .encode_slave_id = false,
1251 .verify = NULL,
1252 .wrapper = MLX4_CMD_UPDATE_QP_wrapper
1253 },
1254 {
1222 .opcode = MLX4_CMD_CONF_SPECIAL_QP, 1255 .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1223 .has_inbox = false, 1256 .has_inbox = false,
1224 .has_outbox = false, 1257 .has_outbox = false,
@@ -1488,6 +1521,102 @@ out:
1488 return ret; 1521 return ret;
1489} 1522}
1490 1523
1524static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
1525{
1526 return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
1527}
1528
1529int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1530 int slave, int port)
1531{
1532 struct mlx4_vport_oper_state *vp_oper;
1533 struct mlx4_vport_state *vp_admin;
1534 struct mlx4_vf_immed_vlan_work *work;
1535 struct mlx4_dev *dev = &(priv->dev);
1536 int err;
1537 int admin_vlan_ix = NO_INDX;
1538 enum mlx4_vlan_transition vlan_trans;
1539
1540 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1541 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1542
1543 if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1544 vp_oper->state.default_qos == vp_admin->default_qos &&
1545 vp_oper->state.link_state == vp_admin->link_state)
1546 return 0;
1547
1548 vlan_trans = calculate_transition(vp_oper->state.default_vlan,
1549 vp_admin->default_vlan);
1550
1551 if (!(priv->mfunc.master.slave_state[slave].active &&
1552 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP &&
1553 vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) {
1554 /* even if the UPDATE_QP command isn't supported, we still want
1555 * to set this VF link according to the admin directive
1556 */
1557 vp_oper->state.link_state = vp_admin->link_state;
1558 return -1;
1559 }
1560
1561 mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1562 slave, port);
1563 mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan,
1564 vp_admin->default_qos, vp_admin->link_state);
1565
1566 work = kzalloc(sizeof(*work), GFP_KERNEL);
1567 if (!work)
1568 return -ENOMEM;
1569
1570 if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1571 err = __mlx4_register_vlan(&priv->dev, port,
1572 vp_admin->default_vlan,
1573 &admin_vlan_ix);
1574 if (err) {
1575 kfree(work);
1576 mlx4_warn((&priv->dev),
1577 "No vlan resources slave %d, port %d\n",
1578 slave, port);
1579 return err;
1580 }
1581 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1582 mlx4_dbg((&(priv->dev)),
1583 "alloc vlan %d idx %d slave %d port %d\n",
1584 (int)(vp_admin->default_vlan),
1585 admin_vlan_ix, slave, port);
1586 }
1587
1588 /* save original vlan ix and vlan id */
1589 work->orig_vlan_id = vp_oper->state.default_vlan;
1590 work->orig_vlan_ix = vp_oper->vlan_idx;
1591
1592 /* handle new qos */
1593 if (vp_oper->state.default_qos != vp_admin->default_qos)
1594 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1595
1596 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1597 vp_oper->vlan_idx = admin_vlan_ix;
1598
1599 vp_oper->state.default_vlan = vp_admin->default_vlan;
1600 vp_oper->state.default_qos = vp_admin->default_qos;
1601 vp_oper->state.link_state = vp_admin->link_state;
1602
1603 if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
1604 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
1605
1606 /* iterate over QPs owned by this slave, using UPDATE_QP */
1607 work->port = port;
1608 work->slave = slave;
1609 work->qos = vp_oper->state.default_qos;
1610 work->vlan_id = vp_oper->state.default_vlan;
1611 work->vlan_ix = vp_oper->vlan_idx;
1612 work->priv = priv;
1613 INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1614 queue_work(priv->mfunc.master.comm_wq, &work->work);
1615
1616 return 0;
1617}
1618
1619
1491static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) 1620static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1492{ 1621{
1493 int port, err; 1622 int port, err;
@@ -2102,10 +2231,12 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2102} 2231}
2103EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); 2232EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2104 2233
2234
2105int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) 2235int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2106{ 2236{
2107 struct mlx4_priv *priv = mlx4_priv(dev); 2237 struct mlx4_priv *priv = mlx4_priv(dev);
2108 struct mlx4_vport_state *s_info; 2238 struct mlx4_vport_oper_state *vf_oper;
2239 struct mlx4_vport_state *vf_admin;
2109 int slave; 2240 int slave;
2110 2241
2111 if ((!mlx4_is_master(dev)) || 2242 if ((!mlx4_is_master(dev)) ||
@@ -2119,12 +2250,19 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2119 if (slave < 0) 2250 if (slave < 0)
2120 return -EINVAL; 2251 return -EINVAL;
2121 2252
2122 s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; 2253 vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2254 vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2255
2123 if ((0 == vlan) && (0 == qos)) 2256 if ((0 == vlan) && (0 == qos))
2124 s_info->default_vlan = MLX4_VGT; 2257 vf_admin->default_vlan = MLX4_VGT;
2125 else 2258 else
2126 s_info->default_vlan = vlan; 2259 vf_admin->default_vlan = vlan;
2127 s_info->default_qos = qos; 2260 vf_admin->default_qos = qos;
2261
2262 if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2263 mlx4_info(dev,
2264 "updating vf %d port %d config will take effect on next VF restart\n",
2265 vf, port);
2128 return 0; 2266 return 0;
2129} 2267}
2130EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); 2268EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
@@ -2178,7 +2316,55 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
2178 ivf->qos = s_info->default_qos; 2316 ivf->qos = s_info->default_qos;
2179 ivf->tx_rate = s_info->tx_rate; 2317 ivf->tx_rate = s_info->tx_rate;
2180 ivf->spoofchk = s_info->spoofchk; 2318 ivf->spoofchk = s_info->spoofchk;
2319 ivf->linkstate = s_info->link_state;
2181 2320
2182 return 0; 2321 return 0;
2183} 2322}
2184EXPORT_SYMBOL_GPL(mlx4_get_vf_config); 2323EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
2324
2325int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
2326{
2327 struct mlx4_priv *priv = mlx4_priv(dev);
2328 struct mlx4_vport_state *s_info;
2329 int slave;
2330 u8 link_stat_event;
2331
2332 slave = mlx4_get_slave_indx(dev, vf);
2333 if (slave < 0)
2334 return -EINVAL;
2335
2336 switch (link_state) {
2337 case IFLA_VF_LINK_STATE_AUTO:
2338 /* get current link state */
2339 if (!priv->sense.do_sense_port[port])
2340 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2341 else
2342 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2343 break;
2344
2345 case IFLA_VF_LINK_STATE_ENABLE:
2346 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2347 break;
2348
2349 case IFLA_VF_LINK_STATE_DISABLE:
2350 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2351 break;
2352
2353 default:
2354 mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
2355 link_state, slave, port);
2356 return -EINVAL;
2357 };
2358 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2359 s_info->link_state = link_state;
2360
2361 /* send event */
2362 mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
2363
2364 if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2365 mlx4_dbg(dev,
2366 "updating vf %d port %d no link state HW enforcment\n",
2367 vf, port);
2368 return 0;
2369}
2370EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 1e6c594d6d04..3e2d5047cdb3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -139,6 +139,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
139 139
140 if (!cq->is_tx) { 140 if (!cq->is_tx) {
141 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); 141 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
142 napi_hash_add(&cq->napi);
142 napi_enable(&cq->napi); 143 napi_enable(&cq->napi);
143 } 144 }
144 145
@@ -162,6 +163,8 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
162{ 163{
163 if (!cq->is_tx) { 164 if (!cq->is_tx) {
164 napi_disable(&cq->napi); 165 napi_disable(&cq->napi);
166 napi_hash_del(&cq->napi);
167 synchronize_rcu();
165 netif_napi_del(&cq->napi); 168 netif_napi_del(&cq->napi);
166 } 169 }
167 170
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 0f91222ea3d7..9d4a1ea030d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -207,9 +207,6 @@ static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
207 struct mlx4_en_priv *priv = netdev_priv(dev); 207 struct mlx4_en_priv *priv = netdev_priv(dev);
208 int i; 208 int i;
209 209
210 if (!priv->maxrate)
211 return -EINVAL;
212
213 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 210 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
214 maxrate->tc_maxrate[i] = 211 maxrate->tc_maxrate[i] =
215 priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB; 212 priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c9e6b62dd000..727874f575ce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -222,7 +222,12 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
222 switch (sset) { 222 switch (sset) {
223 case ETH_SS_STATS: 223 case ETH_SS_STATS:
224 return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) + 224 return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
225 (priv->tx_ring_num + priv->rx_ring_num) * 2; 225 (priv->tx_ring_num * 2) +
226#ifdef CONFIG_NET_LL_RX_POLL
227 (priv->rx_ring_num * 5);
228#else
229 (priv->rx_ring_num * 2);
230#endif
226 case ETH_SS_TEST: 231 case ETH_SS_TEST:
227 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags 232 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
228 & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; 233 & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -271,6 +276,11 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
271 for (i = 0; i < priv->rx_ring_num; i++) { 276 for (i = 0; i < priv->rx_ring_num; i++) {
272 data[index++] = priv->rx_ring[i].packets; 277 data[index++] = priv->rx_ring[i].packets;
273 data[index++] = priv->rx_ring[i].bytes; 278 data[index++] = priv->rx_ring[i].bytes;
279#ifdef CONFIG_NET_LL_RX_POLL
280 data[index++] = priv->rx_ring[i].yields;
281 data[index++] = priv->rx_ring[i].misses;
282 data[index++] = priv->rx_ring[i].cleaned;
283#endif
274 } 284 }
275 spin_unlock_bh(&priv->stats_lock); 285 spin_unlock_bh(&priv->stats_lock);
276 286
@@ -334,6 +344,14 @@ static void mlx4_en_get_strings(struct net_device *dev,
334 "rx%d_packets", i); 344 "rx%d_packets", i);
335 sprintf(data + (index++) * ETH_GSTRING_LEN, 345 sprintf(data + (index++) * ETH_GSTRING_LEN,
336 "rx%d_bytes", i); 346 "rx%d_bytes", i);
347#ifdef CONFIG_NET_LL_RX_POLL
348 sprintf(data + (index++) * ETH_GSTRING_LEN,
349 "rx%d_napi_yield", i);
350 sprintf(data + (index++) * ETH_GSTRING_LEN,
351 "rx%d_misses", i);
352 sprintf(data + (index++) * ETH_GSTRING_LEN,
353 "rx%d_cleaned", i);
354#endif
337 } 355 }
338 break; 356 break;
339 } 357 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index a5c9df07a7d0..a071cda2dd04 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -310,7 +310,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
310err_mr: 310err_mr:
311 (void) mlx4_mr_free(dev, &mdev->mr); 311 (void) mlx4_mr_free(dev, &mdev->mr);
312err_map: 312err_map:
313 if (!mdev->uar_map) 313 if (mdev->uar_map)
314 iounmap(mdev->uar_map); 314 iounmap(mdev->uar_map);
315err_uar: 315err_uar:
316 mlx4_uar_free(dev, &mdev->priv_uar); 316 mlx4_uar_free(dev, &mdev->priv_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 89c47ea84b50..caf204770569 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -38,6 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/hash.h> 39#include <linux/hash.h>
40#include <net/ip.h> 40#include <net/ip.h>
41#include <net/ll_poll.h>
41 42
42#include <linux/mlx4/driver.h> 43#include <linux/mlx4/driver.h>
43#include <linux/mlx4/device.h> 44#include <linux/mlx4/device.h>
@@ -67,6 +68,34 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
67 return 0; 68 return 0;
68} 69}
69 70
71#ifdef CONFIG_NET_LL_RX_POLL
72/* must be called with local_bh_disable()d */
73static int mlx4_en_low_latency_recv(struct napi_struct *napi)
74{
75 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
76 struct net_device *dev = cq->dev;
77 struct mlx4_en_priv *priv = netdev_priv(dev);
78 struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
79 int done;
80
81 if (!priv->port_up)
82 return LL_FLUSH_FAILED;
83
84 if (!mlx4_en_cq_lock_poll(cq))
85 return LL_FLUSH_BUSY;
86
87 done = mlx4_en_process_rx_cq(dev, cq, 4);
88 if (likely(done))
89 rx_ring->cleaned += done;
90 else
91 rx_ring->misses++;
92
93 mlx4_en_cq_unlock_poll(cq);
94
95 return done;
96}
97#endif /* CONFIG_NET_LL_RX_POLL */
98
70#ifdef CONFIG_RFS_ACCEL 99#ifdef CONFIG_RFS_ACCEL
71 100
72struct mlx4_en_filter { 101struct mlx4_en_filter {
@@ -376,7 +405,7 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
376 en_err(priv, "Failed configuring VLAN filter\n"); 405 en_err(priv, "Failed configuring VLAN filter\n");
377 } 406 }
378 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) 407 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
379 en_err(priv, "failed adding vlan %d\n", vid); 408 en_dbg(HW, priv, "failed adding vlan %d\n", vid);
380 mutex_unlock(&mdev->state_lock); 409 mutex_unlock(&mdev->state_lock);
381 410
382 return 0; 411 return 0;
@@ -399,7 +428,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
399 if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx)) 428 if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
400 mlx4_unregister_vlan(mdev->dev, priv->port, idx); 429 mlx4_unregister_vlan(mdev->dev, priv->port, idx);
401 else 430 else
402 en_err(priv, "could not find vid %d in cache\n", vid); 431 en_dbg(HW, priv, "could not find vid %d in cache\n", vid);
403 432
404 if (mdev->device_up && priv->port_up) { 433 if (mdev->device_up && priv->port_up) {
405 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 434 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
@@ -1207,10 +1236,19 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
1207{ 1236{
1208 struct mlx4_en_priv *priv = netdev_priv(dev); 1237 struct mlx4_en_priv *priv = netdev_priv(dev);
1209 struct mlx4_en_dev *mdev = priv->mdev; 1238 struct mlx4_en_dev *mdev = priv->mdev;
1239 int i;
1210 1240
1211 if (netif_msg_timer(priv)) 1241 if (netif_msg_timer(priv))
1212 en_warn(priv, "Tx timeout called on port:%d\n", priv->port); 1242 en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
1213 1243
1244 for (i = 0; i < priv->tx_ring_num; i++) {
1245 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
1246 continue;
1247 en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
1248 i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
1249 priv->tx_ring[i].cons, priv->tx_ring[i].prod);
1250 }
1251
1214 priv->port_stats.tx_timeout++; 1252 priv->port_stats.tx_timeout++;
1215 en_dbg(DRV, priv, "Scheduling watchdog\n"); 1253 en_dbg(DRV, priv, "Scheduling watchdog\n");
1216 queue_work(mdev->workqueue, &priv->watchdog_task); 1254 queue_work(mdev->workqueue, &priv->watchdog_task);
@@ -1346,12 +1384,13 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
1346 1384
1347 mutex_lock(&mdev->state_lock); 1385 mutex_lock(&mdev->state_lock);
1348 if (mdev->device_up) { 1386 if (mdev->device_up) {
1349 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 1387 if (priv->port_up) {
1350 if (err) 1388 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1351 en_dbg(HW, priv, "Could not update stats\n"); 1389 if (err)
1390 en_dbg(HW, priv, "Could not update stats\n");
1352 1391
1353 if (priv->port_up)
1354 mlx4_en_auto_moderation(priv); 1392 mlx4_en_auto_moderation(priv);
1393 }
1355 1394
1356 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1395 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1357 } 1396 }
@@ -1445,6 +1484,8 @@ int mlx4_en_start_port(struct net_device *dev)
1445 for (i = 0; i < priv->rx_ring_num; i++) { 1484 for (i = 0; i < priv->rx_ring_num; i++) {
1446 cq = &priv->rx_cq[i]; 1485 cq = &priv->rx_cq[i];
1447 1486
1487 mlx4_en_cq_init_lock(cq);
1488
1448 err = mlx4_en_activate_cq(priv, cq, i); 1489 err = mlx4_en_activate_cq(priv, cq, i);
1449 if (err) { 1490 if (err) {
1450 en_err(priv, "Failed activating Rx CQ\n"); 1491 en_err(priv, "Failed activating Rx CQ\n");
@@ -1603,6 +1644,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
1603 return; 1644 return;
1604 } 1645 }
1605 1646
1647 /* close port*/
1648 mlx4_CLOSE_PORT(mdev->dev, priv->port);
1649
1606 /* Synchronize with tx routine */ 1650 /* Synchronize with tx routine */
1607 netif_tx_lock_bh(dev); 1651 netif_tx_lock_bh(dev);
1608 if (detach) 1652 if (detach)
@@ -1694,14 +1738,20 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
1694 1738
1695 /* Free RX Rings */ 1739 /* Free RX Rings */
1696 for (i = 0; i < priv->rx_ring_num; i++) { 1740 for (i = 0; i < priv->rx_ring_num; i++) {
1697 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); 1741 struct mlx4_en_cq *cq = &priv->rx_cq[i];
1698 while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state)) 1742
1743 local_bh_disable();
1744 while (!mlx4_en_cq_lock_napi(cq)) {
1745 pr_info("CQ %d locked\n", i);
1746 mdelay(1);
1747 }
1748 local_bh_enable();
1749
1750 while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
1699 msleep(1); 1751 msleep(1);
1700 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]); 1752 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
1753 mlx4_en_deactivate_cq(priv, cq);
1701 } 1754 }
1702
1703 /* close port*/
1704 mlx4_CLOSE_PORT(mdev->dev, priv->port);
1705} 1755}
1706 1756
1707static void mlx4_en_restart(struct work_struct *work) 1757static void mlx4_en_restart(struct work_struct *work)
@@ -2061,6 +2111,13 @@ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_
2061 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); 2111 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2062} 2112}
2063 2113
2114static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2115{
2116 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2117 struct mlx4_en_dev *mdev = en_priv->mdev;
2118
2119 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2120}
2064static const struct net_device_ops mlx4_netdev_ops = { 2121static const struct net_device_ops mlx4_netdev_ops = {
2065 .ndo_open = mlx4_en_open, 2122 .ndo_open = mlx4_en_open,
2066 .ndo_stop = mlx4_en_close, 2123 .ndo_stop = mlx4_en_close,
@@ -2083,6 +2140,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
2083#ifdef CONFIG_RFS_ACCEL 2140#ifdef CONFIG_RFS_ACCEL
2084 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2141 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2085#endif 2142#endif
2143#ifdef CONFIG_NET_LL_RX_POLL
2144 .ndo_ll_poll = mlx4_en_low_latency_recv,
2145#endif
2086}; 2146};
2087 2147
2088static const struct net_device_ops mlx4_netdev_ops_master = { 2148static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2101,6 +2161,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
2101 .ndo_set_vf_mac = mlx4_en_set_vf_mac, 2161 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
2102 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, 2162 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
2103 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, 2163 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
2164 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
2104 .ndo_get_vf_config = mlx4_en_get_vf_config, 2165 .ndo_get_vf_config = mlx4_en_get_vf_config,
2105#ifdef CONFIG_NET_POLL_CONTROLLER 2166#ifdef CONFIG_NET_POLL_CONTROLLER
2106 .ndo_poll_controller = mlx4_en_netpoll, 2167 .ndo_poll_controller = mlx4_en_netpoll,
@@ -2271,6 +2332,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2271 mdev->pndev[port] = dev; 2332 mdev->pndev[port] = dev;
2272 2333
2273 netif_carrier_off(dev); 2334 netif_carrier_off(dev);
2335 mlx4_en_set_default_moderation(priv);
2336
2274 err = register_netdev(dev); 2337 err = register_netdev(dev);
2275 if (err) { 2338 if (err) {
2276 en_err(priv, "Netdev registration failed for port %d\n", port); 2339 en_err(priv, "Netdev registration failed for port %d\n", port);
@@ -2302,7 +2365,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2302 en_err(priv, "Failed Initializing port\n"); 2365 en_err(priv, "Failed Initializing port\n");
2303 goto out; 2366 goto out;
2304 } 2367 }
2305 mlx4_en_set_default_moderation(priv);
2306 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 2368 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
2307 2369
2308 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2370 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 02aee1ebd203..76997b93fdfe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33 33
34#include <net/ll_poll.h>
34#include <linux/mlx4/cq.h> 35#include <linux/mlx4/cq.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36#include <linux/mlx4/qp.h> 37#include <linux/mlx4/qp.h>
@@ -42,40 +43,64 @@
42 43
43#include "mlx4_en.h" 44#include "mlx4_en.h"
44 45
46static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
47 struct mlx4_en_rx_alloc *page_alloc,
48 const struct mlx4_en_frag_info *frag_info,
49 gfp_t _gfp)
50{
51 int order;
52 struct page *page;
53 dma_addr_t dma;
54
55 for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
56 gfp_t gfp = _gfp;
57
58 if (order)
59 gfp |= __GFP_COMP | __GFP_NOWARN;
60 page = alloc_pages(gfp, order);
61 if (likely(page))
62 break;
63 if (--order < 0 ||
64 ((PAGE_SIZE << order) < frag_info->frag_size))
65 return -ENOMEM;
66 }
67 dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
68 PCI_DMA_FROMDEVICE);
69 if (dma_mapping_error(priv->ddev, dma)) {
70 put_page(page);
71 return -ENOMEM;
72 }
73 page_alloc->size = PAGE_SIZE << order;
74 page_alloc->page = page;
75 page_alloc->dma = dma;
76 page_alloc->offset = frag_info->frag_align;
77 /* Not doing get_page() for each frag is a big win
78 * on asymetric workloads.
79 */
80 atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
81 return 0;
82}
83
45static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, 84static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
46 struct mlx4_en_rx_desc *rx_desc, 85 struct mlx4_en_rx_desc *rx_desc,
47 struct mlx4_en_rx_alloc *frags, 86 struct mlx4_en_rx_alloc *frags,
48 struct mlx4_en_rx_alloc *ring_alloc) 87 struct mlx4_en_rx_alloc *ring_alloc,
88 gfp_t gfp)
49{ 89{
50 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; 90 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
51 struct mlx4_en_frag_info *frag_info; 91 const struct mlx4_en_frag_info *frag_info;
52 struct page *page; 92 struct page *page;
53 dma_addr_t dma; 93 dma_addr_t dma;
54 int i; 94 int i;
55 95
56 for (i = 0; i < priv->num_frags; i++) { 96 for (i = 0; i < priv->num_frags; i++) {
57 frag_info = &priv->frag_info[i]; 97 frag_info = &priv->frag_info[i];
58 if (ring_alloc[i].offset == frag_info->last_offset) { 98 page_alloc[i] = ring_alloc[i];
59 page = alloc_pages(GFP_ATOMIC | __GFP_COMP, 99 page_alloc[i].offset += frag_info->frag_stride;
60 MLX4_EN_ALLOC_ORDER); 100 if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
61 if (!page) 101 continue;
62 goto out; 102 if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
63 dma = dma_map_page(priv->ddev, page, 0, 103 goto out;
64 MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
65 if (dma_mapping_error(priv->ddev, dma)) {
66 put_page(page);
67 goto out;
68 }
69 page_alloc[i].page = page;
70 page_alloc[i].dma = dma;
71 page_alloc[i].offset = frag_info->frag_align;
72 } else {
73 page_alloc[i].page = ring_alloc[i].page;
74 get_page(ring_alloc[i].page);
75 page_alloc[i].dma = ring_alloc[i].dma;
76 page_alloc[i].offset = ring_alloc[i].offset +
77 frag_info->frag_stride;
78 }
79 } 104 }
80 105
81 for (i = 0; i < priv->num_frags; i++) { 106 for (i = 0; i < priv->num_frags; i++) {
@@ -87,14 +112,16 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
87 112
88 return 0; 113 return 0;
89 114
90
91out: 115out:
92 while (i--) { 116 while (i--) {
93 frag_info = &priv->frag_info[i]; 117 frag_info = &priv->frag_info[i];
94 if (ring_alloc[i].offset == frag_info->last_offset) 118 if (page_alloc[i].page != ring_alloc[i].page) {
95 dma_unmap_page(priv->ddev, page_alloc[i].dma, 119 dma_unmap_page(priv->ddev, page_alloc[i].dma,
96 MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); 120 page_alloc[i].size, PCI_DMA_FROMDEVICE);
97 put_page(page_alloc[i].page); 121 page = page_alloc[i].page;
122 atomic_set(&page->_count, 1);
123 put_page(page);
124 }
98 } 125 }
99 return -ENOMEM; 126 return -ENOMEM;
100} 127}
@@ -103,12 +130,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
103 struct mlx4_en_rx_alloc *frags, 130 struct mlx4_en_rx_alloc *frags,
104 int i) 131 int i)
105{ 132{
106 struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; 133 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
107 134
108 if (frags[i].offset == frag_info->last_offset) { 135 if (frags[i].offset + frag_info->frag_stride > frags[i].size)
109 dma_unmap_page(priv->ddev, frags[i].dma, MLX4_EN_ALLOC_SIZE, 136 dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
110 PCI_DMA_FROMDEVICE); 137 PCI_DMA_FROMDEVICE);
111 } 138
112 if (frags[i].page) 139 if (frags[i].page)
113 put_page(frags[i].page); 140 put_page(frags[i].page);
114} 141}
@@ -116,35 +143,28 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
116static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, 143static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
117 struct mlx4_en_rx_ring *ring) 144 struct mlx4_en_rx_ring *ring)
118{ 145{
119 struct mlx4_en_rx_alloc *page_alloc;
120 int i; 146 int i;
147 struct mlx4_en_rx_alloc *page_alloc;
121 148
122 for (i = 0; i < priv->num_frags; i++) { 149 for (i = 0; i < priv->num_frags; i++) {
123 page_alloc = &ring->page_alloc[i]; 150 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
124 page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
125 MLX4_EN_ALLOC_ORDER);
126 if (!page_alloc->page)
127 goto out;
128 151
129 page_alloc->dma = dma_map_page(priv->ddev, page_alloc->page, 0, 152 if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
130 MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); 153 frag_info, GFP_KERNEL))
131 if (dma_mapping_error(priv->ddev, page_alloc->dma)) {
132 put_page(page_alloc->page);
133 page_alloc->page = NULL;
134 goto out; 154 goto out;
135 }
136 page_alloc->offset = priv->frag_info[i].frag_align;
137 en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
138 i, page_alloc->page);
139 } 155 }
140 return 0; 156 return 0;
141 157
142out: 158out:
143 while (i--) { 159 while (i--) {
160 struct page *page;
161
144 page_alloc = &ring->page_alloc[i]; 162 page_alloc = &ring->page_alloc[i];
145 dma_unmap_page(priv->ddev, page_alloc->dma, 163 dma_unmap_page(priv->ddev, page_alloc->dma,
146 MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); 164 page_alloc->size, PCI_DMA_FROMDEVICE);
147 put_page(page_alloc->page); 165 page = page_alloc->page;
166 atomic_set(&page->_count, 1);
167 put_page(page);
148 page_alloc->page = NULL; 168 page_alloc->page = NULL;
149 } 169 }
150 return -ENOMEM; 170 return -ENOMEM;
@@ -157,13 +177,18 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
157 int i; 177 int i;
158 178
159 for (i = 0; i < priv->num_frags; i++) { 179 for (i = 0; i < priv->num_frags; i++) {
180 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
181
160 page_alloc = &ring->page_alloc[i]; 182 page_alloc = &ring->page_alloc[i];
161 en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", 183 en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
162 i, page_count(page_alloc->page)); 184 i, page_count(page_alloc->page));
163 185
164 dma_unmap_page(priv->ddev, page_alloc->dma, 186 dma_unmap_page(priv->ddev, page_alloc->dma,
165 MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); 187 page_alloc->size, PCI_DMA_FROMDEVICE);
166 put_page(page_alloc->page); 188 while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
189 put_page(page_alloc->page);
190 page_alloc->offset += frag_info->frag_stride;
191 }
167 page_alloc->page = NULL; 192 page_alloc->page = NULL;
168 } 193 }
169} 194}
@@ -194,13 +219,14 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
194} 219}
195 220
196static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, 221static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
197 struct mlx4_en_rx_ring *ring, int index) 222 struct mlx4_en_rx_ring *ring, int index,
223 gfp_t gfp)
198{ 224{
199 struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); 225 struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
200 struct mlx4_en_rx_alloc *frags = ring->rx_info + 226 struct mlx4_en_rx_alloc *frags = ring->rx_info +
201 (index << priv->log_rx_info); 227 (index << priv->log_rx_info);
202 228
203 return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc); 229 return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
204} 230}
205 231
206static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) 232static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
@@ -234,7 +260,8 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
234 ring = &priv->rx_ring[ring_ind]; 260 ring = &priv->rx_ring[ring_ind];
235 261
236 if (mlx4_en_prepare_rx_desc(priv, ring, 262 if (mlx4_en_prepare_rx_desc(priv, ring,
237 ring->actual_size)) { 263 ring->actual_size,
264 GFP_KERNEL)) {
238 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { 265 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
239 en_err(priv, "Failed to allocate " 266 en_err(priv, "Failed to allocate "
240 "enough rx buffers\n"); 267 "enough rx buffers\n");
@@ -449,11 +476,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
449 DMA_FROM_DEVICE); 476 DMA_FROM_DEVICE);
450 477
451 /* Save page reference in skb */ 478 /* Save page reference in skb */
452 get_page(frags[nr].page);
453 __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); 479 __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
454 skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); 480 skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
455 skb_frags_rx[nr].page_offset = frags[nr].offset; 481 skb_frags_rx[nr].page_offset = frags[nr].offset;
456 skb->truesize += frag_info->frag_stride; 482 skb->truesize += frag_info->frag_stride;
483 frags[nr].page = NULL;
457 } 484 }
458 /* Adjust size of last fragment to match actual length */ 485 /* Adjust size of last fragment to match actual length */
459 if (nr > 0) 486 if (nr > 0)
@@ -546,7 +573,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
546 int index = ring->prod & ring->size_mask; 573 int index = ring->prod & ring->size_mask;
547 574
548 while ((u32) (ring->prod - ring->cons) < ring->actual_size) { 575 while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
549 if (mlx4_en_prepare_rx_desc(priv, ring, index)) 576 if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
550 break; 577 break;
551 ring->prod++; 578 ring->prod++;
552 index = ring->prod & ring->size_mask; 579 index = ring->prod & ring->size_mask;
@@ -656,8 +683,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
656 * - DIX Ethernet (type interpretation) 683 * - DIX Ethernet (type interpretation)
657 * - TCP/IP (v4) 684 * - TCP/IP (v4)
658 * - without IP options 685 * - without IP options
659 * - not an IP fragment */ 686 * - not an IP fragment
660 if (dev->features & NETIF_F_GRO) { 687 * - no LLS polling in progress
688 */
689 if (!mlx4_en_cq_ll_polling(cq) &&
690 (dev->features & NETIF_F_GRO)) {
661 struct sk_buff *gro_skb = napi_get_frags(&cq->napi); 691 struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
662 if (!gro_skb) 692 if (!gro_skb)
663 goto next; 693 goto next;
@@ -737,6 +767,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
737 timestamp); 767 timestamp);
738 } 768 }
739 769
770 skb_mark_ll(skb, &cq->napi);
771
740 /* Push it up the stack */ 772 /* Push it up the stack */
741 netif_receive_skb(skb); 773 netif_receive_skb(skb);
742 774
@@ -781,8 +813,13 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
781 struct mlx4_en_priv *priv = netdev_priv(dev); 813 struct mlx4_en_priv *priv = netdev_priv(dev);
782 int done; 814 int done;
783 815
816 if (!mlx4_en_cq_lock_napi(cq))
817 return budget;
818
784 done = mlx4_en_process_rx_cq(dev, cq, budget); 819 done = mlx4_en_process_rx_cq(dev, cq, budget);
785 820
821 mlx4_en_cq_unlock_napi(cq);
822
786 /* If we used up all the quota - we're probably not done yet... */ 823 /* If we used up all the quota - we're probably not done yet... */
787 if (done == budget) 824 if (done == budget)
788 INC_PERF_COUNTER(priv->pstats.napi_quota); 825 INC_PERF_COUNTER(priv->pstats.napi_quota);
@@ -794,21 +831,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
794 return done; 831 return done;
795} 832}
796 833
797 834static const int frag_sizes[] = {
798/* Calculate the last offset position that accommodates a full fragment
799 * (assuming fagment size = stride-align) */
800static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
801{
802 u16 res = MLX4_EN_ALLOC_SIZE % stride;
803 u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
804
805 en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
806 "res:%d offset:%d\n", stride, align, res, offset);
807 return offset;
808}
809
810
811static int frag_sizes[] = {
812 FRAG_SZ0, 835 FRAG_SZ0,
813 FRAG_SZ1, 836 FRAG_SZ1,
814 FRAG_SZ2, 837 FRAG_SZ2,
@@ -836,9 +859,6 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
836 priv->frag_info[i].frag_stride = 859 priv->frag_info[i].frag_stride =
837 ALIGN(frag_sizes[i], SMP_CACHE_BYTES); 860 ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
838 } 861 }
839 priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
840 priv, priv->frag_info[i].frag_stride,
841 priv->frag_info[i].frag_align);
842 buf_size += priv->frag_info[i].frag_size; 862 buf_size += priv->frag_info[i].frag_size;
843 i++; 863 i++;
844 } 864 }
@@ -850,13 +870,13 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
850 en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " 870 en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
851 "num_frags:%d):\n", eff_mtu, priv->num_frags); 871 "num_frags:%d):\n", eff_mtu, priv->num_frags);
852 for (i = 0; i < priv->num_frags; i++) { 872 for (i = 0; i < priv->num_frags; i++) {
853 en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d " 873 en_err(priv,
854 "stride:%d last_offset:%d\n", i, 874 " frag:%d - size:%d prefix:%d align:%d stride:%d\n",
855 priv->frag_info[i].frag_size, 875 i,
856 priv->frag_info[i].frag_prefix_size, 876 priv->frag_info[i].frag_size,
857 priv->frag_info[i].frag_align, 877 priv->frag_info[i].frag_prefix_size,
858 priv->frag_info[i].frag_stride, 878 priv->frag_info[i].frag_align,
859 priv->frag_info[i].last_offset); 879 priv->frag_info[i].frag_stride);
860 } 880 }
861} 881}
862 882
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4e6877a032a8..7c492382da09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -544,7 +544,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
544 if (vlan_tx_tag_present(skb)) 544 if (vlan_tx_tag_present(skb))
545 up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; 545 up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
546 546
547 return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up; 547 return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
548} 548}
549 549
550static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) 550static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 6000342f9725..7e042869ef0c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -448,6 +448,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
448 int i; 448 int i;
449 enum slave_port_gen_event gen_event; 449 enum slave_port_gen_event gen_event;
450 unsigned long flags; 450 unsigned long flags;
451 struct mlx4_vport_state *s_info;
451 452
452 while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) { 453 while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
453 /* 454 /*
@@ -556,7 +557,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
556 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN" 557 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
557 " to slave: %d, port:%d\n", 558 " to slave: %d, port:%d\n",
558 __func__, i, port); 559 __func__, i, port);
559 mlx4_slave_event(dev, i, eqe); 560 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
561 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
562 mlx4_slave_event(dev, i, eqe);
560 } else { /* IB port */ 563 } else { /* IB port */
561 set_and_calc_slave_port_state(dev, i, port, 564 set_and_calc_slave_port_state(dev, i, port,
562 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, 565 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
@@ -580,7 +583,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
580 for (i = 0; i < dev->num_slaves; i++) { 583 for (i = 0; i < dev->num_slaves; i++) {
581 if (i == mlx4_master_func_num(dev)) 584 if (i == mlx4_master_func_num(dev))
582 continue; 585 continue;
583 mlx4_slave_event(dev, i, eqe); 586 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
587 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
588 mlx4_slave_event(dev, i, eqe);
584 } 589 }
585 else /* IB port */ 590 else /* IB port */
586 /* port-up event will be sent to a slave when the 591 /* port-up event will be sent to a slave when the
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 2c97901c6a6d..8873d6802c80 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -133,7 +133,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
133 [4] = "Automatic MAC reassignment support", 133 [4] = "Automatic MAC reassignment support",
134 [5] = "Time stamping support", 134 [5] = "Time stamping support",
135 [6] = "VST (control vlan insertion/stripping) support", 135 [6] = "VST (control vlan insertion/stripping) support",
136 [7] = "FSM (MAC anti-spoofing) support" 136 [7] = "FSM (MAC anti-spoofing) support",
137 [8] = "Dynamic QP updates support"
137 }; 138 };
138 int i; 139 int i;
139 140
@@ -659,6 +660,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
659 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); 660 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
660 661
661 MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 662 MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
663 if (field32 & (1 << 16))
664 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
662 if (field32 & (1 << 26)) 665 if (field32 & (1 << 26))
663 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; 666 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
664 if (field32 & (1 << 20)) 667 if (field32 & (1 << 20))
@@ -830,8 +833,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
830 u8 port_type; 833 u8 port_type;
831 u16 short_field; 834 u16 short_field;
832 int err; 835 int err;
836 int admin_link_state;
833 837
834#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 838#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
839#define MLX4_PORT_LINK_UP_MASK 0x80
835#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c 840#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
836#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e 841#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
837 842
@@ -861,6 +866,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
861 /* set port type to currently operating port type */ 866 /* set port type to currently operating port type */
862 port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); 867 port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
863 868
869 admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
870 if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
871 port_type |= MLX4_PORT_LINK_UP_MASK;
872 else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
873 port_type &= ~MLX4_PORT_LINK_UP_MASK;
874
864 MLX4_PUT(outbox->buf, port_type, 875 MLX4_PUT(outbox->buf, port_type,
865 QUERY_PORT_SUPPORTED_TYPE_OFFSET); 876 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
866 877
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 8a434997a0df..e85af922dcdc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -98,7 +98,7 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
98static bool enable_64b_cqe_eqe; 98static bool enable_64b_cqe_eqe;
99module_param(enable_64b_cqe_eqe, bool, 0444); 99module_param(enable_64b_cqe_eqe, bool, 0444);
100MODULE_PARM_DESC(enable_64b_cqe_eqe, 100MODULE_PARM_DESC(enable_64b_cqe_eqe,
101 "Enable 64 byte CQEs/EQEs when the the FW supports this"); 101 "Enable 64 byte CQEs/EQEs when the FW supports this");
102 102
103#define HCA_GLOBAL_CAP_MASK 0 103#define HCA_GLOBAL_CAP_MASK 0
104 104
@@ -842,11 +842,11 @@ static ssize_t set_port_ib_mtu(struct device *dev,
842 return -EINVAL; 842 return -EINVAL;
843 } 843 }
844 844
845 err = sscanf(buf, "%d", &mtu); 845 err = kstrtoint(buf, 0, &mtu);
846 if (err > 0) 846 if (!err)
847 ibta_mtu = int_to_ibta_mtu(mtu); 847 ibta_mtu = int_to_ibta_mtu(mtu);
848 848
849 if (err <= 0 || ibta_mtu < 0) { 849 if (err || ibta_mtu < 0) {
850 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); 850 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
851 return -EINVAL; 851 return -EINVAL;
852 } 852 }
@@ -2080,6 +2080,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2080 num_vfs, MLX4_MAX_NUM_VF); 2080 num_vfs, MLX4_MAX_NUM_VF);
2081 return -EINVAL; 2081 return -EINVAL;
2082 } 2082 }
2083
2084 if (num_vfs < 0) {
2085 pr_err("num_vfs module parameter cannot be negative\n");
2086 return -EINVAL;
2087 }
2083 /* 2088 /*
2084 * Check for BARs. 2089 * Check for BARs.
2085 */ 2090 */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index df15bb6631cc..17d9277e33ef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -482,6 +482,7 @@ struct mlx4_vport_state {
482 u8 default_qos; 482 u8 default_qos;
483 u32 tx_rate; 483 u32 tx_rate;
484 bool spoofchk; 484 bool spoofchk;
485 u32 link_state;
485}; 486};
486 487
487struct mlx4_vf_admin_state { 488struct mlx4_vf_admin_state {
@@ -570,6 +571,25 @@ struct mlx4_cmd {
570 u8 comm_toggle; 571 u8 comm_toggle;
571}; 572};
572 573
574enum {
575 MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0,
576 MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1,
577 MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE = 1 << 2,
578};
579struct mlx4_vf_immed_vlan_work {
580 struct work_struct work;
581 struct mlx4_priv *priv;
582 int flags;
583 int slave;
584 int vlan_ix;
585 int orig_vlan_ix;
586 u8 port;
587 u8 qos;
588 u16 vlan_id;
589 u16 orig_vlan_id;
590};
591
592
573struct mlx4_uar_table { 593struct mlx4_uar_table {
574 struct mlx4_bitmap bitmap; 594 struct mlx4_bitmap bitmap;
575}; 595};
@@ -1217,4 +1237,6 @@ static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
1217 1237
1218#define NOT_MASKED_PD_BITS 17 1238#define NOT_MASKED_PD_BITS 17
1219 1239
1240void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
1241
1220#endif /* MLX4_H */ 1242#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index b1d7657b2bf5..35fb60e2320c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -96,13 +96,14 @@
96 96
97/* Use the maximum between 16384 and a single page */ 97/* Use the maximum between 16384 and a single page */
98#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) 98#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)
99#define MLX4_EN_ALLOC_ORDER get_order(MLX4_EN_ALLOC_SIZE)
100 99
101/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU 100#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER
101
102/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
102 * and 4K allocations) */ 103 * and 4K allocations) */
103enum { 104enum {
104 FRAG_SZ0 = 512 - NET_IP_ALIGN, 105 FRAG_SZ0 = 1536 - NET_IP_ALIGN,
105 FRAG_SZ1 = 1024, 106 FRAG_SZ1 = 4096,
106 FRAG_SZ2 = 4096, 107 FRAG_SZ2 = 4096,
107 FRAG_SZ3 = MLX4_EN_ALLOC_SIZE 108 FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
108}; 109};
@@ -234,9 +235,10 @@ struct mlx4_en_tx_desc {
234#define MLX4_EN_CX3_HIGH_ID 0x1005 235#define MLX4_EN_CX3_HIGH_ID 0x1005
235 236
236struct mlx4_en_rx_alloc { 237struct mlx4_en_rx_alloc {
237 struct page *page; 238 struct page *page;
238 dma_addr_t dma; 239 dma_addr_t dma;
239 u16 offset; 240 u32 offset;
241 u32 size;
240}; 242};
241 243
242struct mlx4_en_tx_ring { 244struct mlx4_en_tx_ring {
@@ -290,6 +292,11 @@ struct mlx4_en_rx_ring {
290 void *rx_info; 292 void *rx_info;
291 unsigned long bytes; 293 unsigned long bytes;
292 unsigned long packets; 294 unsigned long packets;
295#ifdef CONFIG_NET_LL_RX_POLL
296 unsigned long yields;
297 unsigned long misses;
298 unsigned long cleaned;
299#endif
293 unsigned long csum_ok; 300 unsigned long csum_ok;
294 unsigned long csum_none; 301 unsigned long csum_none;
295 int hwtstamp_rx_filter; 302 int hwtstamp_rx_filter;
@@ -310,6 +317,19 @@ struct mlx4_en_cq {
310 u16 moder_cnt; 317 u16 moder_cnt;
311 struct mlx4_cqe *buf; 318 struct mlx4_cqe *buf;
312#define MLX4_EN_OPCODE_ERROR 0x1e 319#define MLX4_EN_OPCODE_ERROR 0x1e
320
321#ifdef CONFIG_NET_LL_RX_POLL
322 unsigned int state;
323#define MLX4_EN_CQ_STATE_IDLE 0
324#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */
325#define MLX4_EN_CQ_STATE_POLL 2 /* poll owns this CQ */
326#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL)
327#define MLX4_EN_CQ_STATE_NAPI_YIELD 4 /* NAPI yielded this CQ */
328#define MLX4_EN_CQ_STATE_POLL_YIELD 8 /* poll yielded this CQ */
329#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
330#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
331 spinlock_t poll_lock; /* protects from LLS/napi conflicts */
332#endif /* CONFIG_NET_LL_RX_POLL */
313}; 333};
314 334
315struct mlx4_en_port_profile { 335struct mlx4_en_port_profile {
@@ -421,8 +441,6 @@ struct mlx4_en_frag_info {
421 u16 frag_prefix_size; 441 u16 frag_prefix_size;
422 u16 frag_stride; 442 u16 frag_stride;
423 u16 frag_align; 443 u16 frag_align;
424 u16 last_offset;
425
426}; 444};
427 445
428#ifdef CONFIG_MLX4_EN_DCB 446#ifdef CONFIG_MLX4_EN_DCB
@@ -562,6 +580,115 @@ struct mlx4_mac_entry {
562 struct rcu_head rcu; 580 struct rcu_head rcu;
563}; 581};
564 582
583#ifdef CONFIG_NET_LL_RX_POLL
584static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
585{
586 spin_lock_init(&cq->poll_lock);
587 cq->state = MLX4_EN_CQ_STATE_IDLE;
588}
589
590/* called from the device poll rutine to get ownership of a cq */
591static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
592{
593 int rc = true;
594 spin_lock(&cq->poll_lock);
595 if (cq->state & MLX4_CQ_LOCKED) {
596 WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);
597 cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD;
598 rc = false;
599 } else
600 /* we don't care if someone yielded */
601 cq->state = MLX4_EN_CQ_STATE_NAPI;
602 spin_unlock(&cq->poll_lock);
603 return rc;
604}
605
606/* returns true is someone tried to get the cq while napi had it */
607static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
608{
609 int rc = false;
610 spin_lock(&cq->poll_lock);
611 WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL |
612 MLX4_EN_CQ_STATE_NAPI_YIELD));
613
614 if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
615 rc = true;
616 cq->state = MLX4_EN_CQ_STATE_IDLE;
617 spin_unlock(&cq->poll_lock);
618 return rc;
619}
620
621/* called from mlx4_en_low_latency_poll() */
622static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
623{
624 int rc = true;
625 spin_lock_bh(&cq->poll_lock);
626 if ((cq->state & MLX4_CQ_LOCKED)) {
627 struct net_device *dev = cq->dev;
628 struct mlx4_en_priv *priv = netdev_priv(dev);
629 struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
630
631 cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
632 rc = false;
633 rx_ring->yields++;
634 } else
635 /* preserve yield marks */
636 cq->state |= MLX4_EN_CQ_STATE_POLL;
637 spin_unlock_bh(&cq->poll_lock);
638 return rc;
639}
640
641/* returns true if someone tried to get the cq while it was locked */
642static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
643{
644 int rc = false;
645 spin_lock_bh(&cq->poll_lock);
646 WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI));
647
648 if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
649 rc = true;
650 cq->state = MLX4_EN_CQ_STATE_IDLE;
651 spin_unlock_bh(&cq->poll_lock);
652 return rc;
653}
654
655/* true if a socket is polling, even if it did not get the lock */
656static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
657{
658 WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
659 return cq->state & CQ_USER_PEND;
660}
661#else
662static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
663{
664}
665
666static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
667{
668 return true;
669}
670
671static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
672{
673 return false;
674}
675
676static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
677{
678 return false;
679}
680
681static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
682{
683 return false;
684}
685
686static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
687{
688 return false;
689}
690#endif /* CONFIG_NET_LL_RX_POLL */
691
565#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) 692#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
566 693
567void mlx4_en_update_loopback_state(struct net_device *dev, 694void mlx4_en_update_loopback_state(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1157f028a90f..f984a89c27df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -101,6 +101,8 @@ struct res_qp {
101 spinlock_t mcg_spl; 101 spinlock_t mcg_spl;
102 int local_qpn; 102 int local_qpn;
103 atomic_t ref_count; 103 atomic_t ref_count;
104 u32 qpc_flags;
105 u8 sched_queue;
104}; 106};
105 107
106enum res_mtt_states { 108enum res_mtt_states {
@@ -355,7 +357,7 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
355 357
356static int update_vport_qp_param(struct mlx4_dev *dev, 358static int update_vport_qp_param(struct mlx4_dev *dev,
357 struct mlx4_cmd_mailbox *inbox, 359 struct mlx4_cmd_mailbox *inbox,
358 u8 slave) 360 u8 slave, u32 qpn)
359{ 361{
360 struct mlx4_qp_context *qpc = inbox->buf + 8; 362 struct mlx4_qp_context *qpc = inbox->buf + 8;
361 struct mlx4_vport_oper_state *vp_oper; 363 struct mlx4_vport_oper_state *vp_oper;
@@ -369,12 +371,30 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
369 371
370 if (MLX4_VGT != vp_oper->state.default_vlan) { 372 if (MLX4_VGT != vp_oper->state.default_vlan) {
371 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; 373 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
372 if (MLX4_QP_ST_RC == qp_type) 374 if (MLX4_QP_ST_RC == qp_type ||
375 (MLX4_QP_ST_UD == qp_type &&
376 !mlx4_is_qp_reserved(dev, qpn)))
373 return -EINVAL; 377 return -EINVAL;
374 378
379 /* the reserved QPs (special, proxy, tunnel)
380 * do not operate over vlans
381 */
382 if (mlx4_is_qp_reserved(dev, qpn))
383 return 0;
384
375 /* force strip vlan by clear vsd */ 385 /* force strip vlan by clear vsd */
376 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); 386 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
377 if (0 != vp_oper->state.default_vlan) { 387
388 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
389 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
390 qpc->pri_path.vlan_control =
391 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
392 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
393 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
394 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
395 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
396 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
397 } else if (0 != vp_oper->state.default_vlan) {
378 qpc->pri_path.vlan_control = 398 qpc->pri_path.vlan_control =
379 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 399 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
380 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | 400 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
@@ -2114,6 +2134,8 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2114 if (err) 2134 if (err)
2115 return err; 2135 return err;
2116 qp->local_qpn = local_qpn; 2136 qp->local_qpn = local_qpn;
2137 qp->sched_queue = 0;
2138 qp->qpc_flags = be32_to_cpu(qpc->flags);
2117 2139
2118 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 2140 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2119 if (err) 2141 if (err)
@@ -2836,6 +2858,9 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2836{ 2858{
2837 int err; 2859 int err;
2838 struct mlx4_qp_context *qpc = inbox->buf + 8; 2860 struct mlx4_qp_context *qpc = inbox->buf + 8;
2861 int qpn = vhcr->in_modifier & 0x7fffff;
2862 struct res_qp *qp;
2863 u8 orig_sched_queue;
2839 2864
2840 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); 2865 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2841 if (err) 2866 if (err)
@@ -2844,11 +2869,30 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2844 update_pkey_index(dev, slave, inbox); 2869 update_pkey_index(dev, slave, inbox);
2845 update_gid(dev, inbox, (u8)slave); 2870 update_gid(dev, inbox, (u8)slave);
2846 adjust_proxy_tun_qkey(dev, vhcr, qpc); 2871 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2847 err = update_vport_qp_param(dev, inbox, slave); 2872 orig_sched_queue = qpc->pri_path.sched_queue;
2873 err = update_vport_qp_param(dev, inbox, slave, qpn);
2848 if (err) 2874 if (err)
2849 return err; 2875 return err;
2850 2876
2851 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2877 err = get_res(dev, slave, qpn, RES_QP, &qp);
2878 if (err)
2879 return err;
2880 if (qp->com.from_state != RES_QP_HW) {
2881 err = -EBUSY;
2882 goto out;
2883 }
2884
2885 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2886out:
2887 /* if no error, save sched queue value passed in by VF. This is
2888 * essentially the QOS value provided by the VF. This will be useful
2889 * if we allow dynamic changes from VST back to VGT
2890 */
2891 if (!err)
2892 qp->sched_queue = orig_sched_queue;
2893
2894 put_res(dev, slave, qpn, RES_QP);
2895 return err;
2852} 2896}
2853 2897
2854int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 2898int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
@@ -3932,3 +3976,112 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3932 rem_slave_xrcdns(dev, slave); 3976 rem_slave_xrcdns(dev, slave);
3933 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 3977 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3934} 3978}
3979
3980void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
3981{
3982 struct mlx4_vf_immed_vlan_work *work =
3983 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
3984 struct mlx4_cmd_mailbox *mailbox;
3985 struct mlx4_update_qp_context *upd_context;
3986 struct mlx4_dev *dev = &work->priv->dev;
3987 struct mlx4_resource_tracker *tracker =
3988 &work->priv->mfunc.master.res_tracker;
3989 struct list_head *qp_list =
3990 &tracker->slave_list[work->slave].res_list[RES_QP];
3991 struct res_qp *qp;
3992 struct res_qp *tmp;
3993 u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
3994 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
3995 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
3996 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
3997 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
3998 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
3999 (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4000 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4001
4002 int err;
4003 int port, errors = 0;
4004 u8 vlan_control;
4005
4006 if (mlx4_is_slave(dev)) {
4007 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4008 work->slave);
4009 goto out;
4010 }
4011
4012 mailbox = mlx4_alloc_cmd_mailbox(dev);
4013 if (IS_ERR(mailbox))
4014 goto out;
4015 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4016 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4017 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4018 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4019 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4020 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4021 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4022 else if (!work->vlan_id)
4023 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4024 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4025 else
4026 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4027 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4028 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4029
4030 upd_context = mailbox->buf;
4031 upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
4032 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4033 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4034
4035 spin_lock_irq(mlx4_tlock(dev));
4036 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4037 spin_unlock_irq(mlx4_tlock(dev));
4038 if (qp->com.owner == work->slave) {
4039 if (qp->com.from_state != RES_QP_HW ||
4040 !qp->sched_queue || /* no INIT2RTR trans yet */
4041 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4042 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4043 spin_lock_irq(mlx4_tlock(dev));
4044 continue;
4045 }
4046 port = (qp->sched_queue >> 6 & 1) + 1;
4047 if (port != work->port) {
4048 spin_lock_irq(mlx4_tlock(dev));
4049 continue;
4050 }
4051 upd_context->qp_context.pri_path.sched_queue =
4052 qp->sched_queue & 0xC7;
4053 upd_context->qp_context.pri_path.sched_queue |=
4054 ((work->qos & 0x7) << 3);
4055
4056 err = mlx4_cmd(dev, mailbox->dma,
4057 qp->local_qpn & 0xffffff,
4058 0, MLX4_CMD_UPDATE_QP,
4059 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4060 if (err) {
4061 mlx4_info(dev, "UPDATE_QP failed for slave %d, "
4062 "port %d, qpn %d (%d)\n",
4063 work->slave, port, qp->local_qpn,
4064 err);
4065 errors++;
4066 }
4067 }
4068 spin_lock_irq(mlx4_tlock(dev));
4069 }
4070 spin_unlock_irq(mlx4_tlock(dev));
4071 mlx4_free_cmd_mailbox(dev, mailbox);
4072
4073 if (errors)
4074 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4075 errors, work->slave, work->port);
4076
4077 /* unregister previous vlan_id if needed and we had no errors
4078 * while updating the QPs
4079 */
4080 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4081 NO_INDX != work->orig_vlan_ix)
4082 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4083 work->orig_vlan_ix);
4084out:
4085 kfree(work);
4086 return;
4087}
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index fe42fc00d8d3..d16b11ed2e52 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -22,7 +22,6 @@ if NET_VENDOR_MICREL
22config ARM_KS8695_ETHER 22config ARM_KS8695_ETHER
23 tristate "KS8695 Ethernet support" 23 tristate "KS8695 Ethernet support"
24 depends on ARM && ARCH_KS8695 24 depends on ARM && ARCH_KS8695
25 select NET_CORE
26 select MII 25 select MII
27 ---help--- 26 ---help---
28 If you wish to compile a kernel for the KS8695 and want to 27 If you wish to compile a kernel for the KS8695 and want to
@@ -39,7 +38,6 @@ config KS8842
39config KS8851 38config KS8851
40 tristate "Micrel KS8851 SPI" 39 tristate "Micrel KS8851 SPI"
41 depends on SPI 40 depends on SPI
42 select NET_CORE
43 select MII 41 select MII
44 select CRC32 42 select CRC32
45 select EEPROM_93CX6 43 select EEPROM_93CX6
@@ -49,7 +47,6 @@ config KS8851
49config KS8851_MLL 47config KS8851_MLL
50 tristate "Micrel KS8851 MLL" 48 tristate "Micrel KS8851 MLL"
51 depends on HAS_IOMEM 49 depends on HAS_IOMEM
52 select NET_CORE
53 select MII 50 select MII
54 ---help--- 51 ---help---
55 This platform driver is for Micrel KS8851 Address/data bus 52 This platform driver is for Micrel KS8851 Address/data bus
@@ -58,7 +55,6 @@ config KS8851_MLL
58config KSZ884X_PCI 55config KSZ884X_PCI
59 tristate "Micrel KSZ8841/2 PCI" 56 tristate "Micrel KSZ8841/2 PCI"
60 depends on PCI 57 depends on PCI
61 select NET_CORE
62 select MII 58 select MII
63 select CRC32 59 select CRC32
64 ---help--- 60 ---help---
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index b6c60fdef4ff..106eb972f2ac 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1600,7 +1600,6 @@ ks8695_drv_remove(struct platform_device *pdev)
1600 struct net_device *ndev = platform_get_drvdata(pdev); 1600 struct net_device *ndev = platform_get_drvdata(pdev);
1601 struct ks8695_priv *ksp = netdev_priv(ndev); 1601 struct ks8695_priv *ksp = netdev_priv(ndev);
1602 1602
1603 platform_set_drvdata(pdev, NULL);
1604 netif_napi_del(&ksp->napi); 1603 netif_napi_del(&ksp->napi);
1605 1604
1606 unregister_netdev(ndev); 1605 unregister_netdev(ndev);
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index fbcb9e74d7fc..e393d998be89 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1250,7 +1250,6 @@ static int ks8842_remove(struct platform_device *pdev)
1250 iounmap(adapter->hw_addr); 1250 iounmap(adapter->hw_addr);
1251 free_netdev(netdev); 1251 free_netdev(netdev);
1252 release_mem_region(iomem->start, resource_size(iomem)); 1252 release_mem_region(iomem->start, resource_size(iomem));
1253 platform_set_drvdata(pdev, NULL);
1254 return 0; 1253 return 0;
1255} 1254}
1256 1255
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index ddaf138ce0d4..ac20098b542a 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -35,6 +35,9 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/ks8851_mll.h> 37#include <linux/ks8851_mll.h>
38#include <linux/of.h>
39#include <linux/of_device.h>
40#include <linux/of_net.h>
38 41
39#define DRV_NAME "ks8851_mll" 42#define DRV_NAME "ks8851_mll"
40 43
@@ -1524,6 +1527,13 @@ static int ks_hw_init(struct ks_net *ks)
1524 return true; 1527 return true;
1525} 1528}
1526 1529
1530#if defined(CONFIG_OF)
1531static const struct of_device_id ks8851_ml_dt_ids[] = {
1532 { .compatible = "micrel,ks8851-mll" },
1533 { /* sentinel */ }
1534};
1535MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
1536#endif
1527 1537
1528static int ks8851_probe(struct platform_device *pdev) 1538static int ks8851_probe(struct platform_device *pdev)
1529{ 1539{
@@ -1532,7 +1542,7 @@ static int ks8851_probe(struct platform_device *pdev)
1532 struct net_device *netdev; 1542 struct net_device *netdev;
1533 struct ks_net *ks; 1543 struct ks_net *ks;
1534 u16 id, data; 1544 u16 id, data;
1535 struct ks8851_mll_platform_data *pdata; 1545 const char *mac;
1536 1546
1537 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1547 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1538 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1548 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1619,13 +1629,21 @@ static int ks8851_probe(struct platform_device *pdev)
1619 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); 1629 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1620 1630
1621 /* overwriting the default MAC address */ 1631 /* overwriting the default MAC address */
1622 pdata = pdev->dev.platform_data; 1632 if (pdev->dev.of_node) {
1623 if (!pdata) { 1633 mac = of_get_mac_address(pdev->dev.of_node);
1624 netdev_err(netdev, "No platform data\n"); 1634 if (mac)
1625 err = -ENODEV; 1635 memcpy(ks->mac_addr, mac, ETH_ALEN);
1626 goto err_pdata; 1636 } else {
1637 struct ks8851_mll_platform_data *pdata;
1638
1639 pdata = pdev->dev.platform_data;
1640 if (!pdata) {
1641 netdev_err(netdev, "No platform data\n");
1642 err = -ENODEV;
1643 goto err_pdata;
1644 }
1645 memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN);
1627 } 1646 }
1628 memcpy(ks->mac_addr, pdata->mac_addr, 6);
1629 if (!is_valid_ether_addr(ks->mac_addr)) { 1647 if (!is_valid_ether_addr(ks->mac_addr)) {
1630 /* Use random MAC address if none passed */ 1648 /* Use random MAC address if none passed */
1631 eth_random_addr(ks->mac_addr); 1649 eth_random_addr(ks->mac_addr);
@@ -1671,7 +1689,6 @@ static int ks8851_remove(struct platform_device *pdev)
1671 iounmap(ks->hw_addr); 1689 iounmap(ks->hw_addr);
1672 free_netdev(netdev); 1690 free_netdev(netdev);
1673 release_mem_region(iomem->start, resource_size(iomem)); 1691 release_mem_region(iomem->start, resource_size(iomem));
1674 platform_set_drvdata(pdev, NULL);
1675 return 0; 1692 return 0;
1676 1693
1677} 1694}
@@ -1680,6 +1697,7 @@ static struct platform_driver ks8851_platform_driver = {
1680 .driver = { 1697 .driver = {
1681 .name = DRV_NAME, 1698 .name = DRV_NAME,
1682 .owner = THIS_MODULE, 1699 .owner = THIS_MODULE,
1700 .of_match_table = of_match_ptr(ks8851_ml_dt_ids),
1683 }, 1701 },
1684 .probe = ks8851_probe, 1702 .probe = ks8851_probe,
1685 .remove = ks8851_remove, 1703 .remove = ks8851_remove,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 7be9788ed0f6..967bae8b85c5 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3299,7 +3299,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
3299 if (mgp == NULL) 3299 if (mgp == NULL)
3300 return -EINVAL; 3300 return -EINVAL;
3301 netdev = mgp->dev; 3301 netdev = mgp->dev;
3302 pci_set_power_state(pdev, 0); /* zeros conf space as a side effect */ 3302 pci_set_power_state(pdev, PCI_D0); /* zeros conf space as a side effect */
3303 msleep(5); /* give card time to respond */ 3303 msleep(5); /* give card time to respond */
3304 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); 3304 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
3305 if (vendor == 0xffff) { 3305 if (vendor == 0xffff) {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index cbfaed5f2f8d..5a20eaf903dd 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3444,7 +3444,7 @@ static int vxge_device_register(struct __vxge_hw_device *hldev,
3444 } 3444 }
3445 3445
3446 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3446 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3447 "%s : checksuming enabled", __func__); 3447 "%s : checksumming enabled", __func__);
3448 3448
3449 if (high_dma) { 3449 if (high_dma) {
3450 ndev->features |= NETIF_F_HIGHDMA; 3450 ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index cb9e63831500..dc2c6f561e9a 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -422,7 +422,6 @@ exit_free_pfifo:
422exit_free_xc: 422exit_free_xc:
423 free_xc(priv->xc); 423 free_xc(priv->xc);
424exit_free_netdev: 424exit_free_netdev:
425 platform_set_drvdata(pdev, NULL);
426 free_netdev(ndev); 425 free_netdev(ndev);
427exit: 426exit:
428 return ret; 427 return ret;
@@ -430,11 +429,9 @@ exit:
430 429
431static int netx_eth_drv_remove(struct platform_device *pdev) 430static int netx_eth_drv_remove(struct platform_device *pdev)
432{ 431{
433 struct net_device *ndev = dev_get_drvdata(&pdev->dev); 432 struct net_device *ndev = platform_get_drvdata(pdev);
434 struct netx_eth_priv *priv = netdev_priv(ndev); 433 struct netx_eth_priv *priv = netdev_priv(ndev);
435 434
436 platform_set_drvdata(pdev, NULL);
437
438 unregister_netdev(ndev); 435 unregister_netdev(ndev);
439 xc_stop(priv->xc); 436 xc_stop(priv->xc);
440 free_xc(priv->xc); 437 free_xc(priv->xc);
diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig
index 334c17183095..01182b559473 100644
--- a/drivers/net/ethernet/nuvoton/Kconfig
+++ b/drivers/net/ethernet/nuvoton/Kconfig
@@ -22,7 +22,6 @@ config W90P910_ETH
22 tristate "Nuvoton w90p910 Ethernet support" 22 tristate "Nuvoton w90p910 Ethernet support"
23 depends on ARM && ARCH_W90X900 23 depends on ARM && ARCH_W90X900
24 select PHYLIB 24 select PHYLIB
25 select NET_CORE
26 select MII 25 select MII
27 ---help--- 26 ---help---
28 Say Y here if you want to use built-in Ethernet ports 27 Say Y here if you want to use built-in Ethernet ports
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 3df8287b7452..e88bdb1aa669 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -1051,7 +1051,6 @@ failed_put_clk:
1051 clk_put(ether->clk); 1051 clk_put(ether->clk);
1052failed_free_rxirq: 1052failed_free_rxirq:
1053 free_irq(ether->rxirq, pdev); 1053 free_irq(ether->rxirq, pdev);
1054 platform_set_drvdata(pdev, NULL);
1055failed_free_txirq: 1054failed_free_txirq:
1056 free_irq(ether->txirq, pdev); 1055 free_irq(ether->txirq, pdev);
1057failed_free_io: 1056failed_free_io:
@@ -1080,7 +1079,6 @@ static int w90p910_ether_remove(struct platform_device *pdev)
1080 free_irq(ether->rxirq, dev); 1079 free_irq(ether->rxirq, dev);
1081 1080
1082 del_timer_sync(&ether->check_timer); 1081 del_timer_sync(&ether->check_timer);
1083 platform_set_drvdata(pdev, NULL);
1084 1082
1085 free_netdev(dev); 1083 free_netdev(dev);
1086 return 0; 1084 return 0;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index b003fe53c8e2..098b96dad66f 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -6340,7 +6340,7 @@ static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
6340 {0,}, 6340 {0,},
6341}; 6341};
6342 6342
6343static struct pci_driver driver = { 6343static struct pci_driver forcedeth_pci_driver = {
6344 .name = DRV_NAME, 6344 .name = DRV_NAME,
6345 .id_table = pci_tbl, 6345 .id_table = pci_tbl,
6346 .probe = nv_probe, 6346 .probe = nv_probe,
@@ -6349,16 +6349,6 @@ static struct pci_driver driver = {
6349 .driver.pm = NV_PM_OPS, 6349 .driver.pm = NV_PM_OPS,
6350}; 6350};
6351 6351
6352static int __init init_nic(void)
6353{
6354 return pci_register_driver(&driver);
6355}
6356
6357static void __exit exit_nic(void)
6358{
6359 pci_unregister_driver(&driver);
6360}
6361
6362module_param(max_interrupt_work, int, 0); 6352module_param(max_interrupt_work, int, 0);
6363MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); 6353MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6364module_param(optimization_mode, int, 0); 6354module_param(optimization_mode, int, 0);
@@ -6379,11 +6369,8 @@ module_param(debug_tx_timeout, bool, 0);
6379MODULE_PARM_DESC(debug_tx_timeout, 6369MODULE_PARM_DESC(debug_tx_timeout,
6380 "Dump tx related registers and ring when tx_timeout happens"); 6370 "Dump tx related registers and ring when tx_timeout happens");
6381 6371
6372module_pci_driver(forcedeth_pci_driver);
6382MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 6373MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6383MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); 6374MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6384MODULE_LICENSE("GPL"); 6375MODULE_LICENSE("GPL");
6385
6386MODULE_DEVICE_TABLE(pci, pci_tbl); 6376MODULE_DEVICE_TABLE(pci, pci_tbl);
6387
6388module_init(init_nic);
6389module_exit(exit_nic);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 55a5548d6add..a061b93efe66 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1483,7 +1483,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1483 return 0; 1483 return 0;
1484 1484
1485err_out_unregister_netdev: 1485err_out_unregister_netdev:
1486 platform_set_drvdata(pdev, NULL);
1487 unregister_netdev(ndev); 1486 unregister_netdev(ndev);
1488err_out_dma_unmap: 1487err_out_dma_unmap:
1489 if (!use_iram_for_net(&pldat->pdev->dev) || 1488 if (!use_iram_for_net(&pldat->pdev->dev) ||
@@ -1511,7 +1510,6 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
1511 struct netdata_local *pldat = netdev_priv(ndev); 1510 struct netdata_local *pldat = netdev_priv(ndev);
1512 1511
1513 unregister_netdev(ndev); 1512 unregister_netdev(ndev);
1514 platform_set_drvdata(pdev, NULL);
1515 1513
1516 if (!use_iram_for_net(&pldat->pdev->dev) || 1514 if (!use_iram_for_net(&pldat->pdev->dev) ||
1517 pldat->dma_buff_size > lpc32xx_return_iram_size()) 1515 pldat->dma_buff_size > lpc32xx_return_iram_size())
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 91a8a5d28037..622aa75904c4 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1448,7 +1448,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
1448 1448
1449 SET_NETDEV_DEV(netdev, &pdev->dev); 1449 SET_NETDEV_DEV(netdev, &pdev->dev);
1450 1450
1451 dev_set_drvdata(&pdev->dev, netdev); 1451 platform_set_drvdata(pdev, netdev);
1452 p = netdev_priv(netdev); 1452 p = netdev_priv(netdev);
1453 netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, 1453 netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
1454 OCTEON_MGMT_NAPI_WEIGHT); 1454 OCTEON_MGMT_NAPI_WEIGHT);
@@ -1570,7 +1570,7 @@ err:
1570 1570
1571static int octeon_mgmt_remove(struct platform_device *pdev) 1571static int octeon_mgmt_remove(struct platform_device *pdev)
1572{ 1572{
1573 struct net_device *netdev = dev_get_drvdata(&pdev->dev); 1573 struct net_device *netdev = platform_get_drvdata(pdev);
1574 1574
1575 unregister_netdev(netdev); 1575 unregister_netdev(netdev);
1576 free_netdev(netdev); 1576 free_netdev(netdev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 34d05bf72b2e..cb22341a14a8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -5,7 +5,6 @@
5config PCH_GBE 5config PCH_GBE
6 tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" 6 tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
7 depends on PCI 7 depends on PCI
8 select NET_CORE
9 select MII 8 select MII
10 select PTP_1588_CLOCK_PCH 9 select PTP_1588_CLOCK_PCH
11 ---help--- 10 ---help---
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 7fb7e178c74e..7779036690cc 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -633,6 +633,8 @@ struct pch_gbe_adapter {
633 struct pci_dev *ptp_pdev; 633 struct pci_dev *ptp_pdev;
634}; 634};
635 635
636#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw)
637
636extern const char pch_driver_version[]; 638extern const char pch_driver_version[];
637 639
638/* pch_gbe_main.c */ 640/* pch_gbe_main.c */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index 5ae03e815ee9..ff3ad70935a6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -19,6 +19,7 @@
19 */ 19 */
20#include "pch_gbe.h" 20#include "pch_gbe.h"
21#include "pch_gbe_phy.h" 21#include "pch_gbe_phy.h"
22#include "pch_gbe_api.h"
22 23
23/* bus type values */ 24/* bus type values */
24#define pch_gbe_bus_type_unknown 0 25#define pch_gbe_bus_type_unknown 0
@@ -70,7 +71,9 @@ static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw)
70 71
71 ret_val = pch_gbe_phy_get_id(hw); 72 ret_val = pch_gbe_phy_get_id(hw);
72 if (ret_val) { 73 if (ret_val) {
73 pr_err("pch_gbe_phy_get_id error\n"); 74 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
75
76 netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
74 return ret_val; 77 return ret_val;
75 } 78 }
76 pch_gbe_phy_init_setting(hw); 79 pch_gbe_phy_init_setting(hw);
@@ -112,10 +115,12 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
112 * 0: Successfully 115 * 0: Successfully
113 * ENOSYS: Function is not registered 116 * ENOSYS: Function is not registered
114 */ 117 */
115inline s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) 118s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
116{ 119{
117 if (!hw->reg) { 120 if (!hw->reg) {
118 pr_err("ERROR: Registers not mapped\n"); 121 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
122
123 netdev_err(adapter->netdev, "ERROR: Registers not mapped\n");
119 return -ENOSYS; 124 return -ENOSYS;
120 } 125 }
121 pch_gbe_plat_init_function_pointers(hw); 126 pch_gbe_plat_init_function_pointers(hw);
@@ -126,12 +131,15 @@ inline s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
126 * pch_gbe_hal_get_bus_info - Obtain bus information for adapter 131 * pch_gbe_hal_get_bus_info - Obtain bus information for adapter
127 * @hw: Pointer to the HW structure 132 * @hw: Pointer to the HW structure
128 */ 133 */
129inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) 134void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
130{ 135{
131 if (!hw->func->get_bus_info) 136 if (!hw->func->get_bus_info) {
132 pr_err("ERROR: configuration\n"); 137 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
133 else 138
134 hw->func->get_bus_info(hw); 139 netdev_err(adapter->netdev, "ERROR: configuration\n");
140 return;
141 }
142 hw->func->get_bus_info(hw);
135} 143}
136 144
137/** 145/**
@@ -141,10 +149,12 @@ inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
141 * 0: Successfully 149 * 0: Successfully
142 * ENOSYS: Function is not registered 150 * ENOSYS: Function is not registered
143 */ 151 */
144inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) 152s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
145{ 153{
146 if (!hw->func->init_hw) { 154 if (!hw->func->init_hw) {
147 pr_err("ERROR: configuration\n"); 155 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
156
157 netdev_err(adapter->netdev, "ERROR: configuration\n");
148 return -ENOSYS; 158 return -ENOSYS;
149 } 159 }
150 return hw->func->init_hw(hw); 160 return hw->func->init_hw(hw);
@@ -159,7 +169,7 @@ inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
159 * 0: Successfully 169 * 0: Successfully
160 * Negative value: Failed 170 * Negative value: Failed
161 */ 171 */
162inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, 172s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
163 u16 *data) 173 u16 *data)
164{ 174{
165 if (!hw->func->read_phy_reg) 175 if (!hw->func->read_phy_reg)
@@ -176,7 +186,7 @@ inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
176 * 0: Successfully 186 * 0: Successfully
177 * Negative value: Failed 187 * Negative value: Failed
178 */ 188 */
179inline s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, 189s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset,
180 u16 data) 190 u16 data)
181{ 191{
182 if (!hw->func->write_phy_reg) 192 if (!hw->func->write_phy_reg)
@@ -188,24 +198,30 @@ inline s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset,
188 * pch_gbe_hal_phy_hw_reset - Hard PHY reset 198 * pch_gbe_hal_phy_hw_reset - Hard PHY reset
189 * @hw: Pointer to the HW structure 199 * @hw: Pointer to the HW structure
190 */ 200 */
191inline void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) 201void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw)
192{ 202{
193 if (!hw->func->reset_phy) 203 if (!hw->func->reset_phy) {
194 pr_err("ERROR: configuration\n"); 204 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
195 else 205
196 hw->func->reset_phy(hw); 206 netdev_err(adapter->netdev, "ERROR: configuration\n");
207 return;
208 }
209 hw->func->reset_phy(hw);
197} 210}
198 211
199/** 212/**
200 * pch_gbe_hal_phy_sw_reset - Soft PHY reset 213 * pch_gbe_hal_phy_sw_reset - Soft PHY reset
201 * @hw: Pointer to the HW structure 214 * @hw: Pointer to the HW structure
202 */ 215 */
203inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) 216void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
204{ 217{
205 if (!hw->func->sw_reset_phy) 218 if (!hw->func->sw_reset_phy) {
206 pr_err("ERROR: configuration\n"); 219 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
207 else 220
208 hw->func->sw_reset_phy(hw); 221 netdev_err(adapter->netdev, "ERROR: configuration\n");
222 return;
223 }
224 hw->func->sw_reset_phy(hw);
209} 225}
210 226
211/** 227/**
@@ -215,10 +231,12 @@ inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
215 * 0: Successfully 231 * 0: Successfully
216 * ENOSYS: Function is not registered 232 * ENOSYS: Function is not registered
217 */ 233 */
218inline s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) 234s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
219{ 235{
220 if (!hw->func->read_mac_addr) { 236 if (!hw->func->read_mac_addr) {
221 pr_err("ERROR: configuration\n"); 237 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
238
239 netdev_err(adapter->netdev, "ERROR: configuration\n");
222 return -ENOSYS; 240 return -ENOSYS;
223 } 241 }
224 return hw->func->read_mac_addr(hw); 242 return hw->func->read_mac_addr(hw);
@@ -228,7 +246,7 @@ inline s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
228 * pch_gbe_hal_power_up_phy - Power up PHY 246 * pch_gbe_hal_power_up_phy - Power up PHY
229 * @hw: Pointer to the HW structure 247 * @hw: Pointer to the HW structure
230 */ 248 */
231inline void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) 249void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw)
232{ 250{
233 if (hw->func->power_up_phy) 251 if (hw->func->power_up_phy)
234 hw->func->power_up_phy(hw); 252 hw->func->power_up_phy(hw);
@@ -238,7 +256,7 @@ inline void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw)
238 * pch_gbe_hal_power_down_phy - Power down PHY 256 * pch_gbe_hal_power_down_phy - Power down PHY
239 * @hw: Pointer to the HW structure 257 * @hw: Pointer to the HW structure
240 */ 258 */
241inline void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw) 259void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw)
242{ 260{
243 if (hw->func->power_down_phy) 261 if (hw->func->power_down_phy)
244 hw->func->power_down_phy(hw); 262 hw->func->power_down_phy(hw);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 24b787be6062..1129db0cdf82 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -122,7 +122,7 @@ static int pch_gbe_set_settings(struct net_device *netdev,
122 } 122 }
123 ret = mii_ethtool_sset(&adapter->mii, ecmd); 123 ret = mii_ethtool_sset(&adapter->mii, ecmd);
124 if (ret) { 124 if (ret) {
125 pr_err("Error: mii_ethtool_sset\n"); 125 netdev_err(netdev, "Error: mii_ethtool_sset\n");
126 return ret; 126 return ret;
127 } 127 }
128 hw->mac.link_speed = speed; 128 hw->mac.link_speed = speed;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 0c1c65a9ce5e..ab1039a95bf9 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -287,7 +287,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
287 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; 287 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
288} 288}
289 289
290inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) 290static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
291{ 291{
292 iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); 292 iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
293} 293}
@@ -300,6 +300,7 @@ inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
300 */ 300 */
301s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) 301s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
302{ 302{
303 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
303 u32 adr1a, adr1b; 304 u32 adr1a, adr1b;
304 305
305 adr1a = ioread32(&hw->reg->mac_adr[0].high); 306 adr1a = ioread32(&hw->reg->mac_adr[0].high);
@@ -312,7 +313,7 @@ s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
312 hw->mac.addr[4] = (u8)(adr1b & 0xFF); 313 hw->mac.addr[4] = (u8)(adr1b & 0xFF);
313 hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF); 314 hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
314 315
315 pr_debug("hw->mac.addr : %pM\n", hw->mac.addr); 316 netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr);
316 return 0; 317 return 0;
317} 318}
318 319
@@ -324,6 +325,7 @@ s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
324static void pch_gbe_wait_clr_bit(void *reg, u32 bit) 325static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
325{ 326{
326 u32 tmp; 327 u32 tmp;
328
327 /* wait busy */ 329 /* wait busy */
328 tmp = 1000; 330 tmp = 1000;
329 while ((ioread32(reg) & bit) && --tmp) 331 while ((ioread32(reg) & bit) && --tmp)
@@ -340,9 +342,10 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
340 */ 342 */
341static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index) 343static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
342{ 344{
345 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
343 u32 mar_low, mar_high, adrmask; 346 u32 mar_low, mar_high, adrmask;
344 347
345 pr_debug("index : 0x%x\n", index); 348 netdev_dbg(adapter->netdev, "index : 0x%x\n", index);
346 349
347 /* 350 /*
348 * HW expects these in little endian so we reverse the byte order 351 * HW expects these in little endian so we reverse the byte order
@@ -468,10 +471,11 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
468 */ 471 */
469s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw) 472s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
470{ 473{
474 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
471 struct pch_gbe_mac_info *mac = &hw->mac; 475 struct pch_gbe_mac_info *mac = &hw->mac;
472 u32 rx_fctrl; 476 u32 rx_fctrl;
473 477
474 pr_debug("mac->fc = %u\n", mac->fc); 478 netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc);
475 479
476 rx_fctrl = ioread32(&hw->reg->RX_FCTRL); 480 rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
477 481
@@ -493,14 +497,16 @@ s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
493 mac->tx_fc_enable = true; 497 mac->tx_fc_enable = true;
494 break; 498 break;
495 default: 499 default:
496 pr_err("Flow control param set incorrectly\n"); 500 netdev_err(adapter->netdev,
501 "Flow control param set incorrectly\n");
497 return -EINVAL; 502 return -EINVAL;
498 } 503 }
499 if (mac->link_duplex == DUPLEX_HALF) 504 if (mac->link_duplex == DUPLEX_HALF)
500 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN; 505 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
501 iowrite32(rx_fctrl, &hw->reg->RX_FCTRL); 506 iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
502 pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n", 507 netdev_dbg(adapter->netdev,
503 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable); 508 "RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
509 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
504 return 0; 510 return 0;
505} 511}
506 512
@@ -511,10 +517,11 @@ s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
511 */ 517 */
512static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt) 518static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
513{ 519{
520 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
514 u32 addr_mask; 521 u32 addr_mask;
515 522
516 pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n", 523 netdev_dbg(adapter->netdev, "wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
517 wu_evt, ioread32(&hw->reg->ADDR_MASK)); 524 wu_evt, ioread32(&hw->reg->ADDR_MASK));
518 525
519 if (wu_evt) { 526 if (wu_evt) {
520 /* Set Wake-On-Lan address mask */ 527 /* Set Wake-On-Lan address mask */
@@ -546,6 +553,7 @@ static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
546u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, 553u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
547 u16 data) 554 u16 data)
548{ 555{
556 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
549 u32 data_out = 0; 557 u32 data_out = 0;
550 unsigned int i; 558 unsigned int i;
551 unsigned long flags; 559 unsigned long flags;
@@ -558,7 +566,7 @@ u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
558 udelay(20); 566 udelay(20);
559 } 567 }
560 if (i == 0) { 568 if (i == 0) {
561 pr_err("pch-gbe.miim won't go Ready\n"); 569 netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n");
562 spin_unlock_irqrestore(&hw->miim_lock, flags); 570 spin_unlock_irqrestore(&hw->miim_lock, flags);
563 return 0; /* No way to indicate timeout error */ 571 return 0; /* No way to indicate timeout error */
564 } 572 }
@@ -573,9 +581,9 @@ u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
573 } 581 }
574 spin_unlock_irqrestore(&hw->miim_lock, flags); 582 spin_unlock_irqrestore(&hw->miim_lock, flags);
575 583
576 pr_debug("PHY %s: reg=%d, data=0x%04X\n", 584 netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n",
577 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg, 585 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
578 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data); 586 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
579 return (u16) data_out; 587 return (u16) data_out;
580} 588}
581 589
@@ -585,6 +593,7 @@ u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
585 */ 593 */
586static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw) 594static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
587{ 595{
596 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
588 unsigned long tmp2, tmp3; 597 unsigned long tmp2, tmp3;
589 598
590 /* Set Pause packet */ 599 /* Set Pause packet */
@@ -606,10 +615,13 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
606 /* Transmit Pause Packet */ 615 /* Transmit Pause Packet */
607 iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ); 616 iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
608 617
609 pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 618 netdev_dbg(adapter->netdev,
610 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2), 619 "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
611 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4), 620 ioread32(&hw->reg->PAUSE_PKT1),
612 ioread32(&hw->reg->PAUSE_PKT5)); 621 ioread32(&hw->reg->PAUSE_PKT2),
622 ioread32(&hw->reg->PAUSE_PKT3),
623 ioread32(&hw->reg->PAUSE_PKT4),
624 ioread32(&hw->reg->PAUSE_PKT5));
613 625
614 return; 626 return;
615} 627}
@@ -624,15 +636,15 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
624 */ 636 */
625static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter) 637static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
626{ 638{
627 adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL); 639 adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,
640 sizeof(*adapter->tx_ring), GFP_KERNEL);
628 if (!adapter->tx_ring) 641 if (!adapter->tx_ring)
629 return -ENOMEM; 642 return -ENOMEM;
630 643
631 adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL); 644 adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,
632 if (!adapter->rx_ring) { 645 sizeof(*adapter->rx_ring), GFP_KERNEL);
633 kfree(adapter->tx_ring); 646 if (!adapter->rx_ring)
634 return -ENOMEM; 647 return -ENOMEM;
635 }
636 return 0; 648 return 0;
637} 649}
638 650
@@ -669,7 +681,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
669 break; 681 break;
670 } 682 }
671 adapter->hw.phy.addr = adapter->mii.phy_id; 683 adapter->hw.phy.addr = adapter->mii.phy_id;
672 pr_debug("phy_addr = %d\n", adapter->mii.phy_id); 684 netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
673 if (addr == 32) 685 if (addr == 32)
674 return -EAGAIN; 686 return -EAGAIN;
675 /* Selected the phy and isolate the rest */ 687 /* Selected the phy and isolate the rest */
@@ -758,13 +770,15 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
758 */ 770 */
759void pch_gbe_reset(struct pch_gbe_adapter *adapter) 771void pch_gbe_reset(struct pch_gbe_adapter *adapter)
760{ 772{
773 struct net_device *netdev = adapter->netdev;
774
761 pch_gbe_mac_reset_hw(&adapter->hw); 775 pch_gbe_mac_reset_hw(&adapter->hw);
762 /* reprogram multicast address register after reset */ 776 /* reprogram multicast address register after reset */
763 pch_gbe_set_multi(adapter->netdev); 777 pch_gbe_set_multi(netdev);
764 /* Setup the receive address. */ 778 /* Setup the receive address. */
765 pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES); 779 pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
766 if (pch_gbe_hal_init_hw(&adapter->hw)) 780 if (pch_gbe_hal_init_hw(&adapter->hw))
767 pr_err("Hardware Error\n"); 781 netdev_err(netdev, "Hardware Error\n");
768} 782}
769 783
770/** 784/**
@@ -778,7 +792,7 @@ static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
778 free_irq(adapter->pdev->irq, netdev); 792 free_irq(adapter->pdev->irq, netdev);
779 if (adapter->have_msi) { 793 if (adapter->have_msi) {
780 pci_disable_msi(adapter->pdev); 794 pci_disable_msi(adapter->pdev);
781 pr_debug("call pci_disable_msi\n"); 795 netdev_dbg(netdev, "call pci_disable_msi\n");
782 } 796 }
783} 797}
784 798
@@ -795,7 +809,8 @@ static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
795 ioread32(&hw->reg->INT_ST); 809 ioread32(&hw->reg->INT_ST);
796 synchronize_irq(adapter->pdev->irq); 810 synchronize_irq(adapter->pdev->irq);
797 811
798 pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN)); 812 netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
813 ioread32(&hw->reg->INT_EN));
799} 814}
800 815
801/** 816/**
@@ -809,7 +824,8 @@ static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
809 if (likely(atomic_dec_and_test(&adapter->irq_sem))) 824 if (likely(atomic_dec_and_test(&adapter->irq_sem)))
810 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN); 825 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
811 ioread32(&hw->reg->INT_ST); 826 ioread32(&hw->reg->INT_ST);
812 pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN)); 827 netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
828 ioread32(&hw->reg->INT_EN));
813} 829}
814 830
815 831
@@ -846,9 +862,9 @@ static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
846 struct pch_gbe_hw *hw = &adapter->hw; 862 struct pch_gbe_hw *hw = &adapter->hw;
847 u32 tdba, tdlen, dctrl; 863 u32 tdba, tdlen, dctrl;
848 864
849 pr_debug("dma addr = 0x%08llx size = 0x%08x\n", 865 netdev_dbg(adapter->netdev, "dma addr = 0x%08llx size = 0x%08x\n",
850 (unsigned long long)adapter->tx_ring->dma, 866 (unsigned long long)adapter->tx_ring->dma,
851 adapter->tx_ring->size); 867 adapter->tx_ring->size);
852 868
853 /* Setup the HW Tx Head and Tail descriptor pointers */ 869 /* Setup the HW Tx Head and Tail descriptor pointers */
854 tdba = adapter->tx_ring->dma; 870 tdba = adapter->tx_ring->dma;
@@ -894,9 +910,9 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
894 struct pch_gbe_hw *hw = &adapter->hw; 910 struct pch_gbe_hw *hw = &adapter->hw;
895 u32 rdba, rdlen, rxdma; 911 u32 rdba, rdlen, rxdma;
896 912
897 pr_debug("dma adr = 0x%08llx size = 0x%08x\n", 913 netdev_dbg(adapter->netdev, "dma adr = 0x%08llx size = 0x%08x\n",
898 (unsigned long long)adapter->rx_ring->dma, 914 (unsigned long long)adapter->rx_ring->dma,
899 adapter->rx_ring->size); 915 adapter->rx_ring->size);
900 916
901 pch_gbe_mac_force_mac_fc(hw); 917 pch_gbe_mac_force_mac_fc(hw);
902 918
@@ -907,9 +923,10 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
907 rxdma &= ~PCH_GBE_RX_DMA_EN; 923 rxdma &= ~PCH_GBE_RX_DMA_EN;
908 iowrite32(rxdma, &hw->reg->DMA_CTRL); 924 iowrite32(rxdma, &hw->reg->DMA_CTRL);
909 925
910 pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n", 926 netdev_dbg(adapter->netdev,
911 ioread32(&hw->reg->MAC_RX_EN), 927 "MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
912 ioread32(&hw->reg->DMA_CTRL)); 928 ioread32(&hw->reg->MAC_RX_EN),
929 ioread32(&hw->reg->DMA_CTRL));
913 930
914 /* Setup the HW Rx Head and Tail Descriptor Pointers and 931 /* Setup the HW Rx Head and Tail Descriptor Pointers and
915 * the Base and Length of the Rx Descriptor Ring */ 932 * the Base and Length of the Rx Descriptor Ring */
@@ -977,7 +994,8 @@ static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
977 buffer_info = &tx_ring->buffer_info[i]; 994 buffer_info = &tx_ring->buffer_info[i];
978 pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info); 995 pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
979 } 996 }
980 pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i); 997 netdev_dbg(adapter->netdev,
998 "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
981 999
982 size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count; 1000 size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
983 memset(tx_ring->buffer_info, 0, size); 1001 memset(tx_ring->buffer_info, 0, size);
@@ -1009,7 +1027,8 @@ pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
1009 buffer_info = &rx_ring->buffer_info[i]; 1027 buffer_info = &rx_ring->buffer_info[i];
1010 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info); 1028 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
1011 } 1029 }
1012 pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i); 1030 netdev_dbg(adapter->netdev,
1031 "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
1013 size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count; 1032 size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1014 memset(rx_ring->buffer_info, 0, size); 1033 memset(rx_ring->buffer_info, 0, size);
1015 1034
@@ -1087,7 +1106,7 @@ static void pch_gbe_watchdog(unsigned long data)
1087 struct net_device *netdev = adapter->netdev; 1106 struct net_device *netdev = adapter->netdev;
1088 struct pch_gbe_hw *hw = &adapter->hw; 1107 struct pch_gbe_hw *hw = &adapter->hw;
1089 1108
1090 pr_debug("right now = %ld\n", jiffies); 1109 netdev_dbg(netdev, "right now = %ld\n", jiffies);
1091 1110
1092 pch_gbe_update_stats(adapter); 1111 pch_gbe_update_stats(adapter);
1093 if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) { 1112 if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
@@ -1095,7 +1114,7 @@ static void pch_gbe_watchdog(unsigned long data)
1095 netdev->tx_queue_len = adapter->tx_queue_len; 1114 netdev->tx_queue_len = adapter->tx_queue_len;
1096 /* mii library handles link maintenance tasks */ 1115 /* mii library handles link maintenance tasks */
1097 if (mii_ethtool_gset(&adapter->mii, &cmd)) { 1116 if (mii_ethtool_gset(&adapter->mii, &cmd)) {
1098 pr_err("ethtool get setting Error\n"); 1117 netdev_err(netdev, "ethtool get setting Error\n");
1099 mod_timer(&adapter->watchdog_timer, 1118 mod_timer(&adapter->watchdog_timer,
1100 round_jiffies(jiffies + 1119 round_jiffies(jiffies +
1101 PCH_GBE_WATCHDOG_PERIOD)); 1120 PCH_GBE_WATCHDOG_PERIOD));
@@ -1213,7 +1232,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1213 buffer_info->length, 1232 buffer_info->length,
1214 DMA_TO_DEVICE); 1233 DMA_TO_DEVICE);
1215 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { 1234 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1216 pr_err("TX DMA map failed\n"); 1235 netdev_err(adapter->netdev, "TX DMA map failed\n");
1217 buffer_info->dma = 0; 1236 buffer_info->dma = 0;
1218 buffer_info->time_stamp = 0; 1237 buffer_info->time_stamp = 0;
1219 tx_ring->next_to_use = ring_num; 1238 tx_ring->next_to_use = ring_num;
@@ -1333,13 +1352,13 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1333 /* When request status is no interruption factor */ 1352 /* When request status is no interruption factor */
1334 if (unlikely(!int_st)) 1353 if (unlikely(!int_st))
1335 return IRQ_NONE; /* Not our interrupt. End processing. */ 1354 return IRQ_NONE; /* Not our interrupt. End processing. */
1336 pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st); 1355 netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st);
1337 if (int_st & PCH_GBE_INT_RX_FRAME_ERR) 1356 if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1338 adapter->stats.intr_rx_frame_err_count++; 1357 adapter->stats.intr_rx_frame_err_count++;
1339 if (int_st & PCH_GBE_INT_RX_FIFO_ERR) 1358 if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1340 if (!adapter->rx_stop_flag) { 1359 if (!adapter->rx_stop_flag) {
1341 adapter->stats.intr_rx_fifo_err_count++; 1360 adapter->stats.intr_rx_fifo_err_count++;
1342 pr_debug("Rx fifo over run\n"); 1361 netdev_dbg(netdev, "Rx fifo over run\n");
1343 adapter->rx_stop_flag = true; 1362 adapter->rx_stop_flag = true;
1344 int_en = ioread32(&hw->reg->INT_EN); 1363 int_en = ioread32(&hw->reg->INT_EN);
1345 iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), 1364 iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
@@ -1359,7 +1378,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1359 /* When Rx descriptor is empty */ 1378 /* When Rx descriptor is empty */
1360 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { 1379 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1361 adapter->stats.intr_rx_dsc_empty_count++; 1380 adapter->stats.intr_rx_dsc_empty_count++;
1362 pr_debug("Rx descriptor is empty\n"); 1381 netdev_dbg(netdev, "Rx descriptor is empty\n");
1363 int_en = ioread32(&hw->reg->INT_EN); 1382 int_en = ioread32(&hw->reg->INT_EN);
1364 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); 1383 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1365 if (hw->mac.tx_fc_enable) { 1384 if (hw->mac.tx_fc_enable) {
@@ -1382,8 +1401,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1382 __napi_schedule(&adapter->napi); 1401 __napi_schedule(&adapter->napi);
1383 } 1402 }
1384 } 1403 }
1385 pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n", 1404 netdev_dbg(netdev, "return = 0x%08x INT_EN reg = 0x%08x\n",
1386 IRQ_HANDLED, ioread32(&hw->reg->INT_EN)); 1405 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1387 return IRQ_HANDLED; 1406 return IRQ_HANDLED;
1388} 1407}
1389 1408
@@ -1437,9 +1456,10 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1437 rx_desc->buffer_addr = (buffer_info->dma); 1456 rx_desc->buffer_addr = (buffer_info->dma);
1438 rx_desc->gbec_status = DSC_INIT16; 1457 rx_desc->gbec_status = DSC_INIT16;
1439 1458
1440 pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n", 1459 netdev_dbg(netdev,
1441 i, (unsigned long long)buffer_info->dma, 1460 "i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
1442 buffer_info->length); 1461 i, (unsigned long long)buffer_info->dma,
1462 buffer_info->length);
1443 1463
1444 if (unlikely(++i == rx_ring->count)) 1464 if (unlikely(++i == rx_ring->count))
1445 i = 0; 1465 i = 0;
@@ -1531,12 +1551,13 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1531 bool cleaned = false; 1551 bool cleaned = false;
1532 int unused, thresh; 1552 int unused, thresh;
1533 1553
1534 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); 1554 netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
1555 tx_ring->next_to_clean);
1535 1556
1536 i = tx_ring->next_to_clean; 1557 i = tx_ring->next_to_clean;
1537 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); 1558 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1538 pr_debug("gbec_status:0x%04x dma_status:0x%04x\n", 1559 netdev_dbg(adapter->netdev, "gbec_status:0x%04x dma_status:0x%04x\n",
1539 tx_desc->gbec_status, tx_desc->dma_status); 1560 tx_desc->gbec_status, tx_desc->dma_status);
1540 1561
1541 unused = PCH_GBE_DESC_UNUSED(tx_ring); 1562 unused = PCH_GBE_DESC_UNUSED(tx_ring);
1542 thresh = tx_ring->count - PCH_GBE_TX_WEIGHT; 1563 thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
@@ -1544,8 +1565,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1544 { /* current marked clean, tx queue filling up, do extra clean */ 1565 { /* current marked clean, tx queue filling up, do extra clean */
1545 int j, k; 1566 int j, k;
1546 if (unused < 8) { /* tx queue nearly full */ 1567 if (unused < 8) { /* tx queue nearly full */
1547 pr_debug("clean_tx: transmit queue warning (%x,%x) unused=%d\n", 1568 netdev_dbg(adapter->netdev,
1548 tx_ring->next_to_clean,tx_ring->next_to_use,unused); 1569 "clean_tx: transmit queue warning (%x,%x) unused=%d\n",
1570 tx_ring->next_to_clean, tx_ring->next_to_use,
1571 unused);
1549 } 1572 }
1550 1573
1551 /* current marked clean, scan for more that need cleaning. */ 1574 /* current marked clean, scan for more that need cleaning. */
@@ -1557,49 +1580,56 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1557 if (++k >= tx_ring->count) k = 0; /*increment, wrap*/ 1580 if (++k >= tx_ring->count) k = 0; /*increment, wrap*/
1558 } 1581 }
1559 if (j < PCH_GBE_TX_WEIGHT) { 1582 if (j < PCH_GBE_TX_WEIGHT) {
1560 pr_debug("clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n", 1583 netdev_dbg(adapter->netdev,
1561 unused,j, i,k, tx_ring->next_to_use, tx_desc->gbec_status); 1584 "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
1585 unused, j, i, k, tx_ring->next_to_use,
1586 tx_desc->gbec_status);
1562 i = k; /*found one to clean, usu gbec_status==2000.*/ 1587 i = k; /*found one to clean, usu gbec_status==2000.*/
1563 } 1588 }
1564 } 1589 }
1565 1590
1566 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { 1591 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1567 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); 1592 netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n",
1593 tx_desc->gbec_status);
1568 buffer_info = &tx_ring->buffer_info[i]; 1594 buffer_info = &tx_ring->buffer_info[i];
1569 skb = buffer_info->skb; 1595 skb = buffer_info->skb;
1570 cleaned = true; 1596 cleaned = true;
1571 1597
1572 if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) { 1598 if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1573 adapter->stats.tx_aborted_errors++; 1599 adapter->stats.tx_aborted_errors++;
1574 pr_err("Transfer Abort Error\n"); 1600 netdev_err(adapter->netdev, "Transfer Abort Error\n");
1575 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER) 1601 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1576 ) { 1602 ) {
1577 adapter->stats.tx_carrier_errors++; 1603 adapter->stats.tx_carrier_errors++;
1578 pr_err("Transfer Carrier Sense Error\n"); 1604 netdev_err(adapter->netdev,
1605 "Transfer Carrier Sense Error\n");
1579 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL) 1606 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1580 ) { 1607 ) {
1581 adapter->stats.tx_aborted_errors++; 1608 adapter->stats.tx_aborted_errors++;
1582 pr_err("Transfer Collision Abort Error\n"); 1609 netdev_err(adapter->netdev,
1610 "Transfer Collision Abort Error\n");
1583 } else if ((tx_desc->gbec_status & 1611 } else if ((tx_desc->gbec_status &
1584 (PCH_GBE_TXD_GMAC_STAT_SNGCOL | 1612 (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1585 PCH_GBE_TXD_GMAC_STAT_MLTCOL))) { 1613 PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1586 adapter->stats.collisions++; 1614 adapter->stats.collisions++;
1587 adapter->stats.tx_packets++; 1615 adapter->stats.tx_packets++;
1588 adapter->stats.tx_bytes += skb->len; 1616 adapter->stats.tx_bytes += skb->len;
1589 pr_debug("Transfer Collision\n"); 1617 netdev_dbg(adapter->netdev, "Transfer Collision\n");
1590 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT) 1618 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1591 ) { 1619 ) {
1592 adapter->stats.tx_packets++; 1620 adapter->stats.tx_packets++;
1593 adapter->stats.tx_bytes += skb->len; 1621 adapter->stats.tx_bytes += skb->len;
1594 } 1622 }
1595 if (buffer_info->mapped) { 1623 if (buffer_info->mapped) {
1596 pr_debug("unmap buffer_info->dma : %d\n", i); 1624 netdev_dbg(adapter->netdev,
1625 "unmap buffer_info->dma : %d\n", i);
1597 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1626 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1598 buffer_info->length, DMA_TO_DEVICE); 1627 buffer_info->length, DMA_TO_DEVICE);
1599 buffer_info->mapped = false; 1628 buffer_info->mapped = false;
1600 } 1629 }
1601 if (buffer_info->skb) { 1630 if (buffer_info->skb) {
1602 pr_debug("trim buffer_info->skb : %d\n", i); 1631 netdev_dbg(adapter->netdev,
1632 "trim buffer_info->skb : %d\n", i);
1603 skb_trim(buffer_info->skb, 0); 1633 skb_trim(buffer_info->skb, 0);
1604 } 1634 }
1605 tx_desc->gbec_status = DSC_INIT16; 1635 tx_desc->gbec_status = DSC_INIT16;
@@ -1613,8 +1643,9 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1613 break; 1643 break;
1614 } 1644 }
1615 } 1645 }
1616 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", 1646 netdev_dbg(adapter->netdev,
1617 cleaned_count); 1647 "called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1648 cleaned_count);
1618 if (cleaned_count > 0) { /*skip this if nothing cleaned*/ 1649 if (cleaned_count > 0) { /*skip this if nothing cleaned*/
1619 /* Recover from running out of Tx resources in xmit_frame */ 1650 /* Recover from running out of Tx resources in xmit_frame */
1620 spin_lock(&tx_ring->tx_lock); 1651 spin_lock(&tx_ring->tx_lock);
@@ -1622,12 +1653,13 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1622 { 1653 {
1623 netif_wake_queue(adapter->netdev); 1654 netif_wake_queue(adapter->netdev);
1624 adapter->stats.tx_restart_count++; 1655 adapter->stats.tx_restart_count++;
1625 pr_debug("Tx wake queue\n"); 1656 netdev_dbg(adapter->netdev, "Tx wake queue\n");
1626 } 1657 }
1627 1658
1628 tx_ring->next_to_clean = i; 1659 tx_ring->next_to_clean = i;
1629 1660
1630 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); 1661 netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
1662 tx_ring->next_to_clean);
1631 spin_unlock(&tx_ring->tx_lock); 1663 spin_unlock(&tx_ring->tx_lock);
1632 } 1664 }
1633 return cleaned; 1665 return cleaned;
@@ -1684,22 +1716,22 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1684 buffer_info->length, DMA_FROM_DEVICE); 1716 buffer_info->length, DMA_FROM_DEVICE);
1685 buffer_info->mapped = false; 1717 buffer_info->mapped = false;
1686 1718
1687 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " 1719 netdev_dbg(netdev,
1688 "TCP:0x%08x] BufInf = 0x%p\n", 1720 "RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x] BufInf = 0x%p\n",
1689 i, dma_status, gbec_status, tcp_ip_status, 1721 i, dma_status, gbec_status, tcp_ip_status,
1690 buffer_info); 1722 buffer_info);
1691 /* Error check */ 1723 /* Error check */
1692 if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) { 1724 if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1693 adapter->stats.rx_frame_errors++; 1725 adapter->stats.rx_frame_errors++;
1694 pr_err("Receive Not Octal Error\n"); 1726 netdev_err(netdev, "Receive Not Octal Error\n");
1695 } else if (unlikely(gbec_status & 1727 } else if (unlikely(gbec_status &
1696 PCH_GBE_RXD_GMAC_STAT_NBLERR)) { 1728 PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1697 adapter->stats.rx_frame_errors++; 1729 adapter->stats.rx_frame_errors++;
1698 pr_err("Receive Nibble Error\n"); 1730 netdev_err(netdev, "Receive Nibble Error\n");
1699 } else if (unlikely(gbec_status & 1731 } else if (unlikely(gbec_status &
1700 PCH_GBE_RXD_GMAC_STAT_CRCERR)) { 1732 PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1701 adapter->stats.rx_crc_errors++; 1733 adapter->stats.rx_crc_errors++;
1702 pr_err("Receive CRC Error\n"); 1734 netdev_err(netdev, "Receive CRC Error\n");
1703 } else { 1735 } else {
1704 /* get receive length */ 1736 /* get receive length */
1705 /* length convert[-3], length includes FCS length */ 1737 /* length convert[-3], length includes FCS length */
@@ -1730,8 +1762,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1730 1762
1731 napi_gro_receive(&adapter->napi, skb); 1763 napi_gro_receive(&adapter->napi, skb);
1732 (*work_done)++; 1764 (*work_done)++;
1733 pr_debug("Receive skb->ip_summed: %d length: %d\n", 1765 netdev_dbg(netdev,
1734 skb->ip_summed, length); 1766 "Receive skb->ip_summed: %d length: %d\n",
1767 skb->ip_summed, length);
1735 } 1768 }
1736 /* return some buffers to hardware, one at a time is too slow */ 1769 /* return some buffers to hardware, one at a time is too slow */
1737 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { 1770 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
@@ -1787,10 +1820,10 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1787 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo); 1820 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1788 tx_desc->gbec_status = DSC_INIT16; 1821 tx_desc->gbec_status = DSC_INIT16;
1789 } 1822 }
1790 pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n" 1823 netdev_dbg(adapter->netdev,
1791 "next_to_clean = 0x%08x next_to_use = 0x%08x\n", 1824 "tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1792 tx_ring->desc, (unsigned long long)tx_ring->dma, 1825 tx_ring->desc, (unsigned long long)tx_ring->dma,
1793 tx_ring->next_to_clean, tx_ring->next_to_use); 1826 tx_ring->next_to_clean, tx_ring->next_to_use);
1794 return 0; 1827 return 0;
1795} 1828}
1796 1829
@@ -1829,10 +1862,10 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1829 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo); 1862 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1830 rx_desc->gbec_status = DSC_INIT16; 1863 rx_desc->gbec_status = DSC_INIT16;
1831 } 1864 }
1832 pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx " 1865 netdev_dbg(adapter->netdev,
1833 "next_to_clean = 0x%08x next_to_use = 0x%08x\n", 1866 "rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1834 rx_ring->desc, (unsigned long long)rx_ring->dma, 1867 rx_ring->desc, (unsigned long long)rx_ring->dma,
1835 rx_ring->next_to_clean, rx_ring->next_to_use); 1868 rx_ring->next_to_clean, rx_ring->next_to_use);
1836 return 0; 1869 return 0;
1837} 1870}
1838 1871
@@ -1886,9 +1919,9 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1886 flags = IRQF_SHARED; 1919 flags = IRQF_SHARED;
1887 adapter->have_msi = false; 1920 adapter->have_msi = false;
1888 err = pci_enable_msi(adapter->pdev); 1921 err = pci_enable_msi(adapter->pdev);
1889 pr_debug("call pci_enable_msi\n"); 1922 netdev_dbg(netdev, "call pci_enable_msi\n");
1890 if (err) { 1923 if (err) {
1891 pr_debug("call pci_enable_msi - Error: %d\n", err); 1924 netdev_dbg(netdev, "call pci_enable_msi - Error: %d\n", err);
1892 } else { 1925 } else {
1893 flags = 0; 1926 flags = 0;
1894 adapter->have_msi = true; 1927 adapter->have_msi = true;
@@ -1896,9 +1929,11 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1896 err = request_irq(adapter->pdev->irq, &pch_gbe_intr, 1929 err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
1897 flags, netdev->name, netdev); 1930 flags, netdev->name, netdev);
1898 if (err) 1931 if (err)
1899 pr_err("Unable to allocate interrupt Error: %d\n", err); 1932 netdev_err(netdev, "Unable to allocate interrupt Error: %d\n",
1900 pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n", 1933 err);
1901 adapter->have_msi, flags, err); 1934 netdev_dbg(netdev,
1935 "adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
1936 adapter->have_msi, flags, err);
1902 return err; 1937 return err;
1903} 1938}
1904 1939
@@ -1919,7 +1954,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
1919 1954
1920 /* Ensure we have a valid MAC */ 1955 /* Ensure we have a valid MAC */
1921 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1956 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1922 pr_err("Error: Invalid MAC address\n"); 1957 netdev_err(netdev, "Error: Invalid MAC address\n");
1923 goto out; 1958 goto out;
1924 } 1959 }
1925 1960
@@ -1933,12 +1968,14 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
1933 1968
1934 err = pch_gbe_request_irq(adapter); 1969 err = pch_gbe_request_irq(adapter);
1935 if (err) { 1970 if (err) {
1936 pr_err("Error: can't bring device up - irq request failed\n"); 1971 netdev_err(netdev,
1972 "Error: can't bring device up - irq request failed\n");
1937 goto out; 1973 goto out;
1938 } 1974 }
1939 err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); 1975 err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1940 if (err) { 1976 if (err) {
1941 pr_err("Error: can't bring device up - alloc rx buffers pool failed\n"); 1977 netdev_err(netdev,
1978 "Error: can't bring device up - alloc rx buffers pool failed\n");
1942 goto freeirq; 1979 goto freeirq;
1943 } 1980 }
1944 pch_gbe_alloc_tx_buffers(adapter, tx_ring); 1981 pch_gbe_alloc_tx_buffers(adapter, tx_ring);
@@ -2015,11 +2052,11 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
2015 2052
2016 /* Initialize the hardware-specific values */ 2053 /* Initialize the hardware-specific values */
2017 if (pch_gbe_hal_setup_init_funcs(hw)) { 2054 if (pch_gbe_hal_setup_init_funcs(hw)) {
2018 pr_err("Hardware Initialization Failure\n"); 2055 netdev_err(netdev, "Hardware Initialization Failure\n");
2019 return -EIO; 2056 return -EIO;
2020 } 2057 }
2021 if (pch_gbe_alloc_queues(adapter)) { 2058 if (pch_gbe_alloc_queues(adapter)) {
2022 pr_err("Unable to allocate memory for queues\n"); 2059 netdev_err(netdev, "Unable to allocate memory for queues\n");
2023 return -ENOMEM; 2060 return -ENOMEM;
2024 } 2061 }
2025 spin_lock_init(&adapter->hw.miim_lock); 2062 spin_lock_init(&adapter->hw.miim_lock);
@@ -2030,9 +2067,10 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
2030 2067
2031 pch_gbe_init_stats(adapter); 2068 pch_gbe_init_stats(adapter);
2032 2069
2033 pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n", 2070 netdev_dbg(netdev,
2034 (u32) adapter->rx_buffer_len, 2071 "rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
2035 hw->mac.min_frame_size, hw->mac.max_frame_size); 2072 (u32) adapter->rx_buffer_len,
2073 hw->mac.min_frame_size, hw->mac.max_frame_size);
2036 return 0; 2074 return 0;
2037} 2075}
2038 2076
@@ -2061,7 +2099,7 @@ static int pch_gbe_open(struct net_device *netdev)
2061 err = pch_gbe_up(adapter); 2099 err = pch_gbe_up(adapter);
2062 if (err) 2100 if (err)
2063 goto err_up; 2101 goto err_up;
2064 pr_debug("Success End\n"); 2102 netdev_dbg(netdev, "Success End\n");
2065 return 0; 2103 return 0;
2066 2104
2067err_up: 2105err_up:
@@ -2072,7 +2110,7 @@ err_setup_rx:
2072 pch_gbe_free_tx_resources(adapter, adapter->tx_ring); 2110 pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2073err_setup_tx: 2111err_setup_tx:
2074 pch_gbe_reset(adapter); 2112 pch_gbe_reset(adapter);
2075 pr_err("Error End\n"); 2113 netdev_err(netdev, "Error End\n");
2076 return err; 2114 return err;
2077} 2115}
2078 2116
@@ -2116,8 +2154,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2116 if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { 2154 if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
2117 netif_stop_queue(netdev); 2155 netif_stop_queue(netdev);
2118 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 2156 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2119 pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n", 2157 netdev_dbg(netdev,
2120 tx_ring->next_to_use, tx_ring->next_to_clean); 2158 "Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
2159 tx_ring->next_to_use, tx_ring->next_to_clean);
2121 return NETDEV_TX_BUSY; 2160 return NETDEV_TX_BUSY;
2122 } 2161 }
2123 2162
@@ -2152,7 +2191,7 @@ static void pch_gbe_set_multi(struct net_device *netdev)
2152 int i; 2191 int i;
2153 int mc_count; 2192 int mc_count;
2154 2193
2155 pr_debug("netdev->flags : 0x%08x\n", netdev->flags); 2194 netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);
2156 2195
2157 /* Check for Promiscuous and All Multicast modes */ 2196 /* Check for Promiscuous and All Multicast modes */
2158 rctl = ioread32(&hw->reg->RX_MODE); 2197 rctl = ioread32(&hw->reg->RX_MODE);
@@ -2192,7 +2231,8 @@ static void pch_gbe_set_multi(struct net_device *netdev)
2192 PCH_GBE_MAR_ENTRIES); 2231 PCH_GBE_MAR_ENTRIES);
2193 kfree(mta_list); 2232 kfree(mta_list);
2194 2233
2195 pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n", 2234 netdev_dbg(netdev,
2235 "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
2196 ioread32(&hw->reg->RX_MODE), mc_count); 2236 ioread32(&hw->reg->RX_MODE), mc_count);
2197} 2237}
2198 2238
@@ -2218,12 +2258,12 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2218 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0); 2258 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2219 ret_val = 0; 2259 ret_val = 0;
2220 } 2260 }
2221 pr_debug("ret_val : 0x%08x\n", ret_val); 2261 netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val);
2222 pr_debug("dev_addr : %pM\n", netdev->dev_addr); 2262 netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr);
2223 pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr); 2263 netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr);
2224 pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n", 2264 netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2225 ioread32(&adapter->hw.reg->mac_adr[0].high), 2265 ioread32(&adapter->hw.reg->mac_adr[0].high),
2226 ioread32(&adapter->hw.reg->mac_adr[0].low)); 2266 ioread32(&adapter->hw.reg->mac_adr[0].low));
2227 return ret_val; 2267 return ret_val;
2228} 2268}
2229 2269
@@ -2245,7 +2285,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2245 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2285 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2246 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 2286 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2247 (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) { 2287 (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
2248 pr_err("Invalid MTU setting\n"); 2288 netdev_err(netdev, "Invalid MTU setting\n");
2249 return -EINVAL; 2289 return -EINVAL;
2250 } 2290 }
2251 if (max_frame <= PCH_GBE_FRAME_SIZE_2048) 2291 if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
@@ -2274,9 +2314,10 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2274 adapter->hw.mac.max_frame_size = max_frame; 2314 adapter->hw.mac.max_frame_size = max_frame;
2275 } 2315 }
2276 2316
2277 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", 2317 netdev_dbg(netdev,
2278 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, 2318 "max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
2279 adapter->hw.mac.max_frame_size); 2319 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2320 adapter->hw.mac.max_frame_size);
2280 return 0; 2321 return 0;
2281} 2322}
2282 2323
@@ -2317,7 +2358,7 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2317{ 2358{
2318 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 2359 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2319 2360
2320 pr_debug("cmd : 0x%04x\n", cmd); 2361 netdev_dbg(netdev, "cmd : 0x%04x\n", cmd);
2321 2362
2322 if (cmd == SIOCSHWTSTAMP) 2363 if (cmd == SIOCSHWTSTAMP)
2323 return hwtstamp_ioctl(netdev, ifr, cmd); 2364 return hwtstamp_ioctl(netdev, ifr, cmd);
@@ -2354,7 +2395,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2354 bool poll_end_flag = false; 2395 bool poll_end_flag = false;
2355 bool cleaned = false; 2396 bool cleaned = false;
2356 2397
2357 pr_debug("budget : %d\n", budget); 2398 netdev_dbg(adapter->netdev, "budget : %d\n", budget);
2358 2399
2359 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); 2400 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2360 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); 2401 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
@@ -2377,8 +2418,9 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2377 pch_gbe_enable_dma_rx(&adapter->hw); 2418 pch_gbe_enable_dma_rx(&adapter->hw);
2378 } 2419 }
2379 2420
2380 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", 2421 netdev_dbg(adapter->netdev,
2381 poll_end_flag, work_done, budget); 2422 "poll_end_flag : %d work_done : %d budget : %d\n",
2423 poll_end_flag, work_done, budget);
2382 2424
2383 return work_done; 2425 return work_done;
2384} 2426}
@@ -2435,7 +2477,7 @@ static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
2435 struct pch_gbe_hw *hw = &adapter->hw; 2477 struct pch_gbe_hw *hw = &adapter->hw;
2436 2478
2437 if (pci_enable_device(pdev)) { 2479 if (pci_enable_device(pdev)) {
2438 pr_err("Cannot re-enable PCI device after reset\n"); 2480 netdev_err(netdev, "Cannot re-enable PCI device after reset\n");
2439 return PCI_ERS_RESULT_DISCONNECT; 2481 return PCI_ERS_RESULT_DISCONNECT;
2440 } 2482 }
2441 pci_set_master(pdev); 2483 pci_set_master(pdev);
@@ -2455,7 +2497,8 @@ static void pch_gbe_io_resume(struct pci_dev *pdev)
2455 2497
2456 if (netif_running(netdev)) { 2498 if (netif_running(netdev)) {
2457 if (pch_gbe_up(adapter)) { 2499 if (pch_gbe_up(adapter)) {
2458 pr_debug("can't bring device back up after reset\n"); 2500 netdev_dbg(netdev,
2501 "can't bring device back up after reset\n");
2459 return; 2502 return;
2460 } 2503 }
2461 } 2504 }
@@ -2509,7 +2552,7 @@ static int pch_gbe_resume(struct device *device)
2509 2552
2510 err = pci_enable_device(pdev); 2553 err = pci_enable_device(pdev);
2511 if (err) { 2554 if (err) {
2512 pr_err("Cannot enable PCI device from suspend\n"); 2555 netdev_err(netdev, "Cannot enable PCI device from suspend\n");
2513 return err; 2556 return err;
2514 } 2557 }
2515 pci_set_master(pdev); 2558 pci_set_master(pdev);
@@ -2545,13 +2588,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
2545 2588
2546 pch_gbe_hal_phy_hw_reset(&adapter->hw); 2589 pch_gbe_hal_phy_hw_reset(&adapter->hw);
2547 2590
2548 kfree(adapter->tx_ring);
2549 kfree(adapter->rx_ring);
2550
2551 iounmap(adapter->hw.reg);
2552 pci_release_regions(pdev);
2553 free_netdev(netdev); 2591 free_netdev(netdev);
2554 pci_disable_device(pdev);
2555} 2592}
2556 2593
2557static int pch_gbe_probe(struct pci_dev *pdev, 2594static int pch_gbe_probe(struct pci_dev *pdev,
@@ -2561,7 +2598,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2561 struct pch_gbe_adapter *adapter; 2598 struct pch_gbe_adapter *adapter;
2562 int ret; 2599 int ret;
2563 2600
2564 ret = pci_enable_device(pdev); 2601 ret = pcim_enable_device(pdev);
2565 if (ret) 2602 if (ret)
2566 return ret; 2603 return ret;
2567 2604
@@ -2574,24 +2611,22 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2574 if (ret) { 2611 if (ret) {
2575 dev_err(&pdev->dev, "ERR: No usable DMA " 2612 dev_err(&pdev->dev, "ERR: No usable DMA "
2576 "configuration, aborting\n"); 2613 "configuration, aborting\n");
2577 goto err_disable_device; 2614 return ret;
2578 } 2615 }
2579 } 2616 }
2580 } 2617 }
2581 2618
2582 ret = pci_request_regions(pdev, KBUILD_MODNAME); 2619 ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev));
2583 if (ret) { 2620 if (ret) {
2584 dev_err(&pdev->dev, 2621 dev_err(&pdev->dev,
2585 "ERR: Can't reserve PCI I/O and memory resources\n"); 2622 "ERR: Can't reserve PCI I/O and memory resources\n");
2586 goto err_disable_device; 2623 return ret;
2587 } 2624 }
2588 pci_set_master(pdev); 2625 pci_set_master(pdev);
2589 2626
2590 netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter)); 2627 netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2591 if (!netdev) { 2628 if (!netdev)
2592 ret = -ENOMEM; 2629 return -ENOMEM;
2593 goto err_release_pci;
2594 }
2595 SET_NETDEV_DEV(netdev, &pdev->dev); 2630 SET_NETDEV_DEV(netdev, &pdev->dev);
2596 2631
2597 pci_set_drvdata(pdev, netdev); 2632 pci_set_drvdata(pdev, netdev);
@@ -2599,18 +2634,14 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2599 adapter->netdev = netdev; 2634 adapter->netdev = netdev;
2600 adapter->pdev = pdev; 2635 adapter->pdev = pdev;
2601 adapter->hw.back = adapter; 2636 adapter->hw.back = adapter;
2602 adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0); 2637 adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
2603 if (!adapter->hw.reg) {
2604 ret = -EIO;
2605 dev_err(&pdev->dev, "Can't ioremap\n");
2606 goto err_free_netdev;
2607 }
2608 2638
2609 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, 2639 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2610 PCI_DEVFN(12, 4)); 2640 PCI_DEVFN(12, 4));
2611 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { 2641 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
2612 pr_err("Bad ptp filter\n"); 2642 dev_err(&pdev->dev, "Bad ptp filter\n");
2613 return -EINVAL; 2643 ret = -EINVAL;
2644 goto err_free_netdev;
2614 } 2645 }
2615 2646
2616 netdev->netdev_ops = &pch_gbe_netdev_ops; 2647 netdev->netdev_ops = &pch_gbe_netdev_ops;
@@ -2628,7 +2659,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2628 /* setup the private structure */ 2659 /* setup the private structure */
2629 ret = pch_gbe_sw_init(adapter); 2660 ret = pch_gbe_sw_init(adapter);
2630 if (ret) 2661 if (ret)
2631 goto err_iounmap; 2662 goto err_free_netdev;
2632 2663
2633 /* Initialize PHY */ 2664 /* Initialize PHY */
2634 ret = pch_gbe_init_phy(adapter); 2665 ret = pch_gbe_init_phy(adapter);
@@ -2684,16 +2715,8 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2684 2715
2685err_free_adapter: 2716err_free_adapter:
2686 pch_gbe_hal_phy_hw_reset(&adapter->hw); 2717 pch_gbe_hal_phy_hw_reset(&adapter->hw);
2687 kfree(adapter->tx_ring);
2688 kfree(adapter->rx_ring);
2689err_iounmap:
2690 iounmap(adapter->hw.reg);
2691err_free_netdev: 2718err_free_netdev:
2692 free_netdev(netdev); 2719 free_netdev(netdev);
2693err_release_pci:
2694 pci_release_regions(pdev);
2695err_disable_device:
2696 pci_disable_device(pdev);
2697 return ret; 2720 return ret;
2698} 2721}
2699 2722
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 8653c3b81f84..cf7c9b3a255b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -237,16 +237,17 @@ static int pch_gbe_validate_option(int *value,
237 case enable_option: 237 case enable_option:
238 switch (*value) { 238 switch (*value) {
239 case OPTION_ENABLED: 239 case OPTION_ENABLED:
240 pr_debug("%s Enabled\n", opt->name); 240 netdev_dbg(adapter->netdev, "%s Enabled\n", opt->name);
241 return 0; 241 return 0;
242 case OPTION_DISABLED: 242 case OPTION_DISABLED:
243 pr_debug("%s Disabled\n", opt->name); 243 netdev_dbg(adapter->netdev, "%s Disabled\n", opt->name);
244 return 0; 244 return 0;
245 } 245 }
246 break; 246 break;
247 case range_option: 247 case range_option:
248 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 248 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
249 pr_debug("%s set to %i\n", opt->name, *value); 249 netdev_dbg(adapter->netdev, "%s set to %i\n",
250 opt->name, *value);
250 return 0; 251 return 0;
251 } 252 }
252 break; 253 break;
@@ -258,7 +259,8 @@ static int pch_gbe_validate_option(int *value,
258 ent = &opt->arg.l.p[i]; 259 ent = &opt->arg.l.p[i];
259 if (*value == ent->i) { 260 if (*value == ent->i) {
260 if (ent->str[0] != '\0') 261 if (ent->str[0] != '\0')
261 pr_debug("%s\n", ent->str); 262 netdev_dbg(adapter->netdev, "%s\n",
263 ent->str);
262 return 0; 264 return 0;
263 } 265 }
264 } 266 }
@@ -268,8 +270,8 @@ static int pch_gbe_validate_option(int *value,
268 BUG(); 270 BUG();
269 } 271 }
270 272
271 pr_debug("Invalid %s value specified (%i) %s\n", 273 netdev_dbg(adapter->netdev, "Invalid %s value specified (%i) %s\n",
272 opt->name, *value, opt->err); 274 opt->name, *value, opt->err);
273 *value = opt->def; 275 *value = opt->def;
274 return -1; 276 return -1;
275} 277}
@@ -318,7 +320,8 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
318 .p = an_list} } 320 .p = an_list} }
319 }; 321 };
320 if (speed || dplx) { 322 if (speed || dplx) {
321 pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n"); 323 netdev_dbg(adapter->netdev,
324 "AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
322 hw->phy.autoneg_advertised = opt.def; 325 hw->phy.autoneg_advertised = opt.def;
323 } else { 326 } else {
324 int tmp = AutoNeg; 327 int tmp = AutoNeg;
@@ -332,13 +335,16 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
332 case 0: 335 case 0:
333 hw->mac.autoneg = hw->mac.fc_autoneg = 1; 336 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
334 if ((speed || dplx)) 337 if ((speed || dplx))
335 pr_debug("Speed and duplex autonegotiation enabled\n"); 338 netdev_dbg(adapter->netdev,
339 "Speed and duplex autonegotiation enabled\n");
336 hw->mac.link_speed = SPEED_10; 340 hw->mac.link_speed = SPEED_10;
337 hw->mac.link_duplex = DUPLEX_HALF; 341 hw->mac.link_duplex = DUPLEX_HALF;
338 break; 342 break;
339 case HALF_DUPLEX: 343 case HALF_DUPLEX:
340 pr_debug("Half Duplex specified without Speed\n"); 344 netdev_dbg(adapter->netdev,
341 pr_debug("Using Autonegotiation at Half Duplex only\n"); 345 "Half Duplex specified without Speed\n");
346 netdev_dbg(adapter->netdev,
347 "Using Autonegotiation at Half Duplex only\n");
342 hw->mac.autoneg = hw->mac.fc_autoneg = 1; 348 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
343 hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF | 349 hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
344 PHY_ADVERTISE_100_HALF; 350 PHY_ADVERTISE_100_HALF;
@@ -346,8 +352,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
346 hw->mac.link_duplex = DUPLEX_HALF; 352 hw->mac.link_duplex = DUPLEX_HALF;
347 break; 353 break;
348 case FULL_DUPLEX: 354 case FULL_DUPLEX:
349 pr_debug("Full Duplex specified without Speed\n"); 355 netdev_dbg(adapter->netdev,
350 pr_debug("Using Autonegotiation at Full Duplex only\n"); 356 "Full Duplex specified without Speed\n");
357 netdev_dbg(adapter->netdev,
358 "Using Autonegotiation at Full Duplex only\n");
351 hw->mac.autoneg = hw->mac.fc_autoneg = 1; 359 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
352 hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL | 360 hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL |
353 PHY_ADVERTISE_100_FULL | 361 PHY_ADVERTISE_100_FULL |
@@ -356,8 +364,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
356 hw->mac.link_duplex = DUPLEX_FULL; 364 hw->mac.link_duplex = DUPLEX_FULL;
357 break; 365 break;
358 case SPEED_10: 366 case SPEED_10:
359 pr_debug("10 Mbps Speed specified without Duplex\n"); 367 netdev_dbg(adapter->netdev,
360 pr_debug("Using Autonegotiation at 10 Mbps only\n"); 368 "10 Mbps Speed specified without Duplex\n");
369 netdev_dbg(adapter->netdev,
370 "Using Autonegotiation at 10 Mbps only\n");
361 hw->mac.autoneg = hw->mac.fc_autoneg = 1; 371 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
362 hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF | 372 hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
363 PHY_ADVERTISE_10_FULL; 373 PHY_ADVERTISE_10_FULL;
@@ -365,22 +375,24 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
365 hw->mac.link_duplex = DUPLEX_HALF; 375 hw->mac.link_duplex = DUPLEX_HALF;
366 break; 376 break;
367 case SPEED_10 + HALF_DUPLEX: 377 case SPEED_10 + HALF_DUPLEX:
368 pr_debug("Forcing to 10 Mbps Half Duplex\n"); 378 netdev_dbg(adapter->netdev, "Forcing to 10 Mbps Half Duplex\n");
369 hw->mac.autoneg = hw->mac.fc_autoneg = 0; 379 hw->mac.autoneg = hw->mac.fc_autoneg = 0;
370 hw->phy.autoneg_advertised = 0; 380 hw->phy.autoneg_advertised = 0;
371 hw->mac.link_speed = SPEED_10; 381 hw->mac.link_speed = SPEED_10;
372 hw->mac.link_duplex = DUPLEX_HALF; 382 hw->mac.link_duplex = DUPLEX_HALF;
373 break; 383 break;
374 case SPEED_10 + FULL_DUPLEX: 384 case SPEED_10 + FULL_DUPLEX:
375 pr_debug("Forcing to 10 Mbps Full Duplex\n"); 385 netdev_dbg(adapter->netdev, "Forcing to 10 Mbps Full Duplex\n");
376 hw->mac.autoneg = hw->mac.fc_autoneg = 0; 386 hw->mac.autoneg = hw->mac.fc_autoneg = 0;
377 hw->phy.autoneg_advertised = 0; 387 hw->phy.autoneg_advertised = 0;
378 hw->mac.link_speed = SPEED_10; 388 hw->mac.link_speed = SPEED_10;
379 hw->mac.link_duplex = DUPLEX_FULL; 389 hw->mac.link_duplex = DUPLEX_FULL;
380 break; 390 break;
381 case SPEED_100: 391 case SPEED_100:
382 pr_debug("100 Mbps Speed specified without Duplex\n"); 392 netdev_dbg(adapter->netdev,
383 pr_debug("Using Autonegotiation at 100 Mbps only\n"); 393 "100 Mbps Speed specified without Duplex\n");
394 netdev_dbg(adapter->netdev,
395 "Using Autonegotiation at 100 Mbps only\n");
384 hw->mac.autoneg = hw->mac.fc_autoneg = 1; 396 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
385 hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF | 397 hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF |
386 PHY_ADVERTISE_100_FULL; 398 PHY_ADVERTISE_100_FULL;
@@ -388,28 +400,33 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
388 hw->mac.link_duplex = DUPLEX_HALF; 400 hw->mac.link_duplex = DUPLEX_HALF;
389 break; 401 break;
390 case SPEED_100 + HALF_DUPLEX: 402 case SPEED_100 + HALF_DUPLEX:
391 pr_debug("Forcing to 100 Mbps Half Duplex\n"); 403 netdev_dbg(adapter->netdev,
404 "Forcing to 100 Mbps Half Duplex\n");
392 hw->mac.autoneg = hw->mac.fc_autoneg = 0; 405 hw->mac.autoneg = hw->mac.fc_autoneg = 0;
393 hw->phy.autoneg_advertised = 0; 406 hw->phy.autoneg_advertised = 0;
394 hw->mac.link_speed = SPEED_100; 407 hw->mac.link_speed = SPEED_100;
395 hw->mac.link_duplex = DUPLEX_HALF; 408 hw->mac.link_duplex = DUPLEX_HALF;
396 break; 409 break;
397 case SPEED_100 + FULL_DUPLEX: 410 case SPEED_100 + FULL_DUPLEX:
398 pr_debug("Forcing to 100 Mbps Full Duplex\n"); 411 netdev_dbg(adapter->netdev,
412 "Forcing to 100 Mbps Full Duplex\n");
399 hw->mac.autoneg = hw->mac.fc_autoneg = 0; 413 hw->mac.autoneg = hw->mac.fc_autoneg = 0;
400 hw->phy.autoneg_advertised = 0; 414 hw->phy.autoneg_advertised = 0;
401 hw->mac.link_speed = SPEED_100; 415 hw->mac.link_speed = SPEED_100;
402 hw->mac.link_duplex = DUPLEX_FULL; 416 hw->mac.link_duplex = DUPLEX_FULL;
403 break; 417 break;
404 case SPEED_1000: 418 case SPEED_1000:
405 pr_debug("1000 Mbps Speed specified without Duplex\n"); 419 netdev_dbg(adapter->netdev,
420 "1000 Mbps Speed specified without Duplex\n");
406 goto full_duplex_only; 421 goto full_duplex_only;
407 case SPEED_1000 + HALF_DUPLEX: 422 case SPEED_1000 + HALF_DUPLEX:
408 pr_debug("Half Duplex is not supported at 1000 Mbps\n"); 423 netdev_dbg(adapter->netdev,
424 "Half Duplex is not supported at 1000 Mbps\n");
409 /* fall through */ 425 /* fall through */
410 case SPEED_1000 + FULL_DUPLEX: 426 case SPEED_1000 + FULL_DUPLEX:
411full_duplex_only: 427full_duplex_only:
412 pr_debug("Using Autonegotiation at 1000 Mbps Full Duplex only\n"); 428 netdev_dbg(adapter->netdev,
429 "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
413 hw->mac.autoneg = hw->mac.fc_autoneg = 1; 430 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
414 hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL; 431 hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL;
415 hw->mac.link_speed = SPEED_1000; 432 hw->mac.link_speed = SPEED_1000;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index 28bb9603d736..da079073a6c6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -97,6 +97,7 @@
97 */ 97 */
98s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw) 98s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw)
99{ 99{
100 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
100 struct pch_gbe_phy_info *phy = &hw->phy; 101 struct pch_gbe_phy_info *phy = &hw->phy;
101 s32 ret; 102 s32 ret;
102 u16 phy_id1; 103 u16 phy_id1;
@@ -115,8 +116,9 @@ s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw)
115 phy->id = (u32)phy_id1; 116 phy->id = (u32)phy_id1;
116 phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10)); 117 phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10));
117 phy->revision = (u32) (phy_id2 & 0x000F); 118 phy->revision = (u32) (phy_id2 & 0x000F);
118 pr_debug("phy->id : 0x%08x phy->revision : 0x%08x\n", 119 netdev_dbg(adapter->netdev,
119 phy->id, phy->revision); 120 "phy->id : 0x%08x phy->revision : 0x%08x\n",
121 phy->id, phy->revision);
120 return 0; 122 return 0;
121} 123}
122 124
@@ -134,7 +136,10 @@ s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data)
134 struct pch_gbe_phy_info *phy = &hw->phy; 136 struct pch_gbe_phy_info *phy = &hw->phy;
135 137
136 if (offset > PHY_MAX_REG_ADDRESS) { 138 if (offset > PHY_MAX_REG_ADDRESS) {
137 pr_err("PHY Address %d is out of range\n", offset); 139 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
140
141 netdev_err(adapter->netdev, "PHY Address %d is out of range\n",
142 offset);
138 return -EINVAL; 143 return -EINVAL;
139 } 144 }
140 *data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ, 145 *data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ,
@@ -156,7 +161,10 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data)
156 struct pch_gbe_phy_info *phy = &hw->phy; 161 struct pch_gbe_phy_info *phy = &hw->phy;
157 162
158 if (offset > PHY_MAX_REG_ADDRESS) { 163 if (offset > PHY_MAX_REG_ADDRESS) {
159 pr_err("PHY Address %d is out of range\n", offset); 164 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
165
166 netdev_err(adapter->netdev, "PHY Address %d is out of range\n",
167 offset);
160 return -EINVAL; 168 return -EINVAL;
161 } 169 }
162 pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE, 170 pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE,
@@ -235,7 +243,7 @@ void pch_gbe_phy_power_down(struct pch_gbe_hw *hw)
235 * pch_gbe_phy_set_rgmii - RGMII interface setting 243 * pch_gbe_phy_set_rgmii - RGMII interface setting
236 * @hw: Pointer to the HW structure 244 * @hw: Pointer to the HW structure
237 */ 245 */
238inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw) 246void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
239{ 247{
240 pch_gbe_phy_sw_reset(hw); 248 pch_gbe_phy_sw_reset(hw);
241} 249}
@@ -246,15 +254,14 @@ inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
246 */ 254 */
247void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) 255void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
248{ 256{
249 struct pch_gbe_adapter *adapter; 257 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
250 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; 258 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
251 int ret; 259 int ret;
252 u16 mii_reg; 260 u16 mii_reg;
253 261
254 adapter = container_of(hw, struct pch_gbe_adapter, hw);
255 ret = mii_ethtool_gset(&adapter->mii, &cmd); 262 ret = mii_ethtool_gset(&adapter->mii, &cmd);
256 if (ret) 263 if (ret)
257 pr_err("Error: mii_ethtool_gset\n"); 264 netdev_err(adapter->netdev, "Error: mii_ethtool_gset\n");
258 265
259 ethtool_cmd_speed_set(&cmd, hw->mac.link_speed); 266 ethtool_cmd_speed_set(&cmd, hw->mac.link_speed);
260 cmd.duplex = hw->mac.link_duplex; 267 cmd.duplex = hw->mac.link_duplex;
@@ -263,12 +270,11 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
263 pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET); 270 pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
264 ret = mii_ethtool_sset(&adapter->mii, &cmd); 271 ret = mii_ethtool_sset(&adapter->mii, &cmd);
265 if (ret) 272 if (ret)
266 pr_err("Error: mii_ethtool_sset\n"); 273 netdev_err(adapter->netdev, "Error: mii_ethtool_sset\n");
267 274
268 pch_gbe_phy_sw_reset(hw); 275 pch_gbe_phy_sw_reset(hw);
269 276
270 pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg); 277 pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
271 mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX; 278 mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
272 pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg); 279 pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
273
274} 280}
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index cbbeca3f8c5c..8d5180043c70 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -21,7 +21,6 @@ if NET_PACKET_ENGINE
21config HAMACHI 21config HAMACHI
22 tristate "Packet Engines Hamachi GNIC-II support" 22 tristate "Packet Engines Hamachi GNIC-II support"
23 depends on PCI 23 depends on PCI
24 select NET_CORE
25 select MII 24 select MII
26 ---help--- 25 ---help---
27 If you have a Gigabit Ethernet card of this type, say Y and read 26 If you have a Gigabit Ethernet card of this type, say Y and read
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 322a36b76727..3fe09ab2d7c9 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 80 56#define _NETXEN_NIC_LINUX_SUBVERSION 81
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.80" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.81"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
@@ -1855,7 +1855,7 @@ static const struct netxen_brdinfo netxen_boards[] = {
1855 1855
1856#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) 1856#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards)
1857 1857
1858static inline void get_brd_name_by_type(u32 type, char *name) 1858static inline int netxen_nic_get_brd_name_by_type(u32 type, char *name)
1859{ 1859{
1860 int i, found = 0; 1860 int i, found = 0;
1861 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { 1861 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
@@ -1864,10 +1864,14 @@ static inline void get_brd_name_by_type(u32 type, char *name)
1864 found = 1; 1864 found = 1;
1865 break; 1865 break;
1866 } 1866 }
1867 }
1867 1868
1869 if (!found) {
1870 strcpy(name, "Unknown");
1871 return -EINVAL;
1868 } 1872 }
1869 if (!found) 1873
1870 name = "Unknown"; 1874 return 0;
1871} 1875}
1872 1876
1873static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) 1877static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 28e076960bcb..32c790659f9c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -734,6 +734,9 @@ enum {
734#define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700)) 734#define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700))
735#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X)) 735#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X))
736#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X)) 736#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X))
737#define NETXEN_INTR_MODE_REG NETXEN_NIC_REG(0x44)
738#define NETXEN_MSI_MODE 0x1
739#define NETXEN_INTX_MODE 0x2
737 740
738#define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18)) 741#define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18))
739#define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c)) 742#define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index af951f343ff6..c401b0b4353d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -592,48 +592,60 @@ static const struct net_device_ops netxen_netdev_ops = {
592#endif 592#endif
593}; 593};
594 594
595static void 595static inline bool netxen_function_zero(struct pci_dev *pdev)
596netxen_setup_intr(struct netxen_adapter *adapter)
597{ 596{
598 struct netxen_legacy_intr_set *legacy_intrp; 597 return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
599 struct pci_dev *pdev = adapter->pdev; 598}
600 int err, num_msix;
601 599
602 if (adapter->rss_supported) { 600static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
603 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? 601 u32 mode)
604 MSIX_ENTRIES_PER_ADAPTER : 2; 602{
605 } else 603 NXWR32(adapter, NETXEN_INTR_MODE_REG, mode);
606 num_msix = 1; 604}
607 605
608 adapter->max_sds_rings = 1; 606static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter)
607{
608 return NXRD32(adapter, NETXEN_INTR_MODE_REG);
609}
609 610
610 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); 611static void
612netxen_initialize_interrupt_registers(struct netxen_adapter *adapter)
613{
614 struct netxen_legacy_intr_set *legacy_intrp;
615 u32 tgt_status_reg, int_state_reg;
611 616
612 if (adapter->ahw.revision_id >= NX_P3_B0) 617 if (adapter->ahw.revision_id >= NX_P3_B0)
613 legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; 618 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
614 else 619 else
615 legacy_intrp = &legacy_intr[0]; 620 legacy_intrp = &legacy_intr[0];
616 621
622 tgt_status_reg = legacy_intrp->tgt_status_reg;
623 int_state_reg = ISR_INT_STATE_REG;
624
617 adapter->int_vec_bit = legacy_intrp->int_vec_bit; 625 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
618 adapter->tgt_status_reg = netxen_get_ioaddr(adapter, 626 adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg);
619 legacy_intrp->tgt_status_reg);
620 adapter->tgt_mask_reg = netxen_get_ioaddr(adapter, 627 adapter->tgt_mask_reg = netxen_get_ioaddr(adapter,
621 legacy_intrp->tgt_mask_reg); 628 legacy_intrp->tgt_mask_reg);
622 adapter->pci_int_reg = netxen_get_ioaddr(adapter, 629 adapter->pci_int_reg = netxen_get_ioaddr(adapter,
623 legacy_intrp->pci_int_reg); 630 legacy_intrp->pci_int_reg);
624 adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR); 631 adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR);
625 632
626 if (adapter->ahw.revision_id >= NX_P3_B1) 633 if (adapter->ahw.revision_id >= NX_P3_B1)
627 adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, 634 adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
628 ISR_INT_STATE_REG); 635 int_state_reg);
629 else 636 else
630 adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, 637 adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
631 CRB_INT_VECTOR); 638 CRB_INT_VECTOR);
639}
632 640
633 netxen_set_msix_bit(pdev, 0); 641static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
642 int num_msix)
643{
644 struct pci_dev *pdev = adapter->pdev;
645 u32 value;
646 int err;
634 647
635 if (adapter->msix_supported) { 648 if (adapter->msix_supported) {
636
637 netxen_init_msix_entries(adapter, num_msix); 649 netxen_init_msix_entries(adapter, num_msix);
638 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); 650 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
639 if (err == 0) { 651 if (err == 0) {
@@ -644,26 +656,59 @@ netxen_setup_intr(struct netxen_adapter *adapter)
644 adapter->max_sds_rings = num_msix; 656 adapter->max_sds_rings = num_msix;
645 657
646 dev_info(&pdev->dev, "using msi-x interrupts\n"); 658 dev_info(&pdev->dev, "using msi-x interrupts\n");
647 return; 659 return 0;
648 } 660 }
649
650 if (err > 0)
651 pci_disable_msix(pdev);
652
653 /* fall through for msi */ 661 /* fall through for msi */
654 } 662 }
655 663
656 if (use_msi && !pci_enable_msi(pdev)) { 664 if (use_msi && !pci_enable_msi(pdev)) {
665 value = msi_tgt_status[adapter->ahw.pci_func];
657 adapter->flags |= NETXEN_NIC_MSI_ENABLED; 666 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
658 adapter->tgt_status_reg = netxen_get_ioaddr(adapter, 667 adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value);
659 msi_tgt_status[adapter->ahw.pci_func]);
660 dev_info(&pdev->dev, "using msi interrupts\n");
661 adapter->msix_entries[0].vector = pdev->irq; 668 adapter->msix_entries[0].vector = pdev->irq;
662 return; 669 dev_info(&pdev->dev, "using msi interrupts\n");
670 return 0;
663 } 671 }
664 672
665 dev_info(&pdev->dev, "using legacy interrupts\n"); 673 dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n");
666 adapter->msix_entries[0].vector = pdev->irq; 674 return -EIO;
675}
676
677static int netxen_setup_intr(struct netxen_adapter *adapter)
678{
679 struct pci_dev *pdev = adapter->pdev;
680 int num_msix;
681
682 if (adapter->rss_supported)
683 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
684 MSIX_ENTRIES_PER_ADAPTER : 2;
685 else
686 num_msix = 1;
687
688 adapter->max_sds_rings = 1;
689 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
690
691 netxen_initialize_interrupt_registers(adapter);
692 netxen_set_msix_bit(pdev, 0);
693
694 if (netxen_function_zero(pdev)) {
695 if (!netxen_setup_msi_interrupts(adapter, num_msix))
696 netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
697 else
698 netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE);
699 } else {
700 if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE &&
701 netxen_setup_msi_interrupts(adapter, num_msix)) {
702 dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n");
703 return -EIO;
704 }
705 }
706
707 if (!NETXEN_IS_MSI_FAMILY(adapter)) {
708 adapter->msix_entries[0].vector = pdev->irq;
709 dev_info(&pdev->dev, "using legacy interrupts\n");
710 }
711 return 0;
667} 712}
668 713
669static void 714static void
@@ -841,7 +886,9 @@ netxen_check_options(struct netxen_adapter *adapter)
841 } 886 }
842 887
843 if (adapter->portnum == 0) { 888 if (adapter->portnum == 0) {
844 get_brd_name_by_type(adapter->ahw.board_type, brd_name); 889 if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type,
890 brd_name))
891 strcpy(serial_num, "Unknown");
845 892
846 pr_info("%s: %s Board S/N %s Chip rev 0x%x\n", 893 pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
847 module_name(THIS_MODULE), 894 module_name(THIS_MODULE),
@@ -860,9 +907,9 @@ netxen_check_options(struct netxen_adapter *adapter)
860 adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0; 907 adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
861 } 908 }
862 909
863 dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n", 910 dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n",
864 fw_major, fw_minor, fw_build, 911 NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build,
865 adapter->ahw.cut_through ? "cut-through" : "legacy"); 912 adapter->ahw.cut_through ? "cut-through" : "legacy");
866 913
867 if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) 914 if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
868 adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1); 915 adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
@@ -1508,7 +1555,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1508 1555
1509 netxen_nic_clear_stats(adapter); 1556 netxen_nic_clear_stats(adapter);
1510 1557
1511 netxen_setup_intr(adapter); 1558 err = netxen_setup_intr(adapter);
1559
1560 if (err) {
1561 dev_err(&adapter->pdev->dev,
1562 "Failed to setup interrupts, error = %d\n", err);
1563 goto err_out_disable_msi;
1564 }
1512 1565
1513 err = netxen_setup_netdev(adapter, netdev); 1566 err = netxen_setup_netdev(adapter, netdev);
1514 if (err) 1567 if (err)
@@ -1596,7 +1649,7 @@ static void netxen_nic_remove(struct pci_dev *pdev)
1596 clear_bit(__NX_RESETTING, &adapter->state); 1649 clear_bit(__NX_RESETTING, &adapter->state);
1597 1650
1598 netxen_teardown_intr(adapter); 1651 netxen_teardown_intr(adapter);
1599 1652 netxen_set_interrupt_mode(adapter, 0);
1600 netxen_remove_diag_entries(adapter); 1653 netxen_remove_diag_entries(adapter);
1601 1654
1602 netxen_cleanup_pci_map(adapter); 1655 netxen_cleanup_pci_map(adapter);
@@ -2721,7 +2774,7 @@ netxen_store_bridged_mode(struct device *dev,
2721 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) 2774 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
2722 goto err_out; 2775 goto err_out;
2723 2776
2724 if (strict_strtoul(buf, 2, &new)) 2777 if (kstrtoul(buf, 2, &new))
2725 goto err_out; 2778 goto err_out;
2726 2779
2727 if (!netxen_config_bridged_mode(adapter, !!new)) 2780 if (!netxen_config_bridged_mode(adapter, !!new))
@@ -2760,7 +2813,7 @@ netxen_store_diag_mode(struct device *dev,
2760 struct netxen_adapter *adapter = dev_get_drvdata(dev); 2813 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2761 unsigned long new; 2814 unsigned long new;
2762 2815
2763 if (strict_strtoul(buf, 2, &new)) 2816 if (kstrtoul(buf, 2, &new))
2764 return -EINVAL; 2817 return -EINVAL;
2765 2818
2766 if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) 2819 if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
@@ -3311,7 +3364,7 @@ static int netxen_netdev_event(struct notifier_block *this,
3311 unsigned long event, void *ptr) 3364 unsigned long event, void *ptr)
3312{ 3365{
3313 struct netxen_adapter *adapter; 3366 struct netxen_adapter *adapter;
3314 struct net_device *dev = (struct net_device *)ptr; 3367 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3315 struct net_device *orig_dev = dev; 3368 struct net_device *orig_dev = dev;
3316 struct net_device *slave; 3369 struct net_device *slave;
3317 3370
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index c1b693cb3df3..b00cf5665eab 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
38 38
39#define _QLCNIC_LINUX_MAJOR 5 39#define _QLCNIC_LINUX_MAJOR 5
40#define _QLCNIC_LINUX_MINOR 2 40#define _QLCNIC_LINUX_MINOR 2
41#define _QLCNIC_LINUX_SUBVERSION 42 41#define _QLCNIC_LINUX_SUBVERSION 44
42#define QLCNIC_LINUX_VERSIONID "5.2.42" 42#define QLCNIC_LINUX_VERSIONID "5.2.44"
43#define QLCNIC_DRV_IDC_VER 0x01 43#define QLCNIC_DRV_IDC_VER 0x01
44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -303,7 +303,6 @@ extern int qlcnic_use_msi;
303extern int qlcnic_use_msi_x; 303extern int qlcnic_use_msi_x;
304extern int qlcnic_auto_fw_reset; 304extern int qlcnic_auto_fw_reset;
305extern int qlcnic_load_fw_file; 305extern int qlcnic_load_fw_file;
306extern int qlcnic_config_npars;
307 306
308/* Number of status descriptors to handle per interrupt */ 307/* Number of status descriptors to handle per interrupt */
309#define MAX_STATUS_HANDLE (64) 308#define MAX_STATUS_HANDLE (64)
@@ -394,6 +393,9 @@ struct qlcnic_fw_dump {
394 u32 size; /* total size of the dump */ 393 u32 size; /* total size of the dump */
395 void *data; /* dump data area */ 394 void *data; /* dump data area */
396 struct qlcnic_dump_template_hdr *tmpl_hdr; 395 struct qlcnic_dump_template_hdr *tmpl_hdr;
396 dma_addr_t phys_addr;
397 void *dma_buffer;
398 bool use_pex_dma;
397}; 399};
398 400
399/* 401/*
@@ -427,6 +429,7 @@ struct qlcnic_hardware_context {
427 u8 nic_mode; 429 u8 nic_mode;
428 char diag_cnt; 430 char diag_cnt;
429 431
432 u16 max_uc_count;
430 u16 port_type; 433 u16 port_type;
431 u16 board_type; 434 u16 board_type;
432 u16 supported_type; 435 u16 supported_type;
@@ -443,9 +446,10 @@ struct qlcnic_hardware_context {
443 u16 max_mtu; 446 u16 max_mtu;
444 u32 msg_enable; 447 u32 msg_enable;
445 u16 act_pci_func; 448 u16 act_pci_func;
449 u16 max_pci_func;
446 450
447 u32 capabilities; 451 u32 capabilities;
448 u32 capabilities2; 452 u32 extra_capability[3];
449 u32 temp; 453 u32 temp;
450 u32 int_vec_bit; 454 u32 int_vec_bit;
451 u32 fw_hal_version; 455 u32 fw_hal_version;
@@ -815,7 +819,8 @@ struct qlcnic_mac_list_s {
815 819
816#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2 820#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
817#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3 821#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
818#define QLCNIC_FW_CAPABILITY_2_OCBB BIT_5 822#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
823#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
819 824
820/* module types */ 825/* module types */
821#define LINKEVENT_MODULE_NOT_PRESENT 1 826#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -913,6 +918,9 @@ struct qlcnic_ipaddr {
913#define QLCNIC_IS_TSO_CAPABLE(adapter) \ 918#define QLCNIC_IS_TSO_CAPABLE(adapter) \
914 ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) 919 ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
915 920
921#define QLCNIC_BEACON_EANBLE 0xC
922#define QLCNIC_BEACON_DISABLE 0xD
923
916#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 924#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
917#define QLCNIC_MSIX_TBL_SPACE 8192 925#define QLCNIC_MSIX_TBL_SPACE 8192
918#define QLCNIC_PCI_REG_MSIX_TBL 0x44 926#define QLCNIC_PCI_REG_MSIX_TBL 0x44
@@ -932,6 +940,7 @@ struct qlcnic_ipaddr {
932#define __QLCNIC_SRIOV_ENABLE 10 940#define __QLCNIC_SRIOV_ENABLE 10
933#define __QLCNIC_SRIOV_CAPABLE 11 941#define __QLCNIC_SRIOV_CAPABLE 11
934#define __QLCNIC_MBX_POLL_ENABLE 12 942#define __QLCNIC_MBX_POLL_ENABLE 12
943#define __QLCNIC_DIAG_MODE 13
935 944
936#define QLCNIC_INTERRUPT_TEST 1 945#define QLCNIC_INTERRUPT_TEST 1
937#define QLCNIC_LOOPBACK_TEST 2 946#define QLCNIC_LOOPBACK_TEST 2
@@ -1467,7 +1476,7 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
1467void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter); 1476void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
1468 1477
1469int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); 1478int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1470int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *); 1479int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
1471int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); 1480int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
1472netdev_features_t qlcnic_fix_features(struct net_device *netdev, 1481netdev_features_t qlcnic_fix_features(struct net_device *netdev,
1473 netdev_features_t features); 1482 netdev_features_t features);
@@ -1489,7 +1498,9 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1489int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t); 1498int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
1490int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32); 1499int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
1491void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); 1500void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
1501void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
1492int qlcnic_enable_msix(struct qlcnic_adapter *, u32); 1502int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
1503void qlcnic_set_drv_version(struct qlcnic_adapter *);
1493 1504
1494/* eSwitch management functions */ 1505/* eSwitch management functions */
1495int qlcnic_config_switch_port(struct qlcnic_adapter *, 1506int qlcnic_config_switch_port(struct qlcnic_adapter *,
@@ -1543,6 +1554,7 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
1543int qlcnic_reset_npar_config(struct qlcnic_adapter *); 1554int qlcnic_reset_npar_config(struct qlcnic_adapter *);
1544int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *); 1555int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
1545void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16); 1556void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16);
1557int qlcnic_get_beacon_state(struct qlcnic_adapter *, u8 *);
1546int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); 1558int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
1547int qlcnic_read_mac_addr(struct qlcnic_adapter *); 1559int qlcnic_read_mac_addr(struct qlcnic_adapter *);
1548int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); 1560int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
@@ -1584,6 +1596,8 @@ struct qlcnic_nic_template {
1584 void (*napi_del)(struct qlcnic_adapter *); 1596 void (*napi_del)(struct qlcnic_adapter *);
1585 void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int); 1597 void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int);
1586 irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *); 1598 irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *);
1599 int (*shutdown)(struct pci_dev *);
1600 int (*resume)(struct qlcnic_adapter *);
1587}; 1601};
1588 1602
1589/* Adapter hardware abstraction */ 1603/* Adapter hardware abstraction */
@@ -1625,6 +1639,7 @@ struct qlcnic_hardware_ops {
1625 int (*config_promisc_mode) (struct qlcnic_adapter *, u32); 1639 int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
1626 void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16); 1640 void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
1627 int (*get_board_info) (struct qlcnic_adapter *); 1641 int (*get_board_info) (struct qlcnic_adapter *);
1642 void (*set_mac_filter_count) (struct qlcnic_adapter *);
1628 void (*free_mac_list) (struct qlcnic_adapter *); 1643 void (*free_mac_list) (struct qlcnic_adapter *);
1629}; 1644};
1630 1645
@@ -1787,6 +1802,18 @@ static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
1787 adapter->ahw->hw_ops->napi_enable(adapter); 1802 adapter->ahw->hw_ops->napi_enable(adapter);
1788} 1803}
1789 1804
1805static inline int __qlcnic_shutdown(struct pci_dev *pdev)
1806{
1807 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1808
1809 return adapter->nic_ops->shutdown(pdev);
1810}
1811
1812static inline int __qlcnic_resume(struct qlcnic_adapter *adapter)
1813{
1814 return adapter->nic_ops->resume(adapter);
1815}
1816
1790static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter) 1817static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
1791{ 1818{
1792 adapter->ahw->hw_ops->napi_disable(adapter); 1819 adapter->ahw->hw_ops->napi_disable(adapter);
@@ -1840,6 +1867,11 @@ static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
1840 return adapter->ahw->hw_ops->free_mac_list(adapter); 1867 return adapter->ahw->hw_ops->free_mac_list(adapter);
1841} 1868}
1842 1869
1870static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
1871{
1872 adapter->ahw->hw_ops->set_mac_filter_count(adapter);
1873}
1874
1843static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, 1875static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
1844 u32 key) 1876 u32 key)
1845{ 1877{
@@ -1886,6 +1918,21 @@ static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
1886 writel(0xfbff, adapter->tgt_mask_reg); 1918 writel(0xfbff, adapter->tgt_mask_reg);
1887} 1919}
1888 1920
1921static inline int qlcnic_get_diag_lock(struct qlcnic_adapter *adapter)
1922{
1923 return test_and_set_bit(__QLCNIC_DIAG_MODE, &adapter->state);
1924}
1925
1926static inline void qlcnic_release_diag_lock(struct qlcnic_adapter *adapter)
1927{
1928 clear_bit(__QLCNIC_DIAG_MODE, &adapter->state);
1929}
1930
1931static inline int qlcnic_check_diag_status(struct qlcnic_adapter *adapter)
1932{
1933 return test_bit(__QLCNIC_DIAG_MODE, &adapter->state);
1934}
1935
1889extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops; 1936extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops;
1890extern const struct ethtool_ops qlcnic_ethtool_ops; 1937extern const struct ethtool_ops qlcnic_ethtool_ops;
1891extern const struct ethtool_ops qlcnic_ethtool_failed_ops; 1938extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b4ff1e35a11d..0913c623a67e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -63,6 +63,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
63 {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1}, 63 {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
64 {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, 64 {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
65 {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, 65 {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
66 {QLCNIC_CMD_83XX_SET_DRV_VER, 4, 1},
66 {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, 67 {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
67 {QLCNIC_CMD_CONFIG_VPORT, 4, 4}, 68 {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
68 {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, 69 {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
@@ -172,6 +173,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
172 .config_promisc_mode = qlcnic_83xx_nic_set_promisc, 173 .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
173 .change_l2_filter = qlcnic_83xx_change_l2_filter, 174 .change_l2_filter = qlcnic_83xx_change_l2_filter,
174 .get_board_info = qlcnic_83xx_get_port_info, 175 .get_board_info = qlcnic_83xx_get_port_info,
176 .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count,
175 .free_mac_list = qlcnic_82xx_free_mac_list, 177 .free_mac_list = qlcnic_82xx_free_mac_list,
176}; 178};
177 179
@@ -184,6 +186,8 @@ static struct qlcnic_nic_template qlcnic_83xx_ops = {
184 .napi_del = qlcnic_83xx_napi_del, 186 .napi_del = qlcnic_83xx_napi_del,
185 .config_ipaddr = qlcnic_83xx_config_ipaddr, 187 .config_ipaddr = qlcnic_83xx_config_ipaddr,
186 .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, 188 .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
189 .shutdown = qlcnic_83xx_shutdown,
190 .resume = qlcnic_83xx_resume,
187}; 191};
188 192
189void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw) 193void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw)
@@ -312,6 +316,11 @@ inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
312 writel(0, adapter->tgt_mask_reg); 316 writel(0, adapter->tgt_mask_reg);
313} 317}
314 318
319inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
320{
321 writel(1, adapter->tgt_mask_reg);
322}
323
315/* Enable MSI-x and INT-x interrupts */ 324/* Enable MSI-x and INT-x interrupts */
316void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter, 325void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter,
317 struct qlcnic_host_sds_ring *sds_ring) 326 struct qlcnic_host_sds_ring *sds_ring)
@@ -458,6 +467,9 @@ void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
458{ 467{
459 u32 num_msix; 468 u32 num_msix;
460 469
470 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
471 qlcnic_83xx_set_legacy_intr_mask(adapter);
472
461 qlcnic_83xx_disable_mbx_intr(adapter); 473 qlcnic_83xx_disable_mbx_intr(adapter);
462 474
463 if (adapter->flags & QLCNIC_MSIX_ENABLED) 475 if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -474,7 +486,6 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
474{ 486{
475 irq_handler_t handler; 487 irq_handler_t handler;
476 u32 val; 488 u32 val;
477 char name[32];
478 int err = 0; 489 int err = 0;
479 unsigned long flags = 0; 490 unsigned long flags = 0;
480 491
@@ -485,9 +496,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
485 if (adapter->flags & QLCNIC_MSIX_ENABLED) { 496 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
486 handler = qlcnic_83xx_handle_aen; 497 handler = qlcnic_83xx_handle_aen;
487 val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector; 498 val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector;
488 snprintf(name, (IFNAMSIZ + 4), 499 err = request_irq(val, handler, flags, "qlcnic-MB", adapter);
489 "%s[%s]", "qlcnic", "aen");
490 err = request_irq(val, handler, flags, name, adapter);
491 if (err) { 500 if (err) {
492 dev_err(&adapter->pdev->dev, 501 dev_err(&adapter->pdev->dev,
493 "failed to register MBX interrupt\n"); 502 "failed to register MBX interrupt\n");
@@ -604,6 +613,22 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
604 return status; 613 return status;
605} 614}
606 615
616void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
617{
618 struct qlcnic_hardware_context *ahw = adapter->ahw;
619 u16 act_pci_fn = ahw->act_pci_func;
620 u16 count;
621
622 ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
623 if (act_pci_fn <= 2)
624 count = (QLC_83XX_MAX_UC_COUNT - QLC_83XX_MAX_MC_COUNT) /
625 act_pci_fn;
626 else
627 count = (QLC_83XX_LB_MAX_FILTERS - QLC_83XX_MAX_MC_COUNT) /
628 act_pci_fn;
629 ahw->max_uc_count = count;
630}
631
607void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter) 632void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
608{ 633{
609 u32 val; 634 u32 val;
@@ -839,7 +864,9 @@ void qlcnic_83xx_idc_aen_work(struct work_struct *work)
839 int i, err = 0; 864 int i, err = 0;
840 865
841 adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work); 866 adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work);
842 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK); 867 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK);
868 if (err)
869 return;
843 870
844 for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++) 871 for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++)
845 cmd.req.arg[i] = adapter->ahw->mbox_aen[i]; 872 cmd.req.arg[i] = adapter->ahw->mbox_aen[i];
@@ -1080,8 +1107,10 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
1080 cap |= QLC_83XX_FW_CAP_LRO_MSS; 1107 cap |= QLC_83XX_FW_CAP_LRO_MSS;
1081 1108
1082 /* set mailbox hdr and capabilities */ 1109 /* set mailbox hdr and capabilities */
1083 qlcnic_alloc_mbx_args(&cmd, adapter, 1110 err = qlcnic_alloc_mbx_args(&cmd, adapter,
1084 QLCNIC_CMD_CREATE_RX_CTX); 1111 QLCNIC_CMD_CREATE_RX_CTX);
1112 if (err)
1113 return err;
1085 1114
1086 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) 1115 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
1087 cmd.req.arg[0] |= (0x3 << 29); 1116 cmd.req.arg[0] |= (0x3 << 29);
@@ -1239,7 +1268,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
1239 mbx.intr_id = 0xffff; 1268 mbx.intr_id = 0xffff;
1240 mbx.src = 0; 1269 mbx.src = 0;
1241 1270
1242 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); 1271 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
1272 if (err)
1273 return err;
1243 1274
1244 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) 1275 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
1245 cmd.req.arg[0] |= (0x3 << 29); 1276 cmd.req.arg[0] |= (0x3 << 29);
@@ -1385,8 +1416,11 @@ int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
1385 1416
1386 if (state) { 1417 if (state) {
1387 /* Get LED configuration */ 1418 /* Get LED configuration */
1388 qlcnic_alloc_mbx_args(&cmd, adapter, 1419 status = qlcnic_alloc_mbx_args(&cmd, adapter,
1389 QLCNIC_CMD_GET_LED_CONFIG); 1420 QLCNIC_CMD_GET_LED_CONFIG);
1421 if (status)
1422 return status;
1423
1390 status = qlcnic_issue_cmd(adapter, &cmd); 1424 status = qlcnic_issue_cmd(adapter, &cmd);
1391 if (status) { 1425 if (status) {
1392 dev_err(&adapter->pdev->dev, 1426 dev_err(&adapter->pdev->dev,
@@ -1400,8 +1434,11 @@ int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
1400 /* Set LED Configuration */ 1434 /* Set LED Configuration */
1401 mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) | 1435 mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) |
1402 LSW(QLC_83XX_LED_CONFIG); 1436 LSW(QLC_83XX_LED_CONFIG);
1403 qlcnic_alloc_mbx_args(&cmd, adapter, 1437 status = qlcnic_alloc_mbx_args(&cmd, adapter,
1404 QLCNIC_CMD_SET_LED_CONFIG); 1438 QLCNIC_CMD_SET_LED_CONFIG);
1439 if (status)
1440 return status;
1441
1405 cmd.req.arg[1] = mbx_in; 1442 cmd.req.arg[1] = mbx_in;
1406 cmd.req.arg[2] = mbx_in; 1443 cmd.req.arg[2] = mbx_in;
1407 cmd.req.arg[3] = mbx_in; 1444 cmd.req.arg[3] = mbx_in;
@@ -1418,8 +1455,11 @@ mbx_err:
1418 1455
1419 } else { 1456 } else {
1420 /* Restoring default LED configuration */ 1457 /* Restoring default LED configuration */
1421 qlcnic_alloc_mbx_args(&cmd, adapter, 1458 status = qlcnic_alloc_mbx_args(&cmd, adapter,
1422 QLCNIC_CMD_SET_LED_CONFIG); 1459 QLCNIC_CMD_SET_LED_CONFIG);
1460 if (status)
1461 return status;
1462
1423 cmd.req.arg[1] = adapter->ahw->mbox_reg[0]; 1463 cmd.req.arg[1] = adapter->ahw->mbox_reg[0];
1424 cmd.req.arg[2] = adapter->ahw->mbox_reg[1]; 1464 cmd.req.arg[2] = adapter->ahw->mbox_reg[1];
1425 cmd.req.arg[3] = adapter->ahw->mbox_reg[2]; 1465 cmd.req.arg[3] = adapter->ahw->mbox_reg[2];
@@ -1489,10 +1529,18 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
1489 return; 1529 return;
1490 1530
1491 if (enable) { 1531 if (enable) {
1492 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC); 1532 status = qlcnic_alloc_mbx_args(&cmd, adapter,
1533 QLCNIC_CMD_INIT_NIC_FUNC);
1534 if (status)
1535 return;
1536
1493 cmd.req.arg[1] = BIT_0 | BIT_31; 1537 cmd.req.arg[1] = BIT_0 | BIT_31;
1494 } else { 1538 } else {
1495 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC); 1539 status = qlcnic_alloc_mbx_args(&cmd, adapter,
1540 QLCNIC_CMD_STOP_NIC_FUNC);
1541 if (status)
1542 return;
1543
1496 cmd.req.arg[1] = BIT_0 | BIT_31; 1544 cmd.req.arg[1] = BIT_0 | BIT_31;
1497 } 1545 }
1498 status = qlcnic_issue_cmd(adapter, &cmd); 1546 status = qlcnic_issue_cmd(adapter, &cmd);
@@ -1509,7 +1557,10 @@ int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
1509 struct qlcnic_cmd_args cmd; 1557 struct qlcnic_cmd_args cmd;
1510 int err; 1558 int err;
1511 1559
1512 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG); 1560 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG);
1561 if (err)
1562 return err;
1563
1513 cmd.req.arg[1] = adapter->ahw->port_config; 1564 cmd.req.arg[1] = adapter->ahw->port_config;
1514 err = qlcnic_issue_cmd(adapter, &cmd); 1565 err = qlcnic_issue_cmd(adapter, &cmd);
1515 if (err) 1566 if (err)
@@ -1523,7 +1574,10 @@ int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
1523 struct qlcnic_cmd_args cmd; 1574 struct qlcnic_cmd_args cmd;
1524 int err; 1575 int err;
1525 1576
1526 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG); 1577 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG);
1578 if (err)
1579 return err;
1580
1527 err = qlcnic_issue_cmd(adapter, &cmd); 1581 err = qlcnic_issue_cmd(adapter, &cmd);
1528 if (err) 1582 if (err)
1529 dev_info(&adapter->pdev->dev, "Get Port config failed\n"); 1583 dev_info(&adapter->pdev->dev, "Get Port config failed\n");
@@ -1539,7 +1593,10 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
1539 u32 temp; 1593 u32 temp;
1540 struct qlcnic_cmd_args cmd; 1594 struct qlcnic_cmd_args cmd;
1541 1595
1542 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT); 1596 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT);
1597 if (err)
1598 return err;
1599
1543 temp = adapter->recv_ctx->context_id << 16; 1600 temp = adapter->recv_ctx->context_id << 16;
1544 cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp; 1601 cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp;
1545 err = qlcnic_issue_cmd(adapter, &cmd); 1602 err = qlcnic_issue_cmd(adapter, &cmd);
@@ -1570,7 +1627,11 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
1570 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) 1627 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
1571 return -EIO; 1628 return -EIO;
1572 1629
1573 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); 1630 err = qlcnic_alloc_mbx_args(&cmd, adapter,
1631 QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
1632 if (err)
1633 return err;
1634
1574 qlcnic_83xx_set_interface_id_promisc(adapter, &temp); 1635 qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
1575 cmd.req.arg[1] = (mode ? 1 : 0) | temp; 1636 cmd.req.arg[1] = (mode ? 1 : 0) | temp;
1576 err = qlcnic_issue_cmd(adapter, &cmd); 1637 err = qlcnic_issue_cmd(adapter, &cmd);
@@ -1588,16 +1649,24 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1588 struct qlcnic_hardware_context *ahw = adapter->ahw; 1649 struct qlcnic_hardware_context *ahw = adapter->ahw;
1589 int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings; 1650 int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
1590 1651
1591 QLCDB(adapter, DRV, "%s loopback test in progress\n",
1592 mode == QLCNIC_ILB_MODE ? "internal" : "external");
1593 if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { 1652 if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
1594 dev_warn(&adapter->pdev->dev, 1653 netdev_warn(netdev,
1595 "Loopback test not supported for non privilege function\n"); 1654 "Loopback test not supported in non privileged mode\n");
1596 return ret; 1655 return ret;
1597 } 1656 }
1598 1657
1599 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 1658 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
1659 netdev_info(netdev, "Device is resetting\n");
1600 return -EBUSY; 1660 return -EBUSY;
1661 }
1662
1663 if (qlcnic_get_diag_lock(adapter)) {
1664 netdev_info(netdev, "Device is in diagnostics mode\n");
1665 return -EBUSY;
1666 }
1667
1668 netdev_info(netdev, "%s loopback test in progress\n",
1669 mode == QLCNIC_ILB_MODE ? "internal" : "external");
1601 1670
1602 ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST, 1671 ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
1603 max_sds_rings); 1672 max_sds_rings);
@@ -1610,13 +1679,19 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1610 1679
1611 /* Poll for link up event before running traffic */ 1680 /* Poll for link up event before running traffic */
1612 do { 1681 do {
1613 msleep(500); 1682 msleep(QLC_83XX_LB_MSLEEP_COUNT);
1614 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) 1683 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
1615 qlcnic_83xx_process_aen(adapter); 1684 qlcnic_83xx_process_aen(adapter);
1616 1685
1617 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { 1686 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
1618 dev_info(&adapter->pdev->dev, 1687 netdev_info(netdev,
1619 "Firmware didn't sent link up event to loopback request\n"); 1688 "Device is resetting, free LB test resources\n");
1689 ret = -EIO;
1690 goto free_diag_res;
1691 }
1692 if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
1693 netdev_info(netdev,
1694 "Firmware didn't sent link up event to loopback request\n");
1620 ret = -QLCNIC_FW_NOT_RESPOND; 1695 ret = -QLCNIC_FW_NOT_RESPOND;
1621 qlcnic_83xx_clear_lb_mode(adapter, mode); 1696 qlcnic_83xx_clear_lb_mode(adapter, mode);
1622 goto free_diag_res; 1697 goto free_diag_res;
@@ -1638,13 +1713,14 @@ free_diag_res:
1638 1713
1639fail_diag_alloc: 1714fail_diag_alloc:
1640 adapter->max_sds_rings = max_sds_rings; 1715 adapter->max_sds_rings = max_sds_rings;
1641 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1716 qlcnic_release_diag_lock(adapter);
1642 return ret; 1717 return ret;
1643} 1718}
1644 1719
1645int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) 1720int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1646{ 1721{
1647 struct qlcnic_hardware_context *ahw = adapter->ahw; 1722 struct qlcnic_hardware_context *ahw = adapter->ahw;
1723 struct net_device *netdev = adapter->netdev;
1648 int status = 0, loop = 0; 1724 int status = 0, loop = 0;
1649 u32 config; 1725 u32 config;
1650 1726
@@ -1662,9 +1738,9 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1662 1738
1663 status = qlcnic_83xx_set_port_config(adapter); 1739 status = qlcnic_83xx_set_port_config(adapter);
1664 if (status) { 1740 if (status) {
1665 dev_err(&adapter->pdev->dev, 1741 netdev_err(netdev,
1666 "Failed to Set Loopback Mode = 0x%x.\n", 1742 "Failed to Set Loopback Mode = 0x%x.\n",
1667 ahw->port_config); 1743 ahw->port_config);
1668 ahw->port_config = config; 1744 ahw->port_config = config;
1669 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); 1745 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
1670 return status; 1746 return status;
@@ -1672,13 +1748,19 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1672 1748
1673 /* Wait for Link and IDC Completion AEN */ 1749 /* Wait for Link and IDC Completion AEN */
1674 do { 1750 do {
1675 msleep(300); 1751 msleep(QLC_83XX_LB_MSLEEP_COUNT);
1676 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) 1752 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
1677 qlcnic_83xx_process_aen(adapter); 1753 qlcnic_83xx_process_aen(adapter);
1678 1754
1679 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { 1755 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
1680 dev_err(&adapter->pdev->dev, 1756 netdev_info(netdev,
1681 "FW did not generate IDC completion AEN\n"); 1757 "Device is resetting, free LB test resources\n");
1758 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
1759 return -EIO;
1760 }
1761 if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
1762 netdev_err(netdev,
1763 "Did not receive IDC completion AEN\n");
1682 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); 1764 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
1683 qlcnic_83xx_clear_lb_mode(adapter, mode); 1765 qlcnic_83xx_clear_lb_mode(adapter, mode);
1684 return -EIO; 1766 return -EIO;
@@ -1693,6 +1775,7 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1693int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) 1775int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1694{ 1776{
1695 struct qlcnic_hardware_context *ahw = adapter->ahw; 1777 struct qlcnic_hardware_context *ahw = adapter->ahw;
1778 struct net_device *netdev = adapter->netdev;
1696 int status = 0, loop = 0; 1779 int status = 0, loop = 0;
1697 u32 config = ahw->port_config; 1780 u32 config = ahw->port_config;
1698 1781
@@ -1704,9 +1787,9 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1704 1787
1705 status = qlcnic_83xx_set_port_config(adapter); 1788 status = qlcnic_83xx_set_port_config(adapter);
1706 if (status) { 1789 if (status) {
1707 dev_err(&adapter->pdev->dev, 1790 netdev_err(netdev,
1708 "Failed to Clear Loopback Mode = 0x%x.\n", 1791 "Failed to Clear Loopback Mode = 0x%x.\n",
1709 ahw->port_config); 1792 ahw->port_config);
1710 ahw->port_config = config; 1793 ahw->port_config = config;
1711 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); 1794 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
1712 return status; 1795 return status;
@@ -1714,13 +1797,20 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1714 1797
1715 /* Wait for Link and IDC Completion AEN */ 1798 /* Wait for Link and IDC Completion AEN */
1716 do { 1799 do {
1717 msleep(300); 1800 msleep(QLC_83XX_LB_MSLEEP_COUNT);
1718 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) 1801 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
1719 qlcnic_83xx_process_aen(adapter); 1802 qlcnic_83xx_process_aen(adapter);
1720 1803
1721 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { 1804 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
1722 dev_err(&adapter->pdev->dev, 1805 netdev_info(netdev,
1723 "Firmware didn't sent IDC completion AEN\n"); 1806 "Device is resetting, free LB test resources\n");
1807 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
1808 return -EIO;
1809 }
1810
1811 if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
1812 netdev_err(netdev,
1813 "Did not receive IDC completion AEN\n");
1724 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); 1814 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
1725 return -EIO; 1815 return -EIO;
1726 } 1816 }
@@ -1749,7 +1839,11 @@ void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
1749 u32 temp = 0, temp_ip; 1839 u32 temp = 0, temp_ip;
1750 struct qlcnic_cmd_args cmd; 1840 struct qlcnic_cmd_args cmd;
1751 1841
1752 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR); 1842 err = qlcnic_alloc_mbx_args(&cmd, adapter,
1843 QLCNIC_CMD_CONFIGURE_IP_ADDR);
1844 if (err)
1845 return;
1846
1753 qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp); 1847 qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp);
1754 1848
1755 if (mode == QLCNIC_IP_UP) 1849 if (mode == QLCNIC_IP_UP)
@@ -1788,7 +1882,10 @@ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
1788 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) 1882 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
1789 return 0; 1883 return 0;
1790 1884
1791 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO); 1885 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO);
1886 if (err)
1887 return err;
1888
1792 temp = adapter->recv_ctx->context_id << 16; 1889 temp = adapter->recv_ctx->context_id << 16;
1793 arg1 = lro_bit_mask | temp; 1890 arg1 = lro_bit_mask | temp;
1794 cmd.req.arg[1] = arg1; 1891 cmd.req.arg[1] = arg1;
@@ -1810,8 +1907,9 @@ int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
1810 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 1907 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1811 0x255b0ec26d5a56daULL }; 1908 0x255b0ec26d5a56daULL };
1812 1909
1813 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS); 1910 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
1814 1911 if (err)
1912 return err;
1815 /* 1913 /*
1816 * RSS request: 1914 * RSS request:
1817 * bits 3-0: Rsvd 1915 * bits 3-0: Rsvd
@@ -1917,7 +2015,10 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
1917 struct qlcnic_cmd_args cmd; 2015 struct qlcnic_cmd_args cmd;
1918 u32 mac_low, mac_high; 2016 u32 mac_low, mac_high;
1919 2017
1920 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); 2018 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
2019 if (err)
2020 return err;
2021
1921 qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd); 2022 qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd);
1922 err = qlcnic_issue_cmd(adapter, &cmd); 2023 err = qlcnic_issue_cmd(adapter, &cmd);
1923 2024
@@ -1948,7 +2049,10 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
1948 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) 2049 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
1949 return; 2050 return;
1950 2051
1951 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL); 2052 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
2053 if (err)
2054 return;
2055
1952 if (coal->type == QLCNIC_INTR_COAL_TYPE_RX) { 2056 if (coal->type == QLCNIC_INTR_COAL_TYPE_RX) {
1953 temp = adapter->recv_ctx->context_id; 2057 temp = adapter->recv_ctx->context_id;
1954 cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16; 2058 cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
@@ -2020,7 +2124,10 @@ int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable)
2020 return err; 2124 return err;
2021 } 2125 }
2022 2126
2023 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH); 2127 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH);
2128 if (err)
2129 return err;
2130
2024 cmd.req.arg[1] = (port & 0xf) | BIT_4; 2131 cmd.req.arg[1] = (port & 0xf) | BIT_4;
2025 err = qlcnic_issue_cmd(adapter, &cmd); 2132 err = qlcnic_issue_cmd(adapter, &cmd);
2026 2133
@@ -2048,7 +2155,10 @@ int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
2048 return err; 2155 return err;
2049 } 2156 }
2050 2157
2051 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); 2158 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
2159 if (err)
2160 return err;
2161
2052 cmd.req.arg[1] = (nic->pci_func << 16); 2162 cmd.req.arg[1] = (nic->pci_func << 16);
2053 cmd.req.arg[2] = 0x1 << 16; 2163 cmd.req.arg[2] = 0x1 << 16;
2054 cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16); 2164 cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16);
@@ -2079,13 +2189,17 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
2079 u32 temp; 2189 u32 temp;
2080 u8 op = 0; 2190 u8 op = 0;
2081 struct qlcnic_cmd_args cmd; 2191 struct qlcnic_cmd_args cmd;
2192 struct qlcnic_hardware_context *ahw = adapter->ahw;
2082 2193
2083 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); 2194 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
2084 if (func_id != adapter->ahw->pci_func) { 2195 if (err)
2196 return err;
2197
2198 if (func_id != ahw->pci_func) {
2085 temp = func_id << 16; 2199 temp = func_id << 16;
2086 cmd.req.arg[1] = op | BIT_31 | temp; 2200 cmd.req.arg[1] = op | BIT_31 | temp;
2087 } else { 2201 } else {
2088 cmd.req.arg[1] = adapter->ahw->pci_func << 16; 2202 cmd.req.arg[1] = ahw->pci_func << 16;
2089 } 2203 }
2090 err = qlcnic_issue_cmd(adapter, &cmd); 2204 err = qlcnic_issue_cmd(adapter, &cmd);
2091 if (err) { 2205 if (err) {
@@ -2112,6 +2226,9 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
2112 temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17; 2226 temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
2113 npar_info->max_linkspeed_reg_offset = temp; 2227 npar_info->max_linkspeed_reg_offset = temp;
2114 } 2228 }
2229 if (npar_info->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS)
2230 memcpy(ahw->extra_capability, &cmd.rsp.arg[16],
2231 sizeof(ahw->extra_capability));
2115 2232
2116out: 2233out:
2117 qlcnic_free_mbx_args(&cmd); 2234 qlcnic_free_mbx_args(&cmd);
@@ -2121,26 +2238,28 @@ out:
2121int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, 2238int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
2122 struct qlcnic_pci_info *pci_info) 2239 struct qlcnic_pci_info *pci_info)
2123{ 2240{
2241 struct qlcnic_hardware_context *ahw = adapter->ahw;
2242 struct device *dev = &adapter->pdev->dev;
2243 struct qlcnic_cmd_args cmd;
2124 int i, err = 0, j = 0; 2244 int i, err = 0, j = 0;
2125 u32 temp; 2245 u32 temp;
2126 struct qlcnic_cmd_args cmd;
2127 2246
2128 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); 2247 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
2248 if (err)
2249 return err;
2250
2129 err = qlcnic_issue_cmd(adapter, &cmd); 2251 err = qlcnic_issue_cmd(adapter, &cmd);
2130 2252
2131 adapter->ahw->act_pci_func = 0; 2253 ahw->act_pci_func = 0;
2132 if (err == QLCNIC_RCODE_SUCCESS) { 2254 if (err == QLCNIC_RCODE_SUCCESS) {
2133 pci_info->func_count = cmd.rsp.arg[1] & 0xFF; 2255 ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
2134 dev_info(&adapter->pdev->dev,
2135 "%s: total functions = %d\n",
2136 __func__, pci_info->func_count);
2137 for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) { 2256 for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
2138 pci_info->id = cmd.rsp.arg[i] & 0xFFFF; 2257 pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
2139 pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; 2258 pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
2140 i++; 2259 i++;
2141 pci_info->type = cmd.rsp.arg[i] & 0xFFFF; 2260 pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
2142 if (pci_info->type == QLCNIC_TYPE_NIC) 2261 if (pci_info->type == QLCNIC_TYPE_NIC)
2143 adapter->ahw->act_pci_func++; 2262 ahw->act_pci_func++;
2144 temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; 2263 temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
2145 pci_info->default_port = temp; 2264 pci_info->default_port = temp;
2146 i++; 2265 i++;
@@ -2152,18 +2271,21 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
2152 i++; 2271 i++;
2153 memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2); 2272 memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
2154 i = i + 3; 2273 i = i + 3;
2155 2274 if (ahw->op_mode == QLCNIC_MGMT_FUNC)
2156 dev_info(&adapter->pdev->dev, "%s:\n" 2275 dev_info(dev, "id = %d active = %d type = %d\n"
2157 "\tid = %d active = %d type = %d\n" 2276 "\tport = %d min bw = %d max bw = %d\n"
2158 "\tport = %d min bw = %d max bw = %d\n" 2277 "\tmac_addr = %pM\n", pci_info->id,
2159 "\tmac_addr = %pM\n", __func__, 2278 pci_info->active, pci_info->type,
2160 pci_info->id, pci_info->active, pci_info->type, 2279 pci_info->default_port,
2161 pci_info->default_port, pci_info->tx_min_bw, 2280 pci_info->tx_min_bw,
2162 pci_info->tx_max_bw, pci_info->mac); 2281 pci_info->tx_max_bw, pci_info->mac);
2163 } 2282 }
2283 if (ahw->op_mode == QLCNIC_MGMT_FUNC)
2284 dev_info(dev, "Max vNIC functions = %d, active vNIC functions = %d\n",
2285 ahw->max_pci_func, ahw->act_pci_func);
2286
2164 } else { 2287 } else {
2165 dev_err(&adapter->pdev->dev, "Failed to get PCI Info%d\n", 2288 dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
2166 err);
2167 err = -EIO; 2289 err = -EIO;
2168 } 2290 }
2169 2291
@@ -2180,7 +2302,10 @@ int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
2180 struct qlcnic_cmd_args cmd; 2302 struct qlcnic_cmd_args cmd;
2181 2303
2182 max_ints = adapter->ahw->num_msix - 1; 2304 max_ints = adapter->ahw->num_msix - 1;
2183 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); 2305 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
2306 if (err)
2307 return err;
2308
2184 cmd.req.arg[1] = max_ints; 2309 cmd.req.arg[1] = max_ints;
2185 2310
2186 if (qlcnic_sriov_vf_check(adapter)) 2311 if (qlcnic_sriov_vf_check(adapter))
@@ -2808,7 +2933,11 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
2808 dev_info(&adapter->pdev->dev, "link state down\n"); 2933 dev_info(&adapter->pdev->dev, "link state down\n");
2809 return config; 2934 return config;
2810 } 2935 }
2811 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS); 2936
2937 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
2938 if (err)
2939 return err;
2940
2812 err = qlcnic_issue_cmd(adapter, &cmd); 2941 err = qlcnic_issue_cmd(adapter, &cmd);
2813 if (err) { 2942 if (err) {
2814 dev_info(&adapter->pdev->dev, 2943 dev_info(&adapter->pdev->dev,
@@ -3034,7 +3163,9 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
3034 struct net_device *netdev = adapter->netdev; 3163 struct net_device *netdev = adapter->netdev;
3035 int ret = 0; 3164 int ret = 0;
3036 3165
3037 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); 3166 ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
3167 if (ret)
3168 return;
3038 /* Get Tx stats */ 3169 /* Get Tx stats */
3039 cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16); 3170 cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16);
3040 cmd.rsp.num = QLC_83XX_TX_STAT_REGS; 3171 cmd.rsp.num = QLC_83XX_TX_STAT_REGS;
@@ -3113,8 +3244,10 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
3113 u8 val; 3244 u8 val;
3114 int ret, max_sds_rings = adapter->max_sds_rings; 3245 int ret, max_sds_rings = adapter->max_sds_rings;
3115 3246
3116 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 3247 if (qlcnic_get_diag_lock(adapter)) {
3117 return -EIO; 3248 netdev_info(netdev, "Device in diagnostics mode\n");
3249 return -EBUSY;
3250 }
3118 3251
3119 ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST, 3252 ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
3120 max_sds_rings); 3253 max_sds_rings);
@@ -3122,7 +3255,9 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
3122 goto fail_diag_irq; 3255 goto fail_diag_irq;
3123 3256
3124 ahw->diag_cnt = 0; 3257 ahw->diag_cnt = 0;
3125 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); 3258 ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
3259 if (ret)
3260 goto fail_diag_irq;
3126 3261
3127 if (adapter->flags & QLCNIC_MSIX_ENABLED) 3262 if (adapter->flags & QLCNIC_MSIX_ENABLED)
3128 intrpt_id = ahw->intr_tbl[0].id; 3263 intrpt_id = ahw->intr_tbl[0].id;
@@ -3156,7 +3291,7 @@ done:
3156 3291
3157fail_diag_irq: 3292fail_diag_irq:
3158 adapter->max_sds_rings = max_sds_rings; 3293 adapter->max_sds_rings = max_sds_rings;
3159 clear_bit(__QLCNIC_RESETTING, &adapter->state); 3294 qlcnic_release_diag_lock(adapter);
3160 return ret; 3295 return ret;
3161} 3296}
3162 3297
@@ -3260,3 +3395,54 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
3260 } 3395 }
3261 return 0; 3396 return 0;
3262} 3397}
3398
3399int qlcnic_83xx_shutdown(struct pci_dev *pdev)
3400{
3401 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3402 struct net_device *netdev = adapter->netdev;
3403 int retval;
3404
3405 netif_device_detach(netdev);
3406 qlcnic_cancel_idc_work(adapter);
3407
3408 if (netif_running(netdev))
3409 qlcnic_down(adapter, netdev);
3410
3411 qlcnic_83xx_disable_mbx_intr(adapter);
3412 cancel_delayed_work_sync(&adapter->idc_aen_work);
3413
3414 retval = pci_save_state(pdev);
3415 if (retval)
3416 return retval;
3417
3418 return 0;
3419}
3420
3421int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
3422{
3423 struct qlcnic_hardware_context *ahw = adapter->ahw;
3424 struct qlc_83xx_idc *idc = &ahw->idc;
3425 int err = 0;
3426
3427 err = qlcnic_83xx_idc_init(adapter);
3428 if (err)
3429 return err;
3430
3431 if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) {
3432 if (ahw->op_mode == QLCNIC_MGMT_FUNC) {
3433 qlcnic_83xx_set_vnic_opmode(adapter);
3434 } else {
3435 err = qlcnic_83xx_check_vnic_state(adapter);
3436 if (err)
3437 return err;
3438 }
3439 }
3440
3441 err = qlcnic_83xx_idc_reattach_driver(adapter);
3442 if (err)
3443 return err;
3444
3445 qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
3446 idc->delay);
3447 return err;
3448}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index f5db67fc9f55..2548d1403d75 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -36,7 +36,8 @@
36#define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3 36#define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3
37#define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200 37#define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200
38#define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3 38#define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3
39 39#define QLC_83XX_LB_WAIT_COUNT 250
40#define QLC_83XX_LB_MSLEEP_COUNT 20
40#define QLC_83XX_NO_NIC_RESOURCE 0x5 41#define QLC_83XX_NO_NIC_RESOURCE 0x5
41#define QLC_83XX_MAC_PRESENT 0xC 42#define QLC_83XX_MAC_PRESENT 0xC
42#define QLC_83XX_MAC_ABSENT 0xD 43#define QLC_83XX_MAC_ABSENT 0xD
@@ -314,6 +315,7 @@ struct qlc_83xx_idc {
314 u8 vnic_state; 315 u8 vnic_state;
315 u8 vnic_wait_limit; 316 u8 vnic_wait_limit;
316 u8 quiesce_req; 317 u8 quiesce_req;
318 u8 delay_reset;
317 char **name; 319 char **name;
318}; 320};
319 321
@@ -392,6 +394,8 @@ enum qlcnic_83xx_states {
392#define QLC_83XX_LB_MAX_FILTERS 2048 394#define QLC_83XX_LB_MAX_FILTERS 2048
393#define QLC_83XX_LB_BUCKET_SIZE 256 395#define QLC_83XX_LB_BUCKET_SIZE 256
394#define QLC_83XX_MINIMUM_VECTOR 3 396#define QLC_83XX_MINIMUM_VECTOR 3
397#define QLC_83XX_MAX_MC_COUNT 38
398#define QLC_83XX_MAX_UC_COUNT 4096
395 399
396#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000) 400#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
397#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20) 401#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
@@ -623,4 +627,11 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
623u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *); 627u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
624void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); 628void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
625void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); 629void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
630void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
631int qlcnic_83xx_shutdown(struct pci_dev *);
632int qlcnic_83xx_resume(struct qlcnic_adapter *);
633int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
634int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
635int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
636int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
626#endif 637#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 5e7fb1dfb97b..f41dfab1e9a3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -606,7 +606,7 @@ static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
606 return 0; 606 return 0;
607} 607}
608 608
609static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) 609int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
610{ 610{
611 int err; 611 int err;
612 612
@@ -629,6 +629,7 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
629 return -EIO; 629 return -EIO;
630 } 630 }
631 631
632 qlcnic_set_drv_version(adapter);
632 qlcnic_83xx_idc_attach_driver(adapter); 633 qlcnic_83xx_idc_attach_driver(adapter);
633 634
634 return 0; 635 return 0;
@@ -649,6 +650,7 @@ static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
649 ahw->idc.collect_dump = 0; 650 ahw->idc.collect_dump = 0;
650 ahw->reset_context = 0; 651 ahw->reset_context = 0;
651 adapter->tx_timeo_cnt = 0; 652 adapter->tx_timeo_cnt = 0;
653 ahw->idc.delay_reset = 0;
652 654
653 clear_bit(__QLCNIC_RESETTING, &adapter->state); 655 clear_bit(__QLCNIC_RESETTING, &adapter->state);
654} 656}
@@ -883,21 +885,41 @@ static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
883 int ret = 0; 885 int ret = 0;
884 886
885 if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) { 887 if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
886 qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
887 qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); 888 qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
888 set_bit(__QLCNIC_RESETTING, &adapter->state); 889 set_bit(__QLCNIC_RESETTING, &adapter->state);
889 clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); 890 clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
890 if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) 891 if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
891 qlcnic_83xx_disable_vnic_mode(adapter, 1); 892 qlcnic_83xx_disable_vnic_mode(adapter, 1);
892 qlcnic_83xx_idc_detach_driver(adapter); 893
894 if (qlcnic_check_diag_status(adapter)) {
895 dev_info(&adapter->pdev->dev,
896 "%s: Wait for diag completion\n", __func__);
897 adapter->ahw->idc.delay_reset = 1;
898 return 0;
899 } else {
900 qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
901 qlcnic_83xx_idc_detach_driver(adapter);
902 }
893 } 903 }
894 904
895 /* Check ACK from other functions */ 905 if (qlcnic_check_diag_status(adapter)) {
896 ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
897 if (ret) {
898 dev_info(&adapter->pdev->dev, 906 dev_info(&adapter->pdev->dev,
899 "%s: Waiting for reset ACK\n", __func__); 907 "%s: Wait for diag completion\n", __func__);
900 return 0; 908 return -1;
909 } else {
910 if (adapter->ahw->idc.delay_reset) {
911 qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
912 qlcnic_83xx_idc_detach_driver(adapter);
913 adapter->ahw->idc.delay_reset = 0;
914 }
915
916 /* Check for ACK from other functions */
917 ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
918 if (ret) {
919 dev_info(&adapter->pdev->dev,
920 "%s: Waiting for reset ACK\n", __func__);
921 return -1;
922 }
901 } 923 }
902 924
903 /* Transit to INIT state and restart the HW */ 925 /* Transit to INIT state and restart the HW */
@@ -1113,7 +1135,7 @@ qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter)
1113 return 0; 1135 return 0;
1114} 1136}
1115 1137
1116static int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter) 1138int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter)
1117{ 1139{
1118 int ret = -EIO; 1140 int ret = -EIO;
1119 1141
@@ -1532,9 +1554,18 @@ static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
1532 1554
1533int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev) 1555int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
1534{ 1556{
1535 u8 *p_buff;
1536 u32 addr, count;
1537 struct qlcnic_hardware_context *ahw = p_dev->ahw; 1557 struct qlcnic_hardware_context *ahw = p_dev->ahw;
1558 u32 addr, count, prev_ver, curr_ver;
1559 u8 *p_buff;
1560
1561 if (ahw->reset.buff != NULL) {
1562 prev_ver = p_dev->fw_version;
1563 curr_ver = qlcnic_83xx_get_fw_version(p_dev);
1564 if (curr_ver > prev_ver)
1565 kfree(ahw->reset.buff);
1566 else
1567 return 0;
1568 }
1538 1569
1539 ahw->reset.seq_error = 0; 1570 ahw->reset.seq_error = 0;
1540 ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); 1571 ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
@@ -2062,7 +2093,11 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
2062 audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT); 2093 audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
2063 2094
2064 if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) { 2095 if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) {
2065 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC); 2096 status = qlcnic_alloc_mbx_args(&cmd, adapter,
2097 QLCNIC_CMD_STOP_NIC_FUNC);
2098 if (status)
2099 return;
2100
2066 cmd.req.arg[1] = BIT_31; 2101 cmd.req.arg[1] = BIT_31;
2067 status = qlcnic_issue_cmd(adapter, &cmd); 2102 status = qlcnic_issue_cmd(adapter, &cmd);
2068 if (status) 2103 if (status)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index b0c3de9ede03..599d1fda52f2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -39,30 +39,21 @@ int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
39 return 0; 39 return 0;
40} 40}
41 41
42static int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter) 42int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter)
43{ 43{
44 u8 id; 44 u8 id;
45 int i, ret = -EBUSY; 45 int ret = -EBUSY;
46 u32 data = QLCNIC_MGMT_FUNC; 46 u32 data = QLCNIC_MGMT_FUNC;
47 struct qlcnic_hardware_context *ahw = adapter->ahw; 47 struct qlcnic_hardware_context *ahw = adapter->ahw;
48 48
49 if (qlcnic_83xx_lock_driver(adapter)) 49 if (qlcnic_83xx_lock_driver(adapter))
50 return ret; 50 return ret;
51 51
52 if (qlcnic_config_npars) { 52 id = ahw->pci_func;
53 for (i = 0; i < ahw->act_pci_func; i++) { 53 data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
54 id = adapter->npars[i].pci_func; 54 data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, id)) |
55 if (id == ahw->pci_func) 55 QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, id);
56 continue; 56
57 data |= qlcnic_config_npars &
58 QLC_83XX_SET_FUNC_OPMODE(0x3, id);
59 }
60 } else {
61 data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
62 data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, ahw->pci_func)) |
63 QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC,
64 ahw->pci_func);
65 }
66 QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data); 57 QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data);
67 58
68 qlcnic_83xx_unlock_driver(adapter); 59 qlcnic_83xx_unlock_driver(adapter);
@@ -196,20 +187,24 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
196 else 187 else
197 priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, 188 priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
198 ahw->pci_func); 189 ahw->pci_func);
199 190 switch (priv_level) {
200 if (priv_level == QLCNIC_NON_PRIV_FUNC) { 191 case QLCNIC_NON_PRIV_FUNC:
201 ahw->op_mode = QLCNIC_NON_PRIV_FUNC; 192 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
202 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; 193 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
203 nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic; 194 nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
204 } else if (priv_level == QLCNIC_PRIV_FUNC) { 195 break;
196 case QLCNIC_PRIV_FUNC:
205 ahw->op_mode = QLCNIC_PRIV_FUNC; 197 ahw->op_mode = QLCNIC_PRIV_FUNC;
206 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry; 198 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
207 nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic; 199 nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
208 } else if (priv_level == QLCNIC_MGMT_FUNC) { 200 break;
201 case QLCNIC_MGMT_FUNC:
209 ahw->op_mode = QLCNIC_MGMT_FUNC; 202 ahw->op_mode = QLCNIC_MGMT_FUNC;
210 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; 203 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
211 nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic; 204 nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
212 } else { 205 break;
206 default:
207 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
213 return -EIO; 208 return -EIO;
214 } 209 }
215 210
@@ -218,8 +213,29 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
218 else 213 else
219 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; 214 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
220 215
221 adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; 216 ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
222 adapter->ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO; 217 ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
218
219 return 0;
220}
221
222int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
223{
224 struct qlcnic_hardware_context *ahw = adapter->ahw;
225 struct qlc_83xx_idc *idc = &ahw->idc;
226 u32 state;
227
228 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
229 while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) {
230 msleep(1000);
231 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
232 }
233
234 if (!idc->vnic_wait_limit) {
235 dev_err(&adapter->pdev->dev,
236 "vNIC mode not operational, state check timed out.\n");
237 return -EIO;
238 }
223 239
224 return 0; 240 return 0;
225} 241}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 6acf82b9f018..0581a484ceb5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -36,7 +36,8 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
36 {QLCNIC_CMD_CONFIG_PORT, 4, 1}, 36 {QLCNIC_CMD_CONFIG_PORT, 4, 1},
37 {QLCNIC_CMD_TEMP_SIZE, 4, 4}, 37 {QLCNIC_CMD_TEMP_SIZE, 4, 4},
38 {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, 38 {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
39 {QLCNIC_CMD_SET_DRV_VER, 4, 1}, 39 {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
40 {QLCNIC_CMD_GET_LED_STATUS, 4, 2},
40}; 41};
41 42
42static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) 43static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
@@ -181,7 +182,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
181 return cmd->rsp.arg[0]; 182 return cmd->rsp.arg[0];
182} 183}
183 184
184int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter) 185int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
185{ 186{
186 struct qlcnic_cmd_args cmd; 187 struct qlcnic_cmd_args cmd;
187 u32 arg1, arg2, arg3; 188 u32 arg1, arg2, arg3;
@@ -193,7 +194,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
193 _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR, 194 _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
194 _QLCNIC_LINUX_SUBVERSION); 195 _QLCNIC_LINUX_SUBVERSION);
195 196
196 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER); 197 err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd);
198 if (err)
199 return err;
200
197 memcpy(&arg1, drv_string, sizeof(u32)); 201 memcpy(&arg1, drv_string, sizeof(u32));
198 memcpy(&arg2, drv_string + 4, sizeof(u32)); 202 memcpy(&arg2, drv_string + 4, sizeof(u32));
199 memcpy(&arg3, drv_string + 8, sizeof(u32)); 203 memcpy(&arg3, drv_string + 8, sizeof(u32));
@@ -221,7 +225,10 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
221 225
222 if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE) 226 if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
223 return err; 227 return err;
224 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU); 228 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
229 if (err)
230 return err;
231
225 cmd.req.arg[1] = recv_ctx->context_id; 232 cmd.req.arg[1] = recv_ctx->context_id;
226 cmd.req.arg[2] = mtu; 233 cmd.req.arg[2] = mtu;
227 234
@@ -335,7 +342,10 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
335 } 342 }
336 343
337 phys_addr = hostrq_phys_addr; 344 phys_addr = hostrq_phys_addr;
338 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX); 345 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
346 if (err)
347 goto out_free_rsp;
348
339 cmd.req.arg[1] = MSD(phys_addr); 349 cmd.req.arg[1] = MSD(phys_addr);
340 cmd.req.arg[2] = LSD(phys_addr); 350 cmd.req.arg[2] = LSD(phys_addr);
341 cmd.req.arg[3] = rq_size; 351 cmd.req.arg[3] = rq_size;
@@ -373,10 +383,10 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
373 recv_ctx->context_id = le16_to_cpu(prsp->context_id); 383 recv_ctx->context_id = le16_to_cpu(prsp->context_id);
374 recv_ctx->virt_port = prsp->virt_port; 384 recv_ctx->virt_port = prsp->virt_port;
375 385
386 qlcnic_free_mbx_args(&cmd);
376out_free_rsp: 387out_free_rsp:
377 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, 388 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
378 cardrsp_phys_addr); 389 cardrsp_phys_addr);
379 qlcnic_free_mbx_args(&cmd);
380out_free_rq: 390out_free_rq:
381 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); 391 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
382 return err; 392 return err;
@@ -388,7 +398,10 @@ void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
388 struct qlcnic_cmd_args cmd; 398 struct qlcnic_cmd_args cmd;
389 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 399 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
390 400
391 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX); 401 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
402 if (err)
403 return;
404
392 cmd.req.arg[1] = recv_ctx->context_id; 405 cmd.req.arg[1] = recv_ctx->context_id;
393 err = qlcnic_issue_cmd(adapter, &cmd); 406 err = qlcnic_issue_cmd(adapter, &cmd);
394 if (err) 407 if (err)
@@ -457,7 +470,10 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
457 470
458 phys_addr = rq_phys_addr; 471 phys_addr = rq_phys_addr;
459 472
460 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); 473 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
474 if (err)
475 goto out_free_rsp;
476
461 cmd.req.arg[1] = MSD(phys_addr); 477 cmd.req.arg[1] = MSD(phys_addr);
462 cmd.req.arg[2] = LSD(phys_addr); 478 cmd.req.arg[2] = LSD(phys_addr);
463 cmd.req.arg[3] = rq_size; 479 cmd.req.arg[3] = rq_size;
@@ -473,12 +489,13 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
473 err = -EIO; 489 err = -EIO;
474 } 490 }
475 491
492 qlcnic_free_mbx_args(&cmd);
493
494out_free_rsp:
476 dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, 495 dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
477 rsp_phys_addr); 496 rsp_phys_addr);
478
479out_free_rq: 497out_free_rq:
480 dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); 498 dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
481 qlcnic_free_mbx_args(&cmd);
482 499
483 return err; 500 return err;
484} 501}
@@ -487,8 +504,11 @@ void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
487 struct qlcnic_host_tx_ring *tx_ring) 504 struct qlcnic_host_tx_ring *tx_ring)
488{ 505{
489 struct qlcnic_cmd_args cmd; 506 struct qlcnic_cmd_args cmd;
507 int ret;
490 508
491 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); 509 ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
510 if (ret)
511 return;
492 512
493 cmd.req.arg[1] = tx_ring->ctx_id; 513 cmd.req.arg[1] = tx_ring->ctx_id;
494 if (qlcnic_issue_cmd(adapter, &cmd)) 514 if (qlcnic_issue_cmd(adapter, &cmd))
@@ -503,7 +523,10 @@ qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
503 int err; 523 int err;
504 struct qlcnic_cmd_args cmd; 524 struct qlcnic_cmd_args cmd;
505 525
506 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT); 526 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
527 if (err)
528 return err;
529
507 cmd.req.arg[1] = config; 530 cmd.req.arg[1] = config;
508 err = qlcnic_issue_cmd(adapter, &cmd); 531 err = qlcnic_issue_cmd(adapter, &cmd);
509 qlcnic_free_mbx_args(&cmd); 532 qlcnic_free_mbx_args(&cmd);
@@ -707,7 +730,10 @@ int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
707 struct qlcnic_cmd_args cmd; 730 struct qlcnic_cmd_args cmd;
708 u32 mac_low, mac_high; 731 u32 mac_low, mac_high;
709 732
710 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); 733 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
734 if (err)
735 return err;
736
711 cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8; 737 cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
712 err = qlcnic_issue_cmd(adapter, &cmd); 738 err = qlcnic_issue_cmd(adapter, &cmd);
713 739
@@ -746,7 +772,10 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
746 772
747 nic_info = nic_info_addr; 773 nic_info = nic_info_addr;
748 774
749 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); 775 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
776 if (err)
777 goto out_free_dma;
778
750 cmd.req.arg[1] = MSD(nic_dma_t); 779 cmd.req.arg[1] = MSD(nic_dma_t);
751 cmd.req.arg[2] = LSD(nic_dma_t); 780 cmd.req.arg[2] = LSD(nic_dma_t);
752 cmd.req.arg[3] = (func_id << 16 | nic_size); 781 cmd.req.arg[3] = (func_id << 16 | nic_size);
@@ -768,9 +797,10 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
768 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); 797 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
769 } 798 }
770 799
800 qlcnic_free_mbx_args(&cmd);
801out_free_dma:
771 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, 802 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
772 nic_dma_t); 803 nic_dma_t);
773 qlcnic_free_mbx_args(&cmd);
774 804
775 return err; 805 return err;
776} 806}
@@ -807,7 +837,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
807 nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw); 837 nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
808 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); 838 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
809 839
810 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); 840 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
841 if (err)
842 goto out_free_dma;
843
811 cmd.req.arg[1] = MSD(nic_dma_t); 844 cmd.req.arg[1] = MSD(nic_dma_t);
812 cmd.req.arg[2] = LSD(nic_dma_t); 845 cmd.req.arg[2] = LSD(nic_dma_t);
813 cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size); 846 cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
@@ -819,9 +852,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
819 err = -EIO; 852 err = -EIO;
820 } 853 }
821 854
822 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
823 nic_dma_t);
824 qlcnic_free_mbx_args(&cmd); 855 qlcnic_free_mbx_args(&cmd);
856out_free_dma:
857 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
858 nic_dma_t);
825 859
826 return err; 860 return err;
827} 861}
@@ -845,7 +879,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
845 return -ENOMEM; 879 return -ENOMEM;
846 880
847 npar = pci_info_addr; 881 npar = pci_info_addr;
848 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); 882 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
883 if (err)
884 goto out_free_dma;
885
849 cmd.req.arg[1] = MSD(pci_info_dma_t); 886 cmd.req.arg[1] = MSD(pci_info_dma_t);
850 cmd.req.arg[2] = LSD(pci_info_dma_t); 887 cmd.req.arg[2] = LSD(pci_info_dma_t);
851 cmd.req.arg[3] = pci_size; 888 cmd.req.arg[3] = pci_size;
@@ -873,20 +910,22 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
873 err = -EIO; 910 err = -EIO;
874 } 911 }
875 912
913 qlcnic_free_mbx_args(&cmd);
914out_free_dma:
876 dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr, 915 dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
877 pci_info_dma_t); 916 pci_info_dma_t);
878 qlcnic_free_mbx_args(&cmd);
879 917
880 return err; 918 return err;
881} 919}
882 920
883/* Configure eSwitch for port mirroring */ 921/* Configure eSwitch for port mirroring */
884int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, 922int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
885 u8 enable_mirroring, u8 pci_func) 923 u8 enable_mirroring, u8 pci_func)
886{ 924{
925 struct device *dev = &adapter->pdev->dev;
926 struct qlcnic_cmd_args cmd;
887 int err = -EIO; 927 int err = -EIO;
888 u32 arg1; 928 u32 arg1;
889 struct qlcnic_cmd_args cmd;
890 929
891 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC || 930 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
892 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) 931 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
@@ -895,18 +934,20 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
895 arg1 = id | (enable_mirroring ? BIT_4 : 0); 934 arg1 = id | (enable_mirroring ? BIT_4 : 0);
896 arg1 |= pci_func << 8; 935 arg1 |= pci_func << 8;
897 936
898 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING); 937 err = qlcnic_alloc_mbx_args(&cmd, adapter,
938 QLCNIC_CMD_SET_PORTMIRRORING);
939 if (err)
940 return err;
941
899 cmd.req.arg[1] = arg1; 942 cmd.req.arg[1] = arg1;
900 err = qlcnic_issue_cmd(adapter, &cmd); 943 err = qlcnic_issue_cmd(adapter, &cmd);
901 944
902 if (err != QLCNIC_RCODE_SUCCESS) 945 if (err != QLCNIC_RCODE_SUCCESS)
903 dev_err(&adapter->pdev->dev, 946 dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n",
904 "Failed to configure port mirroring%d on eswitch:%d\n",
905 pci_func, id); 947 pci_func, id);
906 else 948 else
907 dev_info(&adapter->pdev->dev, 949 dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n",
908 "Configured eSwitch %d for port mirroring:%d\n", 950 pci_func, id);
909 id, pci_func);
910 qlcnic_free_mbx_args(&cmd); 951 qlcnic_free_mbx_args(&cmd);
911 952
912 return err; 953 return err;
@@ -941,7 +982,11 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
941 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; 982 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
942 arg1 |= rx_tx << 15 | stats_size << 16; 983 arg1 |= rx_tx << 15 | stats_size << 16;
943 984
944 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS); 985 err = qlcnic_alloc_mbx_args(&cmd, adapter,
986 QLCNIC_CMD_GET_ESWITCH_STATS);
987 if (err)
988 goto out_free_dma;
989
945 cmd.req.arg[1] = arg1; 990 cmd.req.arg[1] = arg1;
946 cmd.req.arg[2] = MSD(stats_dma_t); 991 cmd.req.arg[2] = MSD(stats_dma_t);
947 cmd.req.arg[3] = LSD(stats_dma_t); 992 cmd.req.arg[3] = LSD(stats_dma_t);
@@ -963,9 +1008,10 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
963 esw_stats->numbytes = le64_to_cpu(stats->numbytes); 1008 esw_stats->numbytes = le64_to_cpu(stats->numbytes);
964 } 1009 }
965 1010
966 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
967 stats_dma_t);
968 qlcnic_free_mbx_args(&cmd); 1011 qlcnic_free_mbx_args(&cmd);
1012out_free_dma:
1013 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
1014 stats_dma_t);
969 1015
970 return err; 1016 return err;
971} 1017}
@@ -989,7 +1035,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
989 if (!stats_addr) 1035 if (!stats_addr)
990 return -ENOMEM; 1036 return -ENOMEM;
991 1037
992 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); 1038 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
1039 if (err)
1040 goto out_free_dma;
1041
993 cmd.req.arg[1] = stats_size << 16; 1042 cmd.req.arg[1] = stats_size << 16;
994 cmd.req.arg[2] = MSD(stats_dma_t); 1043 cmd.req.arg[2] = MSD(stats_dma_t);
995 cmd.req.arg[3] = LSD(stats_dma_t); 1044 cmd.req.arg[3] = LSD(stats_dma_t);
@@ -1020,11 +1069,12 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
1020 "%s: Get mac stats failed, err=%d.\n", __func__, err); 1069 "%s: Get mac stats failed, err=%d.\n", __func__, err);
1021 } 1070 }
1022 1071
1023 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
1024 stats_dma_t);
1025
1026 qlcnic_free_mbx_args(&cmd); 1072 qlcnic_free_mbx_args(&cmd);
1027 1073
1074out_free_dma:
1075 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
1076 stats_dma_t);
1077
1028 return err; 1078 return err;
1029} 1079}
1030 1080
@@ -1108,7 +1158,11 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
1108 arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12; 1158 arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
1109 arg1 |= BIT_14 | rx_tx << 15; 1159 arg1 |= BIT_14 | rx_tx << 15;
1110 1160
1111 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS); 1161 err = qlcnic_alloc_mbx_args(&cmd, adapter,
1162 QLCNIC_CMD_GET_ESWITCH_STATS);
1163 if (err)
1164 return err;
1165
1112 cmd.req.arg[1] = arg1; 1166 cmd.req.arg[1] = arg1;
1113 err = qlcnic_issue_cmd(adapter, &cmd); 1167 err = qlcnic_issue_cmd(adapter, &cmd);
1114 qlcnic_free_mbx_args(&cmd); 1168 qlcnic_free_mbx_args(&cmd);
@@ -1121,17 +1175,19 @@ err_ret:
1121 return -EIO; 1175 return -EIO;
1122} 1176}
1123 1177
1124static int 1178static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1125__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, 1179 u32 *arg1, u32 *arg2)
1126 u32 *arg1, u32 *arg2)
1127{ 1180{
1128 int err = -EIO; 1181 struct device *dev = &adapter->pdev->dev;
1129 struct qlcnic_cmd_args cmd; 1182 struct qlcnic_cmd_args cmd;
1130 u8 pci_func; 1183 u8 pci_func = *arg1 >> 8;
1131 pci_func = (*arg1 >> 8); 1184 int err;
1185
1186 err = qlcnic_alloc_mbx_args(&cmd, adapter,
1187 QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
1188 if (err)
1189 return err;
1132 1190
1133 qlcnic_alloc_mbx_args(&cmd, adapter,
1134 QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
1135 cmd.req.arg[1] = *arg1; 1191 cmd.req.arg[1] = *arg1;
1136 err = qlcnic_issue_cmd(adapter, &cmd); 1192 err = qlcnic_issue_cmd(adapter, &cmd);
1137 *arg1 = cmd.rsp.arg[1]; 1193 *arg1 = cmd.rsp.arg[1];
@@ -1139,12 +1195,11 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1139 qlcnic_free_mbx_args(&cmd); 1195 qlcnic_free_mbx_args(&cmd);
1140 1196
1141 if (err == QLCNIC_RCODE_SUCCESS) 1197 if (err == QLCNIC_RCODE_SUCCESS)
1142 dev_info(&adapter->pdev->dev, 1198 dev_info(dev, "Get eSwitch port config for vNIC function %d\n",
1143 "eSwitch port config for pci func %d\n", pci_func); 1199 pci_func);
1144 else 1200 else
1145 dev_err(&adapter->pdev->dev, 1201 dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n",
1146 "Failed to get eswitch port config for pci func %d\n", 1202 pci_func);
1147 pci_func);
1148 return err; 1203 return err;
1149} 1204}
1150/* Configure eSwitch port 1205/* Configure eSwitch port
@@ -1157,9 +1212,10 @@ op_type = 1 for port vlan_id
1157int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, 1212int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1158 struct qlcnic_esw_func_cfg *esw_cfg) 1213 struct qlcnic_esw_func_cfg *esw_cfg)
1159{ 1214{
1215 struct device *dev = &adapter->pdev->dev;
1216 struct qlcnic_cmd_args cmd;
1160 int err = -EIO, index; 1217 int err = -EIO, index;
1161 u32 arg1, arg2 = 0; 1218 u32 arg1, arg2 = 0;
1162 struct qlcnic_cmd_args cmd;
1163 u8 pci_func; 1219 u8 pci_func;
1164 1220
1165 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 1221 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
@@ -1209,18 +1265,22 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1209 return err; 1265 return err;
1210 } 1266 }
1211 1267
1212 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH); 1268 err = qlcnic_alloc_mbx_args(&cmd, adapter,
1269 QLCNIC_CMD_CONFIGURE_ESWITCH);
1270 if (err)
1271 return err;
1272
1213 cmd.req.arg[1] = arg1; 1273 cmd.req.arg[1] = arg1;
1214 cmd.req.arg[2] = arg2; 1274 cmd.req.arg[2] = arg2;
1215 err = qlcnic_issue_cmd(adapter, &cmd); 1275 err = qlcnic_issue_cmd(adapter, &cmd);
1216 qlcnic_free_mbx_args(&cmd); 1276 qlcnic_free_mbx_args(&cmd);
1217 1277
1218 if (err != QLCNIC_RCODE_SUCCESS) 1278 if (err != QLCNIC_RCODE_SUCCESS)
1219 dev_err(&adapter->pdev->dev, 1279 dev_err(dev, "Failed to configure eswitch for vNIC function %d\n",
1220 "Failed to configure eswitch pci func %d\n", pci_func); 1280 pci_func);
1221 else 1281 else
1222 dev_info(&adapter->pdev->dev, 1282 dev_info(dev, "Configured eSwitch for vNIC function %d\n",
1223 "Configured eSwitch for pci func %d\n", pci_func); 1283 pci_func);
1224 1284
1225 return err; 1285 return err;
1226} 1286}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index f67652de5a63..700a46324d09 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -846,7 +846,9 @@ static int qlcnic_irq_test(struct net_device *netdev)
846 goto clear_diag_irq; 846 goto clear_diag_irq;
847 847
848 ahw->diag_cnt = 0; 848 ahw->diag_cnt = 0;
849 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); 849 ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
850 if (ret)
851 goto free_diag_res;
850 852
851 cmd.req.arg[1] = ahw->pci_func; 853 cmd.req.arg[1] = ahw->pci_func;
852 ret = qlcnic_issue_cmd(adapter, &cmd); 854 ret = qlcnic_issue_cmd(adapter, &cmd);
@@ -858,6 +860,8 @@ static int qlcnic_irq_test(struct net_device *netdev)
858 860
859done: 861done:
860 qlcnic_free_mbx_args(&cmd); 862 qlcnic_free_mbx_args(&cmd);
863
864free_diag_res:
861 qlcnic_diag_free_res(netdev, max_sds_rings); 865 qlcnic_diag_free_res(netdev, max_sds_rings);
862 866
863clear_diag_irq: 867clear_diag_irq:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index c0f0c0d0a790..d262211b03b3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -672,6 +672,7 @@ enum {
672#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 10 672#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 10
673 673
674#define QLCNIC_MAX_MC_COUNT 38 674#define QLCNIC_MAX_MC_COUNT 38
675#define QLCNIC_MAX_UC_COUNT 512
675#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5 676#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5
676 677
677#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) 678#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 106a12f2a02f..5b5d2edf125d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -499,6 +499,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
499void __qlcnic_set_multi(struct net_device *netdev, u16 vlan) 499void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
500{ 500{
501 struct qlcnic_adapter *adapter = netdev_priv(netdev); 501 struct qlcnic_adapter *adapter = netdev_priv(netdev);
502 struct qlcnic_hardware_context *ahw = adapter->ahw;
502 struct netdev_hw_addr *ha; 503 struct netdev_hw_addr *ha;
503 static const u8 bcast_addr[ETH_ALEN] = { 504 static const u8 bcast_addr[ETH_ALEN] = {
504 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 505 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
@@ -515,25 +516,30 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
515 if (netdev->flags & IFF_PROMISC) { 516 if (netdev->flags & IFF_PROMISC) {
516 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) 517 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
517 mode = VPORT_MISS_MODE_ACCEPT_ALL; 518 mode = VPORT_MISS_MODE_ACCEPT_ALL;
518 goto send_fw_cmd; 519 } else if (netdev->flags & IFF_ALLMULTI) {
519 } 520 if (netdev_mc_count(netdev) > ahw->max_mc_count) {
520 521 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
521 if ((netdev->flags & IFF_ALLMULTI) || 522 } else if (!netdev_mc_empty(netdev) &&
522 (netdev_mc_count(netdev) > adapter->ahw->max_mc_count)) { 523 !qlcnic_sriov_vf_check(adapter)) {
523 mode = VPORT_MISS_MODE_ACCEPT_MULTI; 524 netdev_for_each_mc_addr(ha, netdev)
524 goto send_fw_cmd; 525 qlcnic_nic_add_mac(adapter, ha->addr,
526 vlan);
527 }
528 if (mode != VPORT_MISS_MODE_ACCEPT_MULTI &&
529 qlcnic_sriov_vf_check(adapter))
530 qlcnic_vf_add_mc_list(netdev, vlan);
525 } 531 }
526 532
527 if (!netdev_mc_empty(netdev) && !qlcnic_sriov_vf_check(adapter)) { 533 /* configure unicast MAC address, if there is not sufficient space
528 netdev_for_each_mc_addr(ha, netdev) { 534 * to store all the unicast addresses then enable promiscuous mode
535 */
536 if (netdev_uc_count(netdev) > ahw->max_uc_count) {
537 mode = VPORT_MISS_MODE_ACCEPT_ALL;
538 } else if (!netdev_uc_empty(netdev)) {
539 netdev_for_each_uc_addr(ha, netdev)
529 qlcnic_nic_add_mac(adapter, ha->addr, vlan); 540 qlcnic_nic_add_mac(adapter, ha->addr, vlan);
530 }
531 } 541 }
532 542
533 if (qlcnic_sriov_vf_check(adapter))
534 qlcnic_vf_add_mc_list(netdev, vlan);
535
536send_fw_cmd:
537 if (!qlcnic_sriov_vf_check(adapter)) { 543 if (!qlcnic_sriov_vf_check(adapter)) {
538 if (mode == VPORT_MISS_MODE_ACCEPT_ALL && 544 if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
539 !adapter->fdb_mac_learn) { 545 !adapter->fdb_mac_learn) {
@@ -780,7 +786,8 @@ int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
780 word = 0; 786 word = 0;
781 if (enable) { 787 if (enable) {
782 word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK; 788 word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK;
783 if (adapter->ahw->capabilities2 & QLCNIC_FW_CAP2_HW_LRO_IPV6) 789 if (adapter->ahw->extra_capability[0] &
790 QLCNIC_FW_CAP2_HW_LRO_IPV6)
784 word |= QLCNIC_ENABLE_IPV6_LRO | 791 word |= QLCNIC_ENABLE_IPV6_LRO |
785 QLCNIC_NO_DEST_IPV6_CHECK; 792 QLCNIC_NO_DEST_IPV6_CHECK;
786 } 793 }
@@ -1503,6 +1510,21 @@ int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1503 return rv; 1510 return rv;
1504} 1511}
1505 1512
1513int qlcnic_get_beacon_state(struct qlcnic_adapter *adapter, u8 *h_state)
1514{
1515 struct qlcnic_cmd_args cmd;
1516 int err;
1517
1518 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_STATUS);
1519 if (!err) {
1520 err = qlcnic_issue_cmd(adapter, &cmd);
1521 if (!err)
1522 *h_state = cmd.rsp.arg[1];
1523 }
1524 qlcnic_free_mbx_args(&cmd);
1525 return err;
1526}
1527
1506void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter) 1528void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
1507{ 1529{
1508 void __iomem *msix_base_addr; 1530 void __iomem *msix_base_addr;
@@ -1555,3 +1577,54 @@ void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter)
1555{ 1577{
1556 qlcnic_pcie_sem_unlock(adapter, 5); 1578 qlcnic_pcie_sem_unlock(adapter, 5);
1557} 1579}
1580
1581int qlcnic_82xx_shutdown(struct pci_dev *pdev)
1582{
1583 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1584 struct net_device *netdev = adapter->netdev;
1585 int retval;
1586
1587 netif_device_detach(netdev);
1588
1589 qlcnic_cancel_idc_work(adapter);
1590
1591 if (netif_running(netdev))
1592 qlcnic_down(adapter, netdev);
1593
1594 qlcnic_clr_all_drv_state(adapter, 0);
1595
1596 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1597
1598 retval = pci_save_state(pdev);
1599 if (retval)
1600 return retval;
1601
1602 if (qlcnic_wol_supported(adapter)) {
1603 pci_enable_wake(pdev, PCI_D3cold, 1);
1604 pci_enable_wake(pdev, PCI_D3hot, 1);
1605 }
1606
1607 return 0;
1608}
1609
1610int qlcnic_82xx_resume(struct qlcnic_adapter *adapter)
1611{
1612 struct net_device *netdev = adapter->netdev;
1613 int err;
1614
1615 err = qlcnic_start_firmware(adapter);
1616 if (err) {
1617 dev_err(&adapter->pdev->dev, "failed to start firmware\n");
1618 return err;
1619 }
1620
1621 if (netif_running(netdev)) {
1622 err = qlcnic_up(adapter, netdev);
1623 if (!err)
1624 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1625 }
1626
1627 netif_device_attach(netdev);
1628 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1629 return err;
1630}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index b6818f4356b9..2c22504f57aa 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -86,7 +86,8 @@ enum qlcnic_regs {
86#define QLCNIC_CMD_BC_EVENT_SETUP 0x31 86#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
87#define QLCNIC_CMD_CONFIG_VPORT 0x32 87#define QLCNIC_CMD_CONFIG_VPORT 0x32
88#define QLCNIC_CMD_GET_MAC_STATS 0x37 88#define QLCNIC_CMD_GET_MAC_STATS 0x37
89#define QLCNIC_CMD_SET_DRV_VER 0x38 89#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38
90#define QLCNIC_CMD_GET_LED_STATUS 0x3C
90#define QLCNIC_CMD_CONFIGURE_RSS 0x41 91#define QLCNIC_CMD_CONFIGURE_RSS 0x41
91#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43 92#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
92#define QLCNIC_CMD_CONFIGURE_LED 0x44 93#define QLCNIC_CMD_CONFIGURE_LED 0x44
@@ -102,6 +103,7 @@ enum qlcnic_regs {
102#define QLCNIC_CMD_GET_LINK_STATUS 0x68 103#define QLCNIC_CMD_GET_LINK_STATUS 0x68
103#define QLCNIC_CMD_SET_LED_CONFIG 0x69 104#define QLCNIC_CMD_SET_LED_CONFIG 0x69
104#define QLCNIC_CMD_GET_LED_CONFIG 0x6A 105#define QLCNIC_CMD_GET_LED_CONFIG 0x6A
106#define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F
105#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B 107#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B
106 108
107#define QLCNIC_INTRPT_INTX 1 109#define QLCNIC_INTRPT_INTX 1
@@ -197,4 +199,8 @@ void qlcnic_82xx_api_unlock(struct qlcnic_adapter *);
197void qlcnic_82xx_napi_enable(struct qlcnic_adapter *); 199void qlcnic_82xx_napi_enable(struct qlcnic_adapter *);
198void qlcnic_82xx_napi_disable(struct qlcnic_adapter *); 200void qlcnic_82xx_napi_disable(struct qlcnic_adapter *);
199void qlcnic_82xx_napi_del(struct qlcnic_adapter *); 201void qlcnic_82xx_napi_del(struct qlcnic_adapter *);
202int qlcnic_82xx_shutdown(struct pci_dev *);
203int qlcnic_82xx_resume(struct qlcnic_adapter *);
204void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
205void qlcnic_fw_poll_work(struct work_struct *work);
200#endif /* __QLCNIC_HW_H_ */ 206#endif /* __QLCNIC_HW_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index aeb26a850679..4528f8ec333b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -52,10 +52,6 @@ int qlcnic_load_fw_file;
52MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); 52MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
53module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); 53module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
54 54
55int qlcnic_config_npars;
56module_param(qlcnic_config_npars, int, 0444);
57MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
58
59static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 55static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
60static void qlcnic_remove(struct pci_dev *pdev); 56static void qlcnic_remove(struct pci_dev *pdev);
61static int qlcnic_open(struct net_device *netdev); 57static int qlcnic_open(struct net_device *netdev);
@@ -63,13 +59,11 @@ static int qlcnic_close(struct net_device *netdev);
63static void qlcnic_tx_timeout(struct net_device *netdev); 59static void qlcnic_tx_timeout(struct net_device *netdev);
64static void qlcnic_attach_work(struct work_struct *work); 60static void qlcnic_attach_work(struct work_struct *work);
65static void qlcnic_fwinit_work(struct work_struct *work); 61static void qlcnic_fwinit_work(struct work_struct *work);
66static void qlcnic_fw_poll_work(struct work_struct *work);
67#ifdef CONFIG_NET_POLL_CONTROLLER 62#ifdef CONFIG_NET_POLL_CONTROLLER
68static void qlcnic_poll_controller(struct net_device *netdev); 63static void qlcnic_poll_controller(struct net_device *netdev);
69#endif 64#endif
70 65
71static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); 66static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
72static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
73static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); 67static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
74 68
75static irqreturn_t qlcnic_tmp_intr(int irq, void *data); 69static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
@@ -364,12 +358,15 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
364 return ndo_dflt_fdb_del(ndm, tb, netdev, addr); 358 return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
365 359
366 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { 360 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
367 if (is_unicast_ether_addr(addr)) 361 if (is_unicast_ether_addr(addr)) {
368 err = qlcnic_nic_del_mac(adapter, addr); 362 err = dev_uc_del(netdev, addr);
369 else if (is_multicast_ether_addr(addr)) 363 if (!err)
364 err = qlcnic_nic_del_mac(adapter, addr);
365 } else if (is_multicast_ether_addr(addr)) {
370 err = dev_mc_del(netdev, addr); 366 err = dev_mc_del(netdev, addr);
371 else 367 } else {
372 err = -EINVAL; 368 err = -EINVAL;
369 }
373 } 370 }
374 return err; 371 return err;
375} 372}
@@ -392,12 +389,16 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
392 if (ether_addr_equal(addr, adapter->mac_addr)) 389 if (ether_addr_equal(addr, adapter->mac_addr))
393 return err; 390 return err;
394 391
395 if (is_unicast_ether_addr(addr)) 392 if (is_unicast_ether_addr(addr)) {
396 err = qlcnic_nic_add_mac(adapter, addr, 0); 393 if (netdev_uc_count(netdev) < adapter->ahw->max_uc_count)
397 else if (is_multicast_ether_addr(addr)) 394 err = dev_uc_add_excl(netdev, addr);
395 else
396 err = -ENOMEM;
397 } else if (is_multicast_ether_addr(addr)) {
398 err = dev_mc_add_excl(netdev, addr); 398 err = dev_mc_add_excl(netdev, addr);
399 else 399 } else {
400 err = -EINVAL; 400 err = -EINVAL;
401 }
401 402
402 return err; 403 return err;
403} 404}
@@ -449,6 +450,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
449 .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate, 450 .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate,
450 .ndo_get_vf_config = qlcnic_sriov_get_vf_config, 451 .ndo_get_vf_config = qlcnic_sriov_get_vf_config,
451 .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan, 452 .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
453 .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk,
452#endif 454#endif
453}; 455};
454 456
@@ -465,6 +467,8 @@ static struct qlcnic_nic_template qlcnic_ops = {
465 .napi_add = qlcnic_82xx_napi_add, 467 .napi_add = qlcnic_82xx_napi_add,
466 .napi_del = qlcnic_82xx_napi_del, 468 .napi_del = qlcnic_82xx_napi_del,
467 .config_ipaddr = qlcnic_82xx_config_ipaddr, 469 .config_ipaddr = qlcnic_82xx_config_ipaddr,
470 .shutdown = qlcnic_82xx_shutdown,
471 .resume = qlcnic_82xx_resume,
468 .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr, 472 .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr,
469}; 473};
470 474
@@ -508,6 +512,7 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
508 .config_promisc_mode = qlcnic_82xx_nic_set_promisc, 512 .config_promisc_mode = qlcnic_82xx_nic_set_promisc,
509 .change_l2_filter = qlcnic_82xx_change_filter, 513 .change_l2_filter = qlcnic_82xx_change_filter,
510 .get_board_info = qlcnic_82xx_get_board_info, 514 .get_board_info = qlcnic_82xx_get_board_info,
515 .set_mac_filter_count = qlcnic_82xx_set_mac_filter_count,
511 .free_mac_list = qlcnic_82xx_free_mac_list, 516 .free_mac_list = qlcnic_82xx_free_mac_list,
512}; 517};
513 518
@@ -768,7 +773,7 @@ static int
768qlcnic_set_function_modes(struct qlcnic_adapter *adapter) 773qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
769{ 774{
770 u8 id; 775 u8 id;
771 int i, ret = 1; 776 int ret;
772 u32 data = QLCNIC_MGMT_FUNC; 777 u32 data = QLCNIC_MGMT_FUNC;
773 struct qlcnic_hardware_context *ahw = adapter->ahw; 778 struct qlcnic_hardware_context *ahw = adapter->ahw;
774 779
@@ -776,20 +781,10 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
776 if (ret) 781 if (ret)
777 goto err_lock; 782 goto err_lock;
778 783
779 if (qlcnic_config_npars) { 784 id = ahw->pci_func;
780 for (i = 0; i < ahw->act_pci_func; i++) { 785 data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
781 id = adapter->npars[i].pci_func; 786 data = (data & ~QLC_DEV_SET_DRV(0xf, id)) |
782 if (id == ahw->pci_func) 787 QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, id);
783 continue;
784 data |= (qlcnic_config_npars &
785 QLC_DEV_SET_DRV(0xf, id));
786 }
787 } else {
788 data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
789 data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) |
790 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
791 ahw->pci_func));
792 }
793 QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data); 788 QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data);
794 qlcnic_api_unlock(adapter); 789 qlcnic_api_unlock(adapter);
795err_lock: 790err_lock:
@@ -875,6 +870,27 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
875 return 0; 870 return 0;
876} 871}
877 872
873static inline bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
874 int index)
875{
876 struct pci_dev *pdev = adapter->pdev;
877 unsigned short subsystem_vendor;
878 bool ret = true;
879
880 subsystem_vendor = pdev->subsystem_vendor;
881
882 if (pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X ||
883 pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
884 if (qlcnic_boards[index].sub_vendor == subsystem_vendor &&
885 qlcnic_boards[index].sub_device == pdev->subsystem_device)
886 ret = true;
887 else
888 ret = false;
889 }
890
891 return ret;
892}
893
878static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name) 894static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
879{ 895{
880 struct pci_dev *pdev = adapter->pdev; 896 struct pci_dev *pdev = adapter->pdev;
@@ -882,20 +898,18 @@ static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
882 898
883 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { 899 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
884 if (qlcnic_boards[i].vendor == pdev->vendor && 900 if (qlcnic_boards[i].vendor == pdev->vendor &&
885 qlcnic_boards[i].device == pdev->device && 901 qlcnic_boards[i].device == pdev->device &&
886 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor && 902 qlcnic_validate_subsystem_id(adapter, i)) {
887 qlcnic_boards[i].sub_device == pdev->subsystem_device) { 903 found = 1;
888 sprintf(name, "%pM: %s" , 904 break;
889 adapter->mac_addr,
890 qlcnic_boards[i].short_name);
891 found = 1;
892 break;
893 } 905 }
894
895 } 906 }
896 907
897 if (!found) 908 if (!found)
898 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr); 909 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
910 else
911 sprintf(name, "%pM: %s" , adapter->mac_addr,
912 qlcnic_boards[i].short_name);
899} 913}
900 914
901static void 915static void
@@ -980,7 +994,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
980 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { 994 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
981 u32 temp; 995 u32 temp;
982 temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2); 996 temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
983 adapter->ahw->capabilities2 = temp; 997 adapter->ahw->extra_capability[0] = temp;
984 } 998 }
985 adapter->ahw->max_mac_filters = nic_info.max_mac_filters; 999 adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
986 adapter->ahw->max_mtu = nic_info.max_mtu; 1000 adapter->ahw->max_mtu = nic_info.max_mtu;
@@ -1395,16 +1409,23 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
1395 for (ring = 0; ring < num_sds_rings; ring++) { 1409 for (ring = 0; ring < num_sds_rings; ring++) {
1396 sds_ring = &recv_ctx->sds_rings[ring]; 1410 sds_ring = &recv_ctx->sds_rings[ring];
1397 if (qlcnic_82xx_check(adapter) && 1411 if (qlcnic_82xx_check(adapter) &&
1398 (ring == (num_sds_rings - 1))) 1412 (ring == (num_sds_rings - 1))) {
1399 snprintf(sds_ring->name, 1413 if (!(adapter->flags &
1400 sizeof(sds_ring->name), 1414 QLCNIC_MSIX_ENABLED))
1401 "qlcnic-%s[Tx0+Rx%d]", 1415 snprintf(sds_ring->name,
1402 netdev->name, ring); 1416 sizeof(sds_ring->name),
1403 else 1417 "qlcnic");
1418 else
1419 snprintf(sds_ring->name,
1420 sizeof(sds_ring->name),
1421 "%s-tx-0-rx-%d",
1422 netdev->name, ring);
1423 } else {
1404 snprintf(sds_ring->name, 1424 snprintf(sds_ring->name,
1405 sizeof(sds_ring->name), 1425 sizeof(sds_ring->name),
1406 "qlcnic-%s[Rx%d]", 1426 "%s-rx-%d",
1407 netdev->name, ring); 1427 netdev->name, ring);
1428 }
1408 err = request_irq(sds_ring->irq, handler, flags, 1429 err = request_irq(sds_ring->irq, handler, flags,
1409 sds_ring->name, sds_ring); 1430 sds_ring->name, sds_ring);
1410 if (err) 1431 if (err)
@@ -1419,7 +1440,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
1419 ring++) { 1440 ring++) {
1420 tx_ring = &adapter->tx_ring[ring]; 1441 tx_ring = &adapter->tx_ring[ring];
1421 snprintf(tx_ring->name, sizeof(tx_ring->name), 1442 snprintf(tx_ring->name, sizeof(tx_ring->name),
1422 "qlcnic-%s[Tx%d]", netdev->name, ring); 1443 "%s-tx-%d", netdev->name, ring);
1423 err = request_irq(tx_ring->irq, handler, flags, 1444 err = request_irq(tx_ring->irq, handler, flags,
1424 tx_ring->name, tx_ring); 1445 tx_ring->name, tx_ring);
1425 if (err) 1446 if (err)
@@ -1465,7 +1486,7 @@ static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
1465 u32 capab = 0; 1486 u32 capab = 0;
1466 1487
1467 if (qlcnic_82xx_check(adapter)) { 1488 if (qlcnic_82xx_check(adapter)) {
1468 if (adapter->ahw->capabilities2 & 1489 if (adapter->ahw->extra_capability[0] &
1469 QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) 1490 QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
1470 adapter->flags |= QLCNIC_FW_LRO_MSS_CAP; 1491 adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
1471 } else { 1492 } else {
@@ -1816,6 +1837,22 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
1816 return err; 1837 return err;
1817} 1838}
1818 1839
1840void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
1841{
1842 struct qlcnic_hardware_context *ahw = adapter->ahw;
1843 u16 act_pci_fn = ahw->act_pci_func;
1844 u16 count;
1845
1846 ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
1847 if (act_pci_fn <= 2)
1848 count = (QLCNIC_MAX_UC_COUNT - QLCNIC_MAX_MC_COUNT) /
1849 act_pci_fn;
1850 else
1851 count = (QLCNIC_LB_MAX_FILTERS - QLCNIC_MAX_MC_COUNT) /
1852 act_pci_fn;
1853 ahw->max_uc_count = count;
1854}
1855
1819int 1856int
1820qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, 1857qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
1821 int pci_using_dac) 1858 int pci_using_dac)
@@ -1825,7 +1862,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
1825 1862
1826 adapter->rx_csum = 1; 1863 adapter->rx_csum = 1;
1827 adapter->ahw->mc_enabled = 0; 1864 adapter->ahw->mc_enabled = 0;
1828 adapter->ahw->max_mc_count = QLCNIC_MAX_MC_COUNT; 1865 qlcnic_set_mac_filter_count(adapter);
1829 1866
1830 netdev->netdev_ops = &qlcnic_netdev_ops; 1867 netdev->netdev_ops = &qlcnic_netdev_ops;
1831 netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ; 1868 netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ;
@@ -1863,6 +1900,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
1863 netdev->features |= NETIF_F_LRO; 1900 netdev->features |= NETIF_F_LRO;
1864 1901
1865 netdev->hw_features = netdev->features; 1902 netdev->hw_features = netdev->features;
1903 netdev->priv_flags |= IFF_UNICAST_FLT;
1866 netdev->irq = adapter->msix_entries[0].vector; 1904 netdev->irq = adapter->msix_entries[0].vector;
1867 1905
1868 err = register_netdev(netdev); 1906 err = register_netdev(netdev);
@@ -1947,6 +1985,21 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
1947 return 0; 1985 return 0;
1948} 1986}
1949 1987
1988void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
1989{
1990 struct qlcnic_hardware_context *ahw = adapter->ahw;
1991 u32 fw_cmd = 0;
1992
1993 if (qlcnic_82xx_check(adapter))
1994 fw_cmd = QLCNIC_CMD_82XX_SET_DRV_VER;
1995 else if (qlcnic_83xx_check(adapter))
1996 fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER;
1997
1998 if ((ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) &&
1999 (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER))
2000 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
2001}
2002
1950static int 2003static int
1951qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2004qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1952{ 2005{
@@ -1954,7 +2007,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1954 struct qlcnic_adapter *adapter = NULL; 2007 struct qlcnic_adapter *adapter = NULL;
1955 struct qlcnic_hardware_context *ahw; 2008 struct qlcnic_hardware_context *ahw;
1956 int err, pci_using_dac = -1; 2009 int err, pci_using_dac = -1;
1957 u32 capab2;
1958 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ 2010 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
1959 2011
1960 if (pdev->is_virtfn) 2012 if (pdev->is_virtfn)
@@ -2109,13 +2161,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2109 if (err) 2161 if (err)
2110 goto err_out_disable_mbx_intr; 2162 goto err_out_disable_mbx_intr;
2111 2163
2112 if (qlcnic_82xx_check(adapter)) { 2164 qlcnic_set_drv_version(adapter);
2113 if (ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
2114 capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
2115 if (capab2 & QLCNIC_FW_CAPABILITY_2_OCBB)
2116 qlcnic_fw_cmd_set_drv_version(adapter);
2117 }
2118 }
2119 2165
2120 pci_set_drvdata(pdev, adapter); 2166 pci_set_drvdata(pdev, adapter);
2121 2167
@@ -2231,37 +2277,6 @@ static void qlcnic_remove(struct pci_dev *pdev)
2231 kfree(ahw); 2277 kfree(ahw);
2232 free_netdev(netdev); 2278 free_netdev(netdev);
2233} 2279}
2234static int __qlcnic_shutdown(struct pci_dev *pdev)
2235{
2236 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2237 struct net_device *netdev = adapter->netdev;
2238 int retval;
2239
2240 netif_device_detach(netdev);
2241
2242 qlcnic_cancel_idc_work(adapter);
2243
2244 if (netif_running(netdev))
2245 qlcnic_down(adapter, netdev);
2246
2247 qlcnic_sriov_cleanup(adapter);
2248 if (qlcnic_82xx_check(adapter))
2249 qlcnic_clr_all_drv_state(adapter, 0);
2250
2251 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2252
2253 retval = pci_save_state(pdev);
2254 if (retval)
2255 return retval;
2256 if (qlcnic_82xx_check(adapter)) {
2257 if (qlcnic_wol_supported(adapter)) {
2258 pci_enable_wake(pdev, PCI_D3cold, 1);
2259 pci_enable_wake(pdev, PCI_D3hot, 1);
2260 }
2261 }
2262
2263 return 0;
2264}
2265 2280
2266static void qlcnic_shutdown(struct pci_dev *pdev) 2281static void qlcnic_shutdown(struct pci_dev *pdev)
2267{ 2282{
@@ -2272,8 +2287,7 @@ static void qlcnic_shutdown(struct pci_dev *pdev)
2272} 2287}
2273 2288
2274#ifdef CONFIG_PM 2289#ifdef CONFIG_PM
2275static int 2290static int qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
2276qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
2277{ 2291{
2278 int retval; 2292 int retval;
2279 2293
@@ -2285,11 +2299,9 @@ qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
2285 return 0; 2299 return 0;
2286} 2300}
2287 2301
2288static int 2302static int qlcnic_resume(struct pci_dev *pdev)
2289qlcnic_resume(struct pci_dev *pdev)
2290{ 2303{
2291 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); 2304 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2292 struct net_device *netdev = adapter->netdev;
2293 int err; 2305 int err;
2294 2306
2295 err = pci_enable_device(pdev); 2307 err = pci_enable_device(pdev);
@@ -2300,23 +2312,7 @@ qlcnic_resume(struct pci_dev *pdev)
2300 pci_set_master(pdev); 2312 pci_set_master(pdev);
2301 pci_restore_state(pdev); 2313 pci_restore_state(pdev);
2302 2314
2303 err = qlcnic_start_firmware(adapter); 2315 return __qlcnic_resume(adapter);
2304 if (err) {
2305 dev_err(&pdev->dev, "failed to start firmware\n");
2306 return err;
2307 }
2308
2309 if (netif_running(netdev)) {
2310 err = qlcnic_up(adapter, netdev);
2311 if (err)
2312 goto done;
2313
2314 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2315 }
2316done:
2317 netif_device_attach(netdev);
2318 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2319 return 0;
2320} 2316}
2321#endif 2317#endif
2322 2318
@@ -2655,8 +2651,7 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2655 return 0; 2651 return 0;
2656} 2652}
2657 2653
2658static void 2654void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
2659qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
2660{ 2655{
2661 u32 val; 2656 u32 val;
2662 2657
@@ -3086,6 +3081,7 @@ done:
3086 adapter->fw_fail_cnt = 0; 3081 adapter->fw_fail_cnt = 0;
3087 adapter->flags &= ~QLCNIC_FW_HANG; 3082 adapter->flags &= ~QLCNIC_FW_HANG;
3088 clear_bit(__QLCNIC_RESETTING, &adapter->state); 3083 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3084 qlcnic_set_drv_version(adapter);
3089 3085
3090 if (!qlcnic_clr_drv_state(adapter)) 3086 if (!qlcnic_clr_drv_state(adapter))
3091 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, 3087 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
@@ -3166,8 +3162,7 @@ detach:
3166 return 1; 3162 return 1;
3167} 3163}
3168 3164
3169static void 3165void qlcnic_fw_poll_work(struct work_struct *work)
3170qlcnic_fw_poll_work(struct work_struct *work)
3171{ 3166{
3172 struct qlcnic_adapter *adapter = container_of(work, 3167 struct qlcnic_adapter *adapter = container_of(work,
3173 struct qlcnic_adapter, fw_work.work); 3168 struct qlcnic_adapter, fw_work.work);
@@ -3219,7 +3214,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
3219 if (err) 3214 if (err)
3220 return err; 3215 return err;
3221 3216
3222 pci_set_power_state(pdev, PCI_D0);
3223 pci_set_master(pdev); 3217 pci_set_master(pdev);
3224 pci_restore_state(pdev); 3218 pci_restore_state(pdev);
3225 3219
@@ -3517,7 +3511,7 @@ static int qlcnic_netdev_event(struct notifier_block *this,
3517 unsigned long event, void *ptr) 3511 unsigned long event, void *ptr)
3518{ 3512{
3519 struct qlcnic_adapter *adapter; 3513 struct qlcnic_adapter *adapter;
3520 struct net_device *dev = (struct net_device *)ptr; 3514 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3521 3515
3522recheck: 3516recheck:
3523 if (dev == NULL) 3517 if (dev == NULL)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 4b9bab18ebd9..ab8a6744d402 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -15,6 +15,7 @@
15#define QLC_83XX_MINIDUMP_FLASH 0x520000 15#define QLC_83XX_MINIDUMP_FLASH 0x520000
16#define QLC_83XX_OCM_INDEX 3 16#define QLC_83XX_OCM_INDEX 3
17#define QLC_83XX_PCI_INDEX 0 17#define QLC_83XX_PCI_INDEX 0
18#define QLC_83XX_DMA_ENGINE_INDEX 8
18 19
19static const u32 qlcnic_ms_read_data[] = { 20static const u32 qlcnic_ms_read_data[] = {
20 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC 21 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
@@ -32,6 +33,16 @@ static const u32 qlcnic_ms_read_data[] = {
32 33
33#define QLCNIC_DUMP_MASK_MAX 0xff 34#define QLCNIC_DUMP_MASK_MAX 0xff
34 35
36struct qlcnic_pex_dma_descriptor {
37 u32 read_data_size;
38 u32 dma_desc_cmd;
39 u32 src_addr_low;
40 u32 src_addr_high;
41 u32 dma_bus_addr_low;
42 u32 dma_bus_addr_high;
43 u32 rsvd[6];
44} __packed;
45
35struct qlcnic_common_entry_hdr { 46struct qlcnic_common_entry_hdr {
36 u32 type; 47 u32 type;
37 u32 offset; 48 u32 offset;
@@ -90,7 +101,10 @@ struct __ocm {
90} __packed; 101} __packed;
91 102
92struct __mem { 103struct __mem {
93 u8 rsvd[24]; 104 u32 desc_card_addr;
105 u32 dma_desc_cmd;
106 u32 start_dma_cmd;
107 u32 rsvd[3];
94 u32 addr; 108 u32 addr;
95 u32 size; 109 u32 size;
96} __packed; 110} __packed;
@@ -466,12 +480,12 @@ skip_poll:
466 return l2->no_ops * l2->read_addr_num * sizeof(u32); 480 return l2->no_ops * l2->read_addr_num * sizeof(u32);
467} 481}
468 482
469static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, 483static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
470 struct qlcnic_dump_entry *entry, __le32 *buffer) 484 struct __mem *mem, __le32 *buffer,
485 int *ret)
471{ 486{
472 u32 addr, data, test, ret = 0; 487 u32 addr, data, test;
473 int i, reg_read; 488 int i, reg_read;
474 struct __mem *mem = &entry->region.mem;
475 489
476 reg_read = mem->size; 490 reg_read = mem->size;
477 addr = mem->addr; 491 addr = mem->addr;
@@ -480,7 +494,8 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
480 dev_info(&adapter->pdev->dev, 494 dev_info(&adapter->pdev->dev,
481 "Unaligned memory addr:0x%x size:0x%x\n", 495 "Unaligned memory addr:0x%x size:0x%x\n",
482 addr, reg_read); 496 addr, reg_read);
483 return -EINVAL; 497 *ret = -EINVAL;
498 return 0;
484 } 499 }
485 500
486 mutex_lock(&adapter->ahw->mem_lock); 501 mutex_lock(&adapter->ahw->mem_lock);
@@ -499,7 +514,7 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
499 if (printk_ratelimit()) { 514 if (printk_ratelimit()) {
500 dev_err(&adapter->pdev->dev, 515 dev_err(&adapter->pdev->dev,
501 "failed to read through agent\n"); 516 "failed to read through agent\n");
502 ret = -EINVAL; 517 *ret = -EIO;
503 goto out; 518 goto out;
504 } 519 }
505 } 520 }
@@ -516,6 +531,181 @@ out:
516 return mem->size; 531 return mem->size;
517} 532}
518 533
534/* DMA register base address */
535#define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000))
536
537/* DMA register offsets w.r.t base address */
538#define QLC_DMA_CMD_BUFF_ADDR_LOW 0
539#define QLC_DMA_CMD_BUFF_ADDR_HI 4
540#define QLC_DMA_CMD_STATUS_CTRL 8
541
542#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
543
544static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
545 struct __mem *mem)
546{
547 struct qlcnic_dump_template_hdr *tmpl_hdr;
548 struct device *dev = &adapter->pdev->dev;
549 u32 dma_no, dma_base_addr, temp_addr;
550 int i, ret, dma_sts;
551
552 tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
553 dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
554 dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
555
556 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
557 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
558 mem->desc_card_addr);
559 if (ret)
560 return ret;
561
562 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
563 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
564 if (ret)
565 return ret;
566
567 temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
568 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
569 mem->start_dma_cmd);
570 if (ret)
571 return ret;
572
573 /* Wait for DMA to complete */
574 temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
575 for (i = 0; i < 400; i++) {
576 dma_sts = qlcnic_ind_rd(adapter, temp_addr);
577
578 if (dma_sts & BIT_1)
579 usleep_range(250, 500);
580 else
581 break;
582 }
583
584 if (i >= 400) {
585 dev_info(dev, "PEX DMA operation timed out");
586 ret = -EIO;
587 }
588
589 return ret;
590}
591
592static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
593 struct __mem *mem,
594 __le32 *buffer, int *ret)
595{
596 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
597 u32 temp, dma_base_addr, size = 0, read_size = 0;
598 struct qlcnic_pex_dma_descriptor *dma_descr;
599 struct qlcnic_dump_template_hdr *tmpl_hdr;
600 struct device *dev = &adapter->pdev->dev;
601 dma_addr_t dma_phys_addr;
602 void *dma_buffer;
603
604 tmpl_hdr = fw_dump->tmpl_hdr;
605
606 /* Check if DMA engine is available */
607 temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
608 dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
609 temp = qlcnic_ind_rd(adapter,
610 dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
611
612 if (!(temp & BIT_31)) {
613 dev_info(dev, "%s: DMA engine is not available\n", __func__);
614 *ret = -EIO;
615 return 0;
616 }
617
618 /* Create DMA descriptor */
619 dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
620 GFP_KERNEL);
621 if (!dma_descr) {
622 *ret = -ENOMEM;
623 return 0;
624 }
625
626 /* dma_desc_cmd 0:15 = 0
627 * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3
628 * dma_desc_cmd 20:23 = pci function number
629 * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15
630 */
631 dma_phys_addr = fw_dump->phys_addr;
632 dma_buffer = fw_dump->dma_buffer;
633 temp = 0;
634 temp = mem->dma_desc_cmd & 0xff0f;
635 temp |= (adapter->ahw->pci_func & 0xf) << 4;
636 dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
637 dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
638 dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
639 dma_descr->src_addr_high = 0;
640
641 /* Collect memory dump using multiple DMA operations if required */
642 while (read_size < mem->size) {
643 if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
644 size = QLC_PEX_DMA_READ_SIZE;
645 else
646 size = mem->size - read_size;
647
648 dma_descr->src_addr_low = mem->addr + read_size;
649 dma_descr->read_data_size = size;
650
651 /* Write DMA descriptor to MS memory*/
652 temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
653 *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
654 (u32 *)dma_descr, temp);
655 if (*ret) {
656 dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
657 mem->desc_card_addr);
658 goto free_dma_descr;
659 }
660
661 *ret = qlcnic_start_pex_dma(adapter, mem);
662 if (*ret) {
663 dev_info(dev, "Failed to start PEX DMA operation\n");
664 goto free_dma_descr;
665 }
666
667 memcpy(buffer, dma_buffer, size);
668 buffer += size / 4;
669 read_size += size;
670 }
671
672free_dma_descr:
673 kfree(dma_descr);
674
675 return read_size;
676}
677
678static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
679 struct qlcnic_dump_entry *entry, __le32 *buffer)
680{
681 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
682 struct device *dev = &adapter->pdev->dev;
683 struct __mem *mem = &entry->region.mem;
684 u32 data_size;
685 int ret = 0;
686
687 if (fw_dump->use_pex_dma) {
688 data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
689 &ret);
690 if (ret)
691 dev_info(dev,
692 "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
693 entry->hdr.mask);
694 else
695 return data_size;
696 }
697
698 data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
699 if (ret) {
700 dev_info(dev,
701 "Failed to read memory dump using test agent method: mask[0x%x]\n",
702 entry->hdr.mask);
703 return 0;
704 } else {
705 return data_size;
706 }
707}
708
519static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter, 709static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
520 struct qlcnic_dump_entry *entry, __le32 *buffer) 710 struct qlcnic_dump_entry *entry, __le32 *buffer)
521{ 711{
@@ -893,6 +1083,12 @@ flash_temp:
893 1083
894 tmpl_hdr = ahw->fw_dump.tmpl_hdr; 1084 tmpl_hdr = ahw->fw_dump.tmpl_hdr;
895 tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; 1085 tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
1086
1087 if ((tmpl_hdr->version & 0xffffff) >= 0x20001)
1088 ahw->fw_dump.use_pex_dma = true;
1089 else
1090 ahw->fw_dump.use_pex_dma = false;
1091
896 ahw->fw_dump.enable = 1; 1092 ahw->fw_dump.enable = 1;
897 1093
898 return 0; 1094 return 0;
@@ -910,7 +1106,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
910 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 1106 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
911 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr; 1107 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
912 static const struct qlcnic_dump_operations *fw_dump_ops; 1108 static const struct qlcnic_dump_operations *fw_dump_ops;
1109 struct device *dev = &adapter->pdev->dev;
913 struct qlcnic_hardware_context *ahw; 1110 struct qlcnic_hardware_context *ahw;
1111 void *temp_buffer;
914 1112
915 ahw = adapter->ahw; 1113 ahw = adapter->ahw;
916 1114
@@ -944,6 +1142,16 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
944 tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION; 1142 tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
945 tmpl_hdr->sys_info[1] = adapter->fw_version; 1143 tmpl_hdr->sys_info[1] = adapter->fw_version;
946 1144
1145 if (fw_dump->use_pex_dma) {
1146 temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
1147 &fw_dump->phys_addr,
1148 GFP_KERNEL);
1149 if (!temp_buffer)
1150 fw_dump->use_pex_dma = false;
1151 else
1152 fw_dump->dma_buffer = temp_buffer;
1153 }
1154
947 if (qlcnic_82xx_check(adapter)) { 1155 if (qlcnic_82xx_check(adapter)) {
948 ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops); 1156 ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
949 fw_dump_ops = qlcnic_fw_dump_ops; 1157 fw_dump_ops = qlcnic_fw_dump_ops;
@@ -1002,6 +1210,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1002 return 0; 1210 return 0;
1003 } 1211 }
1004error: 1212error:
1213 if (fw_dump->use_pex_dma)
1214 dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
1215 fw_dump->dma_buffer, fw_dump->phys_addr);
1005 vfree(fw_dump->data); 1216 vfree(fw_dump->data);
1006 return -EINVAL; 1217 return -EINVAL;
1007} 1218}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index d85fbb57c25b..0daf660e12a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -129,6 +129,7 @@ struct qlcnic_vport {
129 u8 vlan_mode; 129 u8 vlan_mode;
130 u16 vlan; 130 u16 vlan;
131 u8 qos; 131 u8 qos;
132 bool spoofchk;
132 u8 mac[6]; 133 u8 mac[6];
133}; 134};
134 135
@@ -194,6 +195,8 @@ int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
194int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *, 195int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
195 struct qlcnic_info *, u16); 196 struct qlcnic_info *, u16);
196int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8); 197int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
198int qlcnic_sriov_vf_shutdown(struct pci_dev *);
199int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
197 200
198static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter) 201static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
199{ 202{
@@ -225,6 +228,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int);
225int qlcnic_sriov_get_vf_config(struct net_device *, int , 228int qlcnic_sriov_get_vf_config(struct net_device *, int ,
226 struct ifla_vf_info *); 229 struct ifla_vf_info *);
227int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8); 230int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
231int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool);
228#else 232#else
229static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {} 233static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
230static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {} 234static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 196b2d100407..62380ce89905 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -35,6 +35,7 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
35static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *); 35static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
36static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *, 36static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
37 struct qlcnic_cmd_args *); 37 struct qlcnic_cmd_args *);
38static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
38 39
39static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { 40static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
40 .read_crb = qlcnic_83xx_read_crb, 41 .read_crb = qlcnic_83xx_read_crb,
@@ -75,6 +76,8 @@ static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
75 .cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work, 76 .cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work,
76 .napi_add = qlcnic_83xx_napi_add, 77 .napi_add = qlcnic_83xx_napi_add,
77 .napi_del = qlcnic_83xx_napi_del, 78 .napi_del = qlcnic_83xx_napi_del,
79 .shutdown = qlcnic_sriov_vf_shutdown,
80 .resume = qlcnic_sriov_vf_resume,
78 .config_ipaddr = qlcnic_83xx_config_ipaddr, 81 .config_ipaddr = qlcnic_83xx_config_ipaddr,
79 .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, 82 .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
80}; 83};
@@ -179,6 +182,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
179 spin_lock_init(&vf->rcv_pend.lock); 182 spin_lock_init(&vf->rcv_pend.lock);
180 init_completion(&vf->ch_free_cmpl); 183 init_completion(&vf->ch_free_cmpl);
181 184
185 INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
186
182 if (qlcnic_sriov_pf_check(adapter)) { 187 if (qlcnic_sriov_pf_check(adapter)) {
183 vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL); 188 vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
184 if (!vp) { 189 if (!vp) {
@@ -187,6 +192,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
187 } 192 }
188 sriov->vf_info[i].vp = vp; 193 sriov->vf_info[i].vp = vp;
189 vp->max_tx_bw = MAX_BW; 194 vp->max_tx_bw = MAX_BW;
195 vp->spoofchk = true;
190 random_ether_addr(vp->mac); 196 random_ether_addr(vp->mac);
191 dev_info(&adapter->pdev->dev, 197 dev_info(&adapter->pdev->dev,
192 "MAC Address %pM is configured for VF %d\n", 198 "MAC Address %pM is configured for VF %d\n",
@@ -652,6 +658,8 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
652 if (qlcnic_read_mac_addr(adapter)) 658 if (qlcnic_read_mac_addr(adapter))
653 dev_warn(&adapter->pdev->dev, "failed to read mac addr\n"); 659 dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
654 660
661 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
662
655 clear_bit(__QLCNIC_RESETTING, &adapter->state); 663 clear_bit(__QLCNIC_RESETTING, &adapter->state);
656 return 0; 664 return 0;
657} 665}
@@ -864,7 +872,6 @@ static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
864 vf->adapter->need_fw_reset) 872 vf->adapter->need_fw_reset)
865 return; 873 return;
866 874
867 INIT_WORK(&vf->trans_work, func);
868 queue_work(sriov->bc.bc_trans_wq, &vf->trans_work); 875 queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
869} 876}
870 877
@@ -1675,7 +1682,7 @@ static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
1675 qlcnic_sriov_vf_attach(adapter); 1682 qlcnic_sriov_vf_attach(adapter);
1676 adapter->fw_fail_cnt = 0; 1683 adapter->fw_fail_cnt = 0;
1677 dev_info(dev, 1684 dev_info(dev,
1678 "%s: Reinitalization of VF 0x%x done after FW reset\n", 1685 "%s: Reinitialization of VF 0x%x done after FW reset\n",
1679 __func__, func); 1686 __func__, func);
1680 } else { 1687 } else {
1681 dev_err(dev, 1688 dev_err(dev,
@@ -1949,3 +1956,54 @@ static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
1949 kfree(cur); 1956 kfree(cur);
1950 } 1957 }
1951} 1958}
1959
1960int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
1961{
1962 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1963 struct net_device *netdev = adapter->netdev;
1964 int retval;
1965
1966 netif_device_detach(netdev);
1967 qlcnic_cancel_idc_work(adapter);
1968
1969 if (netif_running(netdev))
1970 qlcnic_down(adapter, netdev);
1971
1972 qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
1973 qlcnic_sriov_cfg_bc_intr(adapter, 0);
1974 qlcnic_83xx_disable_mbx_intr(adapter);
1975 cancel_delayed_work_sync(&adapter->idc_aen_work);
1976
1977 retval = pci_save_state(pdev);
1978 if (retval)
1979 return retval;
1980
1981 return 0;
1982}
1983
1984int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
1985{
1986 struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1987 struct net_device *netdev = adapter->netdev;
1988 int err;
1989
1990 set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1991 qlcnic_83xx_enable_mbx_intrpt(adapter);
1992 err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
1993 if (err)
1994 return err;
1995
1996 err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
1997 if (!err) {
1998 if (netif_running(netdev)) {
1999 err = qlcnic_up(adapter, netdev);
2000 if (!err)
2001 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2002 }
2003 }
2004
2005 netif_device_attach(netdev);
2006 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
2007 idc->delay);
2008 return err;
2009}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 1a66ccded235..ee0c1d307966 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -580,6 +580,7 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
580 struct qlcnic_cmd_args cmd; 580 struct qlcnic_cmd_args cmd;
581 struct qlcnic_vport *vp; 581 struct qlcnic_vport *vp;
582 int err, id; 582 int err, id;
583 u8 *mac;
583 584
584 id = qlcnic_sriov_func_to_index(adapter, func); 585 id = qlcnic_sriov_func_to_index(adapter, func);
585 if (id < 0) 586 if (id < 0)
@@ -591,6 +592,14 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
591 return err; 592 return err;
592 593
593 cmd.req.arg[1] = 0x3 | func << 16; 594 cmd.req.arg[1] = 0x3 | func << 16;
595 if (vp->spoofchk == true) {
596 mac = vp->mac;
597 cmd.req.arg[2] |= BIT_1 | BIT_3 | BIT_8;
598 cmd.req.arg[4] = mac[5] | mac[4] << 8 | mac[3] << 16 |
599 mac[2] << 24;
600 cmd.req.arg[5] = mac[1] | mac[0] << 8;
601 }
602
594 if (vp->vlan_mode == QLC_PVID_MODE) { 603 if (vp->vlan_mode == QLC_PVID_MODE) {
595 cmd.req.arg[2] |= BIT_6; 604 cmd.req.arg[2] |= BIT_6;
596 cmd.req.arg[3] |= vp->vlan << 8; 605 cmd.req.arg[3] |= vp->vlan << 8;
@@ -1767,6 +1776,7 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
1767 memcpy(&ivi->mac, vp->mac, ETH_ALEN); 1776 memcpy(&ivi->mac, vp->mac, ETH_ALEN);
1768 ivi->vlan = vp->vlan; 1777 ivi->vlan = vp->vlan;
1769 ivi->qos = vp->qos; 1778 ivi->qos = vp->qos;
1779 ivi->spoofchk = vp->spoofchk;
1770 if (vp->max_tx_bw == MAX_BW) 1780 if (vp->max_tx_bw == MAX_BW)
1771 ivi->tx_rate = 0; 1781 ivi->tx_rate = 0;
1772 else 1782 else
@@ -1775,3 +1785,29 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
1775 ivi->vf = vf; 1785 ivi->vf = vf;
1776 return 0; 1786 return 0;
1777} 1787}
1788
1789int qlcnic_sriov_set_vf_spoofchk(struct net_device *netdev, int vf, bool chk)
1790{
1791 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1792 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1793 struct qlcnic_vf_info *vf_info;
1794 struct qlcnic_vport *vp;
1795
1796 if (!qlcnic_sriov_pf_check(adapter))
1797 return -EOPNOTSUPP;
1798
1799 if (vf >= sriov->num_vfs)
1800 return -EINVAL;
1801
1802 vf_info = &sriov->vf_info[vf];
1803 vp = vf_info->vp;
1804 if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
1805 netdev_err(netdev,
1806 "Spoof check change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
1807 vf);
1808 return -EOPNOTSUPP;
1809 }
1810
1811 vp->spoofchk = chk;
1812 return 0;
1813}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index e7a2fe21b649..10ed82b3baca 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -47,7 +47,7 @@ static ssize_t qlcnic_store_bridged_mode(struct device *dev,
47 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) 47 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
48 goto err_out; 48 goto err_out;
49 49
50 if (strict_strtoul(buf, 2, &new)) 50 if (kstrtoul(buf, 2, &new))
51 goto err_out; 51 goto err_out;
52 52
53 if (!qlcnic_config_bridged_mode(adapter, !!new)) 53 if (!qlcnic_config_bridged_mode(adapter, !!new))
@@ -77,7 +77,7 @@ static ssize_t qlcnic_store_diag_mode(struct device *dev,
77 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 77 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
78 unsigned long new; 78 unsigned long new;
79 79
80 if (strict_strtoul(buf, 2, &new)) 80 if (kstrtoul(buf, 2, &new))
81 return -EINVAL; 81 return -EINVAL;
82 82
83 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED)) 83 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
@@ -114,57 +114,51 @@ static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
114 return 0; 114 return 0;
115} 115}
116 116
117static ssize_t qlcnic_store_beacon(struct device *dev, 117static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter,
118 struct device_attribute *attr, 118 const char *buf, size_t len)
119 const char *buf, size_t len)
120{ 119{
121 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
122 struct qlcnic_hardware_context *ahw = adapter->ahw; 120 struct qlcnic_hardware_context *ahw = adapter->ahw;
123 int err, max_sds_rings = adapter->max_sds_rings;
124 u16 beacon;
125 u8 b_state, b_rate;
126 unsigned long h_beacon; 121 unsigned long h_beacon;
122 int err;
127 123
128 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { 124 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
129 dev_warn(dev, 125 return -EIO;
130 "LED test not supported in non privileged mode\n");
131 return -EOPNOTSUPP;
132 }
133 126
134 if (qlcnic_83xx_check(adapter) && 127 if (kstrtoul(buf, 2, &h_beacon))
135 !test_bit(__QLCNIC_RESETTING, &adapter->state)) { 128 return -EINVAL;
136 if (kstrtoul(buf, 2, &h_beacon))
137 return -EINVAL;
138 129
139 if (ahw->beacon_state == h_beacon) 130 if (ahw->beacon_state == h_beacon)
140 return len; 131 return len;
141 132
142 rtnl_lock(); 133 rtnl_lock();
143 if (!ahw->beacon_state) { 134 if (!ahw->beacon_state) {
144 if (test_and_set_bit(__QLCNIC_LED_ENABLE, 135 if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
145 &adapter->state)) { 136 rtnl_unlock();
146 rtnl_unlock(); 137 return -EBUSY;
147 return -EBUSY;
148 }
149 }
150 if (h_beacon) {
151 err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
152 if (err)
153 goto beacon_err;
154 } else {
155 err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
156 if (err)
157 goto beacon_err;
158 } 138 }
159 /* set the current beacon state */ 139 }
140
141 if (h_beacon)
142 err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
143 else
144 err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
145 if (!err)
160 ahw->beacon_state = h_beacon; 146 ahw->beacon_state = h_beacon;
161beacon_err:
162 if (!ahw->beacon_state)
163 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
164 147
165 rtnl_unlock(); 148 if (!ahw->beacon_state)
166 return len; 149 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
167 } 150
151 rtnl_unlock();
152 return len;
153}
154
155static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
156 const char *buf, size_t len)
157{
158 struct qlcnic_hardware_context *ahw = adapter->ahw;
159 int err, max_sds_rings = adapter->max_sds_rings;
160 u16 beacon;
161 u8 h_beacon_state, b_state, b_rate;
168 162
169 if (len != sizeof(u16)) 163 if (len != sizeof(u16))
170 return QL_STATUS_INVALID_PARAM; 164 return QL_STATUS_INVALID_PARAM;
@@ -174,16 +168,29 @@ beacon_err:
174 if (err) 168 if (err)
175 return err; 169 return err;
176 170
177 if (adapter->ahw->beacon_state == b_state) 171 if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
172 err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
173 if (!err) {
174 dev_info(&adapter->pdev->dev,
175 "Failed to get current beacon state\n");
176 } else {
177 if (h_beacon_state == QLCNIC_BEACON_DISABLE)
178 ahw->beacon_state = 0;
179 else if (h_beacon_state == QLCNIC_BEACON_EANBLE)
180 ahw->beacon_state = 2;
181 }
182 }
183
184 if (ahw->beacon_state == b_state)
178 return len; 185 return len;
179 186
180 rtnl_lock(); 187 rtnl_lock();
181 188 if (!ahw->beacon_state) {
182 if (!adapter->ahw->beacon_state)
183 if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) { 189 if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
184 rtnl_unlock(); 190 rtnl_unlock();
185 return -EBUSY; 191 return -EBUSY;
186 } 192 }
193 }
187 194
188 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { 195 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
189 err = -EIO; 196 err = -EIO;
@@ -206,14 +213,37 @@ beacon_err:
206 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) 213 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
207 qlcnic_diag_free_res(adapter->netdev, max_sds_rings); 214 qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
208 215
209 out: 216out:
210 if (!adapter->ahw->beacon_state) 217 if (!ahw->beacon_state)
211 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); 218 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
212 rtnl_unlock(); 219 rtnl_unlock();
213 220
214 return err; 221 return err;
215} 222}
216 223
224static ssize_t qlcnic_store_beacon(struct device *dev,
225 struct device_attribute *attr,
226 const char *buf, size_t len)
227{
228 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
229 int err = 0;
230
231 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
232 dev_warn(dev,
233 "LED test not supported in non privileged mode\n");
234 return -EOPNOTSUPP;
235 }
236
237 if (qlcnic_82xx_check(adapter))
238 err = qlcnic_82xx_store_beacon(adapter, buf, len);
239 else if (qlcnic_83xx_check(adapter))
240 err = qlcnic_83xx_store_beacon(adapter, buf, len);
241 else
242 return -EIO;
243
244 return err;
245}
246
217static ssize_t qlcnic_show_beacon(struct device *dev, 247static ssize_t qlcnic_show_beacon(struct device *dev,
218 struct device_attribute *attr, char *buf) 248 struct device_attribute *attr, char *buf)
219{ 249{
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index f87cc216045b..2553cf4503b9 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4946,15 +4946,4 @@ static struct pci_driver qlge_driver = {
4946 .err_handler = &qlge_err_handler 4946 .err_handler = &qlge_err_handler
4947}; 4947};
4948 4948
4949static int __init qlge_init_module(void) 4949module_pci_driver(qlge_driver);
4950{
4951 return pci_register_driver(&qlge_driver);
4952}
4953
4954static void __exit qlge_exit(void)
4955{
4956 pci_unregister_driver(&qlge_driver);
4957}
4958
4959module_init(qlge_init_module);
4960module_exit(qlge_exit);
diff --git a/drivers/net/ethernet/rdc/Kconfig b/drivers/net/ethernet/rdc/Kconfig
index c8ba4b3494c1..2055f7eb2ba9 100644
--- a/drivers/net/ethernet/rdc/Kconfig
+++ b/drivers/net/ethernet/rdc/Kconfig
@@ -22,7 +22,6 @@ config R6040
22 tristate "RDC R6040 Fast Ethernet Adapter support" 22 tristate "RDC R6040 Fast Ethernet Adapter support"
23 depends on PCI 23 depends on PCI
24 select CRC32 24 select CRC32
25 select NET_CORE
26 select MII 25 select MII
27 select PHYLIB 26 select PHYLIB
28 ---help--- 27 ---help---
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 03523459c406..e6acb9fa5767 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1817,7 +1817,7 @@ static int cp_set_eeprom(struct net_device *dev,
1817/* Put the board into D3cold state and wait for WakeUp signal */ 1817/* Put the board into D3cold state and wait for WakeUp signal */
1818static void cp_set_d3_state (struct cp_private *cp) 1818static void cp_set_d3_state (struct cp_private *cp)
1819{ 1819{
1820 pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */ 1820 pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */
1821 pci_set_power_state (cp->pdev, PCI_D3hot); 1821 pci_set_power_state (cp->pdev, PCI_D3hot);
1822} 1822}
1823 1823
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 783fa8b5cde7..ae5d027096ed 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -37,7 +37,6 @@ config 8139CP
37 tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support" 37 tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support"
38 depends on PCI 38 depends on PCI
39 select CRC32 39 select CRC32
40 select NET_CORE
41 select MII 40 select MII
42 ---help--- 41 ---help---
43 This is a driver for the Fast Ethernet PCI network cards based on 42 This is a driver for the Fast Ethernet PCI network cards based on
@@ -52,7 +51,6 @@ config 8139TOO
52 tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support" 51 tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support"
53 depends on PCI 52 depends on PCI
54 select CRC32 53 select CRC32
55 select NET_CORE
56 select MII 54 select MII
57 ---help--- 55 ---help---
58 This is a driver for the Fast Ethernet PCI network cards based on 56 This is a driver for the Fast Ethernet PCI network cards based on
@@ -107,7 +105,6 @@ config R8169
107 depends on PCI 105 depends on PCI
108 select FW_LOADER 106 select FW_LOADER
109 select CRC32 107 select CRC32
110 select NET_CORE
111 select MII 108 select MII
112 ---help--- 109 ---help---
113 Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. 110 Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index bed9841d728c..544514e66187 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -4,14 +4,7 @@
4 4
5config SH_ETH 5config SH_ETH
6 tristate "Renesas SuperH Ethernet support" 6 tristate "Renesas SuperH Ethernet support"
7 depends on (SUPERH || ARCH_SHMOBILE) && \
8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
11 CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || \
12 ARCH_R8A7778 || ARCH_R8A7779)
13 select CRC32 7 select CRC32
14 select NET_CORE
15 select MII 8 select MII
16 select MDIO_BITBANG 9 select MDIO_BITBANG
17 select PHYLIB 10 select PHYLIB
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e29fe8dbd226..a753928bab9c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -313,9 +313,14 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
313 [TSU_ADRL31] = 0x01fc, 313 [TSU_ADRL31] = 0x01fc,
314}; 314};
315 315
316#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \ 316static int sh_eth_is_gether(struct sh_eth_private *mdp)
317 defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 317{
318 defined(CONFIG_ARCH_R8A7740) 318 if (mdp->reg_offset == sh_eth_offset_gigabit)
319 return 1;
320 else
321 return 0;
322}
323
319static void sh_eth_select_mii(struct net_device *ndev) 324static void sh_eth_select_mii(struct net_device *ndev)
320{ 325{
321 u32 value = 0x0; 326 u32 value = 0x0;
@@ -339,11 +344,7 @@ static void sh_eth_select_mii(struct net_device *ndev)
339 344
340 sh_eth_write(ndev, value, RMII_MII); 345 sh_eth_write(ndev, value, RMII_MII);
341} 346}
342#endif
343 347
344/* There is CPU dependent code */
345#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779)
346#define SH_ETH_RESET_DEFAULT 1
347static void sh_eth_set_duplex(struct net_device *ndev) 348static void sh_eth_set_duplex(struct net_device *ndev)
348{ 349{
349 struct sh_eth_private *mdp = netdev_priv(ndev); 350 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -354,7 +355,8 @@ static void sh_eth_set_duplex(struct net_device *ndev)
354 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 355 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
355} 356}
356 357
357static void sh_eth_set_rate(struct net_device *ndev) 358/* There is CPU dependent code */
359static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
358{ 360{
359 struct sh_eth_private *mdp = netdev_priv(ndev); 361 struct sh_eth_private *mdp = netdev_priv(ndev);
360 362
@@ -371,9 +373,9 @@ static void sh_eth_set_rate(struct net_device *ndev)
371} 373}
372 374
373/* R8A7778/9 */ 375/* R8A7778/9 */
374static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 376static struct sh_eth_cpu_data r8a777x_data = {
375 .set_duplex = sh_eth_set_duplex, 377 .set_duplex = sh_eth_set_duplex,
376 .set_rate = sh_eth_set_rate, 378 .set_rate = sh_eth_set_rate_r8a777x,
377 379
378 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, 380 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
379 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, 381 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
@@ -383,26 +385,14 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
383 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 385 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
384 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 386 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
385 EESR_ECI, 387 EESR_ECI,
386 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
387 388
388 .apr = 1, 389 .apr = 1,
389 .mpr = 1, 390 .mpr = 1,
390 .tpauser = 1, 391 .tpauser = 1,
391 .hw_swap = 1, 392 .hw_swap = 1,
392}; 393};
393#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
394#define SH_ETH_RESET_DEFAULT 1
395static void sh_eth_set_duplex(struct net_device *ndev)
396{
397 struct sh_eth_private *mdp = netdev_priv(ndev);
398 394
399 if (mdp->duplex) /* Full */ 395static void sh_eth_set_rate_sh7724(struct net_device *ndev)
400 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
401 else /* Half */
402 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
403}
404
405static void sh_eth_set_rate(struct net_device *ndev)
406{ 396{
407 struct sh_eth_private *mdp = netdev_priv(ndev); 397 struct sh_eth_private *mdp = netdev_priv(ndev);
408 398
@@ -419,19 +409,18 @@ static void sh_eth_set_rate(struct net_device *ndev)
419} 409}
420 410
421/* SH7724 */ 411/* SH7724 */
422static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 412static struct sh_eth_cpu_data sh7724_data = {
423 .set_duplex = sh_eth_set_duplex, 413 .set_duplex = sh_eth_set_duplex,
424 .set_rate = sh_eth_set_rate, 414 .set_rate = sh_eth_set_rate_sh7724,
425 415
426 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, 416 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
427 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, 417 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
428 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, 418 .eesipr_value = 0x01ff009f,
429 419
430 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 420 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
431 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 421 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
432 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 422 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
433 EESR_ECI, 423 EESR_ECI,
434 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
435 424
436 .apr = 1, 425 .apr = 1,
437 .mpr = 1, 426 .mpr = 1,
@@ -440,22 +429,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
440 .rpadir = 1, 429 .rpadir = 1,
441 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ 430 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
442}; 431};
443#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
444#define SH_ETH_HAS_BOTH_MODULES 1
445#define SH_ETH_HAS_TSU 1
446static int sh_eth_check_reset(struct net_device *ndev);
447
448static void sh_eth_set_duplex(struct net_device *ndev)
449{
450 struct sh_eth_private *mdp = netdev_priv(ndev);
451 432
452 if (mdp->duplex) /* Full */ 433static void sh_eth_set_rate_sh7757(struct net_device *ndev)
453 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
454 else /* Half */
455 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
456}
457
458static void sh_eth_set_rate(struct net_device *ndev)
459{ 434{
460 struct sh_eth_private *mdp = netdev_priv(ndev); 435 struct sh_eth_private *mdp = netdev_priv(ndev);
461 436
@@ -472,9 +447,9 @@ static void sh_eth_set_rate(struct net_device *ndev)
472} 447}
473 448
474/* SH7757 */ 449/* SH7757 */
475static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 450static struct sh_eth_cpu_data sh7757_data = {
476 .set_duplex = sh_eth_set_duplex, 451 .set_duplex = sh_eth_set_duplex,
477 .set_rate = sh_eth_set_rate, 452 .set_rate = sh_eth_set_rate_sh7757,
478 453
479 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 454 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
480 .rmcr_value = 0x00000001, 455 .rmcr_value = 0x00000001,
@@ -483,8 +458,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
483 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 458 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
484 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 459 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
485 EESR_ECI, 460 EESR_ECI,
486 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
487 461
462 .irq_flags = IRQF_SHARED,
488 .apr = 1, 463 .apr = 1,
489 .mpr = 1, 464 .mpr = 1,
490 .tpauser = 1, 465 .tpauser = 1,
@@ -494,7 +469,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
494 .rpadir_value = 2 << 16, 469 .rpadir_value = 2 << 16,
495}; 470};
496 471
497#define SH_GIGA_ETH_BASE 0xfee00000 472#define SH_GIGA_ETH_BASE 0xfee00000UL
498#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) 473#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
499#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) 474#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
500static void sh_eth_chip_reset_giga(struct net_device *ndev) 475static void sh_eth_chip_reset_giga(struct net_device *ndev)
@@ -519,52 +494,6 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
519 } 494 }
520} 495}
521 496
522static int sh_eth_is_gether(struct sh_eth_private *mdp);
523static int sh_eth_reset(struct net_device *ndev)
524{
525 struct sh_eth_private *mdp = netdev_priv(ndev);
526 int ret = 0;
527
528 if (sh_eth_is_gether(mdp)) {
529 sh_eth_write(ndev, 0x03, EDSR);
530 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
531 EDMR);
532
533 ret = sh_eth_check_reset(ndev);
534 if (ret)
535 goto out;
536
537 /* Table Init */
538 sh_eth_write(ndev, 0x0, TDLAR);
539 sh_eth_write(ndev, 0x0, TDFAR);
540 sh_eth_write(ndev, 0x0, TDFXR);
541 sh_eth_write(ndev, 0x0, TDFFR);
542 sh_eth_write(ndev, 0x0, RDLAR);
543 sh_eth_write(ndev, 0x0, RDFAR);
544 sh_eth_write(ndev, 0x0, RDFXR);
545 sh_eth_write(ndev, 0x0, RDFFR);
546 } else {
547 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
548 EDMR);
549 mdelay(3);
550 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
551 EDMR);
552 }
553
554out:
555 return ret;
556}
557
558static void sh_eth_set_duplex_giga(struct net_device *ndev)
559{
560 struct sh_eth_private *mdp = netdev_priv(ndev);
561
562 if (mdp->duplex) /* Full */
563 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
564 else /* Half */
565 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
566}
567
568static void sh_eth_set_rate_giga(struct net_device *ndev) 497static void sh_eth_set_rate_giga(struct net_device *ndev)
569{ 498{
570 struct sh_eth_private *mdp = netdev_priv(ndev); 499 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -585,9 +514,9 @@ static void sh_eth_set_rate_giga(struct net_device *ndev)
585} 514}
586 515
587/* SH7757(GETHERC) */ 516/* SH7757(GETHERC) */
588static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { 517static struct sh_eth_cpu_data sh7757_data_giga = {
589 .chip_reset = sh_eth_chip_reset_giga, 518 .chip_reset = sh_eth_chip_reset_giga,
590 .set_duplex = sh_eth_set_duplex_giga, 519 .set_duplex = sh_eth_set_duplex,
591 .set_rate = sh_eth_set_rate_giga, 520 .set_rate = sh_eth_set_rate_giga,
592 521
593 .ecsr_value = ECSR_ICD | ECSR_MPD, 522 .ecsr_value = ECSR_ICD | ECSR_MPD,
@@ -598,11 +527,10 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
598 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 527 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
599 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 528 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
600 EESR_TDE | EESR_ECI, 529 EESR_TDE | EESR_ECI,
601 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
602 EESR_TFE,
603 .fdr_value = 0x0000072f, 530 .fdr_value = 0x0000072f,
604 .rmcr_value = 0x00000001, 531 .rmcr_value = 0x00000001,
605 532
533 .irq_flags = IRQF_SHARED,
606 .apr = 1, 534 .apr = 1,
607 .mpr = 1, 535 .mpr = 1,
608 .tpauser = 1, 536 .tpauser = 1,
@@ -615,19 +543,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
615 .tsu = 1, 543 .tsu = 1,
616}; 544};
617 545
618static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
619{
620 if (sh_eth_is_gether(mdp))
621 return &sh_eth_my_cpu_data_giga;
622 else
623 return &sh_eth_my_cpu_data;
624}
625
626#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
627#define SH_ETH_HAS_TSU 1
628static int sh_eth_check_reset(struct net_device *ndev);
629static void sh_eth_reset_hw_crc(struct net_device *ndev);
630
631static void sh_eth_chip_reset(struct net_device *ndev) 546static void sh_eth_chip_reset(struct net_device *ndev)
632{ 547{
633 struct sh_eth_private *mdp = netdev_priv(ndev); 548 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -637,17 +552,7 @@ static void sh_eth_chip_reset(struct net_device *ndev)
637 mdelay(1); 552 mdelay(1);
638} 553}
639 554
640static void sh_eth_set_duplex(struct net_device *ndev) 555static void sh_eth_set_rate_gether(struct net_device *ndev)
641{
642 struct sh_eth_private *mdp = netdev_priv(ndev);
643
644 if (mdp->duplex) /* Full */
645 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
646 else /* Half */
647 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
648}
649
650static void sh_eth_set_rate(struct net_device *ndev)
651{ 556{
652 struct sh_eth_private *mdp = netdev_priv(ndev); 557 struct sh_eth_private *mdp = netdev_priv(ndev);
653 558
@@ -666,11 +571,11 @@ static void sh_eth_set_rate(struct net_device *ndev)
666 } 571 }
667} 572}
668 573
669/* sh7763 */ 574/* SH7734 */
670static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 575static struct sh_eth_cpu_data sh7734_data = {
671 .chip_reset = sh_eth_chip_reset, 576 .chip_reset = sh_eth_chip_reset,
672 .set_duplex = sh_eth_set_duplex, 577 .set_duplex = sh_eth_set_duplex,
673 .set_rate = sh_eth_set_rate, 578 .set_rate = sh_eth_set_rate_gether,
674 579
675 .ecsr_value = ECSR_ICD | ECSR_MPD, 580 .ecsr_value = ECSR_ICD | ECSR_MPD,
676 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 581 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
@@ -680,8 +585,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
680 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 585 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
681 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 586 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
682 EESR_TDE | EESR_ECI, 587 EESR_TDE | EESR_ECI,
683 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
684 EESR_TFE,
685 588
686 .apr = 1, 589 .apr = 1,
687 .mpr = 1, 590 .mpr = 1,
@@ -691,54 +594,37 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
691 .no_trimd = 1, 594 .no_trimd = 1,
692 .no_ade = 1, 595 .no_ade = 1,
693 .tsu = 1, 596 .tsu = 1,
694#if defined(CONFIG_CPU_SUBTYPE_SH7734) 597 .hw_crc = 1,
695 .hw_crc = 1, 598 .select_mii = 1,
696 .select_mii = 1,
697#endif
698}; 599};
699 600
700static int sh_eth_reset(struct net_device *ndev) 601/* SH7763 */
701{ 602static struct sh_eth_cpu_data sh7763_data = {
702 int ret = 0; 603 .chip_reset = sh_eth_chip_reset,
703 604 .set_duplex = sh_eth_set_duplex,
704 sh_eth_write(ndev, EDSR_ENALL, EDSR); 605 .set_rate = sh_eth_set_rate_gether,
705 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
706
707 ret = sh_eth_check_reset(ndev);
708 if (ret)
709 goto out;
710 606
711 /* Table Init */ 607 .ecsr_value = ECSR_ICD | ECSR_MPD,
712 sh_eth_write(ndev, 0x0, TDLAR); 608 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
713 sh_eth_write(ndev, 0x0, TDFAR); 609 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
714 sh_eth_write(ndev, 0x0, TDFXR);
715 sh_eth_write(ndev, 0x0, TDFFR);
716 sh_eth_write(ndev, 0x0, RDLAR);
717 sh_eth_write(ndev, 0x0, RDFAR);
718 sh_eth_write(ndev, 0x0, RDFXR);
719 sh_eth_write(ndev, 0x0, RDFFR);
720
721 /* Reset HW CRC register */
722 sh_eth_reset_hw_crc(ndev);
723
724 /* Select MII mode */
725 if (sh_eth_my_cpu_data.select_mii)
726 sh_eth_select_mii(ndev);
727out:
728 return ret;
729}
730 610
731static void sh_eth_reset_hw_crc(struct net_device *ndev) 611 .tx_check = EESR_TC1 | EESR_FTC,
732{ 612 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
733 if (sh_eth_my_cpu_data.hw_crc) 613 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
734 sh_eth_write(ndev, 0x0, CSMR); 614 EESR_ECI,
735}
736 615
737#elif defined(CONFIG_ARCH_R8A7740) 616 .apr = 1,
738#define SH_ETH_HAS_TSU 1 617 .mpr = 1,
739static int sh_eth_check_reset(struct net_device *ndev); 618 .tpauser = 1,
619 .bculr = 1,
620 .hw_swap = 1,
621 .no_trimd = 1,
622 .no_ade = 1,
623 .tsu = 1,
624 .irq_flags = IRQF_SHARED,
625};
740 626
741static void sh_eth_chip_reset(struct net_device *ndev) 627static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
742{ 628{
743 struct sh_eth_private *mdp = netdev_priv(ndev); 629 struct sh_eth_private *mdp = netdev_priv(ndev);
744 630
@@ -749,65 +635,11 @@ static void sh_eth_chip_reset(struct net_device *ndev)
749 sh_eth_select_mii(ndev); 635 sh_eth_select_mii(ndev);
750} 636}
751 637
752static int sh_eth_reset(struct net_device *ndev)
753{
754 int ret = 0;
755
756 sh_eth_write(ndev, EDSR_ENALL, EDSR);
757 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
758
759 ret = sh_eth_check_reset(ndev);
760 if (ret)
761 goto out;
762
763 /* Table Init */
764 sh_eth_write(ndev, 0x0, TDLAR);
765 sh_eth_write(ndev, 0x0, TDFAR);
766 sh_eth_write(ndev, 0x0, TDFXR);
767 sh_eth_write(ndev, 0x0, TDFFR);
768 sh_eth_write(ndev, 0x0, RDLAR);
769 sh_eth_write(ndev, 0x0, RDFAR);
770 sh_eth_write(ndev, 0x0, RDFXR);
771 sh_eth_write(ndev, 0x0, RDFFR);
772
773out:
774 return ret;
775}
776
777static void sh_eth_set_duplex(struct net_device *ndev)
778{
779 struct sh_eth_private *mdp = netdev_priv(ndev);
780
781 if (mdp->duplex) /* Full */
782 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
783 else /* Half */
784 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
785}
786
787static void sh_eth_set_rate(struct net_device *ndev)
788{
789 struct sh_eth_private *mdp = netdev_priv(ndev);
790
791 switch (mdp->speed) {
792 case 10: /* 10BASE */
793 sh_eth_write(ndev, GECMR_10, GECMR);
794 break;
795 case 100:/* 100BASE */
796 sh_eth_write(ndev, GECMR_100, GECMR);
797 break;
798 case 1000: /* 1000BASE */
799 sh_eth_write(ndev, GECMR_1000, GECMR);
800 break;
801 default:
802 break;
803 }
804}
805
806/* R8A7740 */ 638/* R8A7740 */
807static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 639static struct sh_eth_cpu_data r8a7740_data = {
808 .chip_reset = sh_eth_chip_reset, 640 .chip_reset = sh_eth_chip_reset_r8a7740,
809 .set_duplex = sh_eth_set_duplex, 641 .set_duplex = sh_eth_set_duplex,
810 .set_rate = sh_eth_set_rate, 642 .set_rate = sh_eth_set_rate_gether,
811 643
812 .ecsr_value = ECSR_ICD | ECSR_MPD, 644 .ecsr_value = ECSR_ICD | ECSR_MPD,
813 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 645 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
@@ -817,8 +649,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
817 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 649 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
818 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 650 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
819 EESR_TDE | EESR_ECI, 651 EESR_TDE | EESR_ECI,
820 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
821 EESR_TFE,
822 652
823 .apr = 1, 653 .apr = 1,
824 .mpr = 1, 654 .mpr = 1,
@@ -829,11 +659,10 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
829 .no_ade = 1, 659 .no_ade = 1,
830 .tsu = 1, 660 .tsu = 1,
831 .select_mii = 1, 661 .select_mii = 1,
662 .shift_rd0 = 1,
832}; 663};
833 664
834#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 665static struct sh_eth_cpu_data sh7619_data = {
835#define SH_ETH_RESET_DEFAULT 1
836static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
837 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 666 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
838 667
839 .apr = 1, 668 .apr = 1,
@@ -841,14 +670,11 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
841 .tpauser = 1, 670 .tpauser = 1,
842 .hw_swap = 1, 671 .hw_swap = 1,
843}; 672};
844#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) 673
845#define SH_ETH_RESET_DEFAULT 1 674static struct sh_eth_cpu_data sh771x_data = {
846#define SH_ETH_HAS_TSU 1
847static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
848 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 675 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
849 .tsu = 1, 676 .tsu = 1,
850}; 677};
851#endif
852 678
853static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) 679static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
854{ 680{
@@ -873,22 +699,8 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
873 699
874 if (!cd->eesr_err_check) 700 if (!cd->eesr_err_check)
875 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; 701 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
876
877 if (!cd->tx_error_check)
878 cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
879} 702}
880 703
881#if defined(SH_ETH_RESET_DEFAULT)
882/* Chip Reset */
883static int sh_eth_reset(struct net_device *ndev)
884{
885 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
886 mdelay(3);
887 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
888
889 return 0;
890}
891#else
892static int sh_eth_check_reset(struct net_device *ndev) 704static int sh_eth_check_reset(struct net_device *ndev)
893{ 705{
894 int ret = 0; 706 int ret = 0;
@@ -906,7 +718,49 @@ static int sh_eth_check_reset(struct net_device *ndev)
906 } 718 }
907 return ret; 719 return ret;
908} 720}
909#endif 721
722static int sh_eth_reset(struct net_device *ndev)
723{
724 struct sh_eth_private *mdp = netdev_priv(ndev);
725 int ret = 0;
726
727 if (sh_eth_is_gether(mdp)) {
728 sh_eth_write(ndev, EDSR_ENALL, EDSR);
729 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
730 EDMR);
731
732 ret = sh_eth_check_reset(ndev);
733 if (ret)
734 goto out;
735
736 /* Table Init */
737 sh_eth_write(ndev, 0x0, TDLAR);
738 sh_eth_write(ndev, 0x0, TDFAR);
739 sh_eth_write(ndev, 0x0, TDFXR);
740 sh_eth_write(ndev, 0x0, TDFFR);
741 sh_eth_write(ndev, 0x0, RDLAR);
742 sh_eth_write(ndev, 0x0, RDFAR);
743 sh_eth_write(ndev, 0x0, RDFXR);
744 sh_eth_write(ndev, 0x0, RDFFR);
745
746 /* Reset HW CRC register */
747 if (mdp->cd->hw_crc)
748 sh_eth_write(ndev, 0x0, CSMR);
749
750 /* Select MII mode */
751 if (mdp->cd->select_mii)
752 sh_eth_select_mii(ndev);
753 } else {
754 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
755 EDMR);
756 mdelay(3);
757 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
758 EDMR);
759 }
760
761out:
762 return ret;
763}
910 764
911#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 765#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
912static void sh_eth_set_receive_align(struct sk_buff *skb) 766static void sh_eth_set_receive_align(struct sk_buff *skb)
@@ -982,14 +836,6 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
982 } 836 }
983} 837}
984 838
985static int sh_eth_is_gether(struct sh_eth_private *mdp)
986{
987 if (mdp->reg_offset == sh_eth_offset_gigabit)
988 return 1;
989 else
990 return 0;
991}
992
993static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) 839static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
994{ 840{
995 if (sh_eth_is_gether(mdp)) 841 if (sh_eth_is_gether(mdp))
@@ -1388,7 +1234,7 @@ static int sh_eth_txfree(struct net_device *ndev)
1388} 1234}
1389 1235
1390/* Packet receive function */ 1236/* Packet receive function */
1391static int sh_eth_rx(struct net_device *ndev, u32 intr_status) 1237static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1392{ 1238{
1393 struct sh_eth_private *mdp = netdev_priv(ndev); 1239 struct sh_eth_private *mdp = netdev_priv(ndev);
1394 struct sh_eth_rxdesc *rxdesc; 1240 struct sh_eth_rxdesc *rxdesc;
@@ -1396,6 +1242,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1396 int entry = mdp->cur_rx % mdp->num_rx_ring; 1242 int entry = mdp->cur_rx % mdp->num_rx_ring;
1397 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; 1243 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1398 struct sk_buff *skb; 1244 struct sk_buff *skb;
1245 int exceeded = 0;
1399 u16 pkt_len = 0; 1246 u16 pkt_len = 0;
1400 u32 desc_status; 1247 u32 desc_status;
1401 1248
@@ -1407,10 +1254,15 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1407 if (--boguscnt < 0) 1254 if (--boguscnt < 0)
1408 break; 1255 break;
1409 1256
1257 if (*quota <= 0) {
1258 exceeded = 1;
1259 break;
1260 }
1261 (*quota)--;
1262
1410 if (!(desc_status & RDFEND)) 1263 if (!(desc_status & RDFEND))
1411 ndev->stats.rx_length_errors++; 1264 ndev->stats.rx_length_errors++;
1412 1265
1413#if defined(CONFIG_ARCH_R8A7740)
1414 /* 1266 /*
1415 * In case of almost all GETHER/ETHERs, the Receive Frame State 1267 * In case of almost all GETHER/ETHERs, the Receive Frame State
1416 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to 1268 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
@@ -1418,8 +1270,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1418 * bits are from bit 25 to bit 16. So, the driver needs right 1270 * bits are from bit 25 to bit 16. So, the driver needs right
1419 * shifting by 16. 1271 * shifting by 16.
1420 */ 1272 */
1421 desc_status >>= 16; 1273 if (mdp->cd->shift_rd0)
1422#endif 1274 desc_status >>= 16;
1423 1275
1424 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1276 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1425 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1277 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
@@ -1494,7 +1346,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1494 sh_eth_write(ndev, EDRRR_R, EDRRR); 1346 sh_eth_write(ndev, EDRRR_R, EDRRR);
1495 } 1347 }
1496 1348
1497 return 0; 1349 return exceeded;
1498} 1350}
1499 1351
1500static void sh_eth_rcv_snd_disable(struct net_device *ndev) 1352static void sh_eth_rcv_snd_disable(struct net_device *ndev)
@@ -1636,7 +1488,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1636 struct sh_eth_private *mdp = netdev_priv(ndev); 1488 struct sh_eth_private *mdp = netdev_priv(ndev);
1637 struct sh_eth_cpu_data *cd = mdp->cd; 1489 struct sh_eth_cpu_data *cd = mdp->cd;
1638 irqreturn_t ret = IRQ_NONE; 1490 irqreturn_t ret = IRQ_NONE;
1639 unsigned long intr_status; 1491 unsigned long intr_status, intr_enable;
1640 1492
1641 spin_lock(&mdp->lock); 1493 spin_lock(&mdp->lock);
1642 1494
@@ -1647,34 +1499,41 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1647 * and we need to fully handle it in sh_eth_error() in order to quench 1499 * and we need to fully handle it in sh_eth_error() in order to quench
1648 * it as it doesn't get cleared by just writing 1 to the ECI bit... 1500 * it as it doesn't get cleared by just writing 1 to the ECI bit...
1649 */ 1501 */
1650 intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; 1502 intr_enable = sh_eth_read(ndev, EESIPR);
1651 /* Clear interrupt */ 1503 intr_status &= intr_enable | DMAC_M_ECI;
1652 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 1504 if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1653 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
1654 cd->tx_check | cd->eesr_err_check)) {
1655 sh_eth_write(ndev, intr_status, EESR);
1656 ret = IRQ_HANDLED; 1505 ret = IRQ_HANDLED;
1657 } else 1506 else
1658 goto other_irq; 1507 goto other_irq;
1659 1508
1660 if (intr_status & (EESR_FRC | /* Frame recv*/ 1509 if (intr_status & EESR_RX_CHECK) {
1661 EESR_RMAF | /* Multi cast address recv*/ 1510 if (napi_schedule_prep(&mdp->napi)) {
1662 EESR_RRF | /* Bit frame recv */ 1511 /* Mask Rx interrupts */
1663 EESR_RTLF | /* Long frame recv*/ 1512 sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1664 EESR_RTSF | /* short frame recv */ 1513 EESIPR);
1665 EESR_PRE | /* PHY-LSI recv error */ 1514 __napi_schedule(&mdp->napi);
1666 EESR_CERF)){ /* recv frame CRC error */ 1515 } else {
1667 sh_eth_rx(ndev, intr_status); 1516 dev_warn(&ndev->dev,
1517 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1518 intr_status, intr_enable);
1519 }
1668 } 1520 }
1669 1521
1670 /* Tx Check */ 1522 /* Tx Check */
1671 if (intr_status & cd->tx_check) { 1523 if (intr_status & cd->tx_check) {
1524 /* Clear Tx interrupts */
1525 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1526
1672 sh_eth_txfree(ndev); 1527 sh_eth_txfree(ndev);
1673 netif_wake_queue(ndev); 1528 netif_wake_queue(ndev);
1674 } 1529 }
1675 1530
1676 if (intr_status & cd->eesr_err_check) 1531 if (intr_status & cd->eesr_err_check) {
1532 /* Clear error interrupts */
1533 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1534
1677 sh_eth_error(ndev, intr_status); 1535 sh_eth_error(ndev, intr_status);
1536 }
1678 1537
1679other_irq: 1538other_irq:
1680 spin_unlock(&mdp->lock); 1539 spin_unlock(&mdp->lock);
@@ -1682,6 +1541,33 @@ other_irq:
1682 return ret; 1541 return ret;
1683} 1542}
1684 1543
1544static int sh_eth_poll(struct napi_struct *napi, int budget)
1545{
1546 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1547 napi);
1548 struct net_device *ndev = napi->dev;
1549 int quota = budget;
1550 unsigned long intr_status;
1551
1552 for (;;) {
1553 intr_status = sh_eth_read(ndev, EESR);
1554 if (!(intr_status & EESR_RX_CHECK))
1555 break;
1556 /* Clear Rx interrupts */
1557 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1558
1559 if (sh_eth_rx(ndev, intr_status, &quota))
1560 goto out;
1561 }
1562
1563 napi_complete(napi);
1564
1565 /* Reenable Rx interrupts */
1566 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1567out:
1568 return budget - quota;
1569}
1570
1685/* PHY state control function */ 1571/* PHY state control function */
1686static void sh_eth_adjust_link(struct net_device *ndev) 1572static void sh_eth_adjust_link(struct net_device *ndev)
1687{ 1573{
@@ -1972,14 +1858,7 @@ static int sh_eth_open(struct net_device *ndev)
1972 pm_runtime_get_sync(&mdp->pdev->dev); 1858 pm_runtime_get_sync(&mdp->pdev->dev);
1973 1859
1974 ret = request_irq(ndev->irq, sh_eth_interrupt, 1860 ret = request_irq(ndev->irq, sh_eth_interrupt,
1975#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1861 mdp->cd->irq_flags, ndev->name, ndev);
1976 defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1977 defined(CONFIG_CPU_SUBTYPE_SH7757)
1978 IRQF_SHARED,
1979#else
1980 0,
1981#endif
1982 ndev->name, ndev);
1983 if (ret) { 1862 if (ret) {
1984 dev_err(&ndev->dev, "Can not assign IRQ number\n"); 1863 dev_err(&ndev->dev, "Can not assign IRQ number\n");
1985 return ret; 1864 return ret;
@@ -2000,6 +1879,8 @@ static int sh_eth_open(struct net_device *ndev)
2000 if (ret) 1879 if (ret)
2001 goto out_free_irq; 1880 goto out_free_irq;
2002 1881
1882 napi_enable(&mdp->napi);
1883
2003 return ret; 1884 return ret;
2004 1885
2005out_free_irq: 1886out_free_irq:
@@ -2095,6 +1976,8 @@ static int sh_eth_close(struct net_device *ndev)
2095{ 1976{
2096 struct sh_eth_private *mdp = netdev_priv(ndev); 1977 struct sh_eth_private *mdp = netdev_priv(ndev);
2097 1978
1979 napi_disable(&mdp->napi);
1980
2098 netif_stop_queue(ndev); 1981 netif_stop_queue(ndev);
2099 1982
2100 /* Disable interrupts by clearing the interrupt mask. */ 1983 /* Disable interrupts by clearing the interrupt mask. */
@@ -2165,7 +2048,6 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
2165 return phy_mii_ioctl(phydev, rq, cmd); 2048 return phy_mii_ioctl(phydev, rq, cmd);
2166} 2049}
2167 2050
2168#if defined(SH_ETH_HAS_TSU)
2169/* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */ 2051/* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */
2170static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, 2052static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2171 int entry) 2053 int entry)
@@ -2508,7 +2390,6 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2508 2390
2509 return 0; 2391 return 0;
2510} 2392}
2511#endif /* SH_ETH_HAS_TSU */
2512 2393
2513/* SuperH's TSU register init function */ 2394/* SuperH's TSU register init function */
2514static void sh_eth_tsu_init(struct sh_eth_private *mdp) 2395static void sh_eth_tsu_init(struct sh_eth_private *mdp)
@@ -2652,11 +2533,21 @@ static const struct net_device_ops sh_eth_netdev_ops = {
2652 .ndo_stop = sh_eth_close, 2533 .ndo_stop = sh_eth_close,
2653 .ndo_start_xmit = sh_eth_start_xmit, 2534 .ndo_start_xmit = sh_eth_start_xmit,
2654 .ndo_get_stats = sh_eth_get_stats, 2535 .ndo_get_stats = sh_eth_get_stats,
2655#if defined(SH_ETH_HAS_TSU) 2536 .ndo_tx_timeout = sh_eth_tx_timeout,
2537 .ndo_do_ioctl = sh_eth_do_ioctl,
2538 .ndo_validate_addr = eth_validate_addr,
2539 .ndo_set_mac_address = eth_mac_addr,
2540 .ndo_change_mtu = eth_change_mtu,
2541};
2542
2543static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2544 .ndo_open = sh_eth_open,
2545 .ndo_stop = sh_eth_close,
2546 .ndo_start_xmit = sh_eth_start_xmit,
2547 .ndo_get_stats = sh_eth_get_stats,
2656 .ndo_set_rx_mode = sh_eth_set_multicast_list, 2548 .ndo_set_rx_mode = sh_eth_set_multicast_list,
2657 .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, 2549 .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
2658 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, 2550 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
2659#endif
2660 .ndo_tx_timeout = sh_eth_tx_timeout, 2551 .ndo_tx_timeout = sh_eth_tx_timeout,
2661 .ndo_do_ioctl = sh_eth_do_ioctl, 2552 .ndo_do_ioctl = sh_eth_do_ioctl,
2662 .ndo_validate_addr = eth_validate_addr, 2553 .ndo_validate_addr = eth_validate_addr,
@@ -2671,6 +2562,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2671 struct net_device *ndev = NULL; 2562 struct net_device *ndev = NULL;
2672 struct sh_eth_private *mdp = NULL; 2563 struct sh_eth_private *mdp = NULL;
2673 struct sh_eth_plat_data *pd = pdev->dev.platform_data; 2564 struct sh_eth_plat_data *pd = pdev->dev.platform_data;
2565 const struct platform_device_id *id = platform_get_device_id(pdev);
2674 2566
2675 /* get base addr */ 2567 /* get base addr */
2676 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2568 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2729,15 +2621,14 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2729 mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); 2621 mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
2730 2622
2731 /* set cpu data */ 2623 /* set cpu data */
2732#if defined(SH_ETH_HAS_BOTH_MODULES) 2624 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
2733 mdp->cd = sh_eth_get_cpu_data(mdp);
2734#else
2735 mdp->cd = &sh_eth_my_cpu_data;
2736#endif
2737 sh_eth_set_default_cpu_data(mdp->cd); 2625 sh_eth_set_default_cpu_data(mdp->cd);
2738 2626
2739 /* set function */ 2627 /* set function */
2740 ndev->netdev_ops = &sh_eth_netdev_ops; 2628 if (mdp->cd->tsu)
2629 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2630 else
2631 ndev->netdev_ops = &sh_eth_netdev_ops;
2741 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); 2632 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
2742 ndev->watchdog_timeo = TX_TIMEOUT; 2633 ndev->watchdog_timeo = TX_TIMEOUT;
2743 2634
@@ -2776,10 +2667,12 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2776 } 2667 }
2777 } 2668 }
2778 2669
2670 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
2671
2779 /* network device register */ 2672 /* network device register */
2780 ret = register_netdev(ndev); 2673 ret = register_netdev(ndev);
2781 if (ret) 2674 if (ret)
2782 goto out_release; 2675 goto out_napi_del;
2783 2676
2784 /* mdio bus init */ 2677 /* mdio bus init */
2785 ret = sh_mdio_init(ndev, pdev->id, pd); 2678 ret = sh_mdio_init(ndev, pdev->id, pd);
@@ -2797,6 +2690,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2797out_unregister: 2690out_unregister:
2798 unregister_netdev(ndev); 2691 unregister_netdev(ndev);
2799 2692
2693out_napi_del:
2694 netif_napi_del(&mdp->napi);
2695
2800out_release: 2696out_release:
2801 /* net_dev free */ 2697 /* net_dev free */
2802 if (ndev) 2698 if (ndev)
@@ -2809,16 +2705,18 @@ out:
2809static int sh_eth_drv_remove(struct platform_device *pdev) 2705static int sh_eth_drv_remove(struct platform_device *pdev)
2810{ 2706{
2811 struct net_device *ndev = platform_get_drvdata(pdev); 2707 struct net_device *ndev = platform_get_drvdata(pdev);
2708 struct sh_eth_private *mdp = netdev_priv(ndev);
2812 2709
2813 sh_mdio_release(ndev); 2710 sh_mdio_release(ndev);
2814 unregister_netdev(ndev); 2711 unregister_netdev(ndev);
2712 netif_napi_del(&mdp->napi);
2815 pm_runtime_disable(&pdev->dev); 2713 pm_runtime_disable(&pdev->dev);
2816 free_netdev(ndev); 2714 free_netdev(ndev);
2817 platform_set_drvdata(pdev, NULL);
2818 2715
2819 return 0; 2716 return 0;
2820} 2717}
2821 2718
2719#ifdef CONFIG_PM
2822static int sh_eth_runtime_nop(struct device *dev) 2720static int sh_eth_runtime_nop(struct device *dev)
2823{ 2721{
2824 /* 2722 /*
@@ -2832,17 +2730,36 @@ static int sh_eth_runtime_nop(struct device *dev)
2832 return 0; 2730 return 0;
2833} 2731}
2834 2732
2835static struct dev_pm_ops sh_eth_dev_pm_ops = { 2733static const struct dev_pm_ops sh_eth_dev_pm_ops = {
2836 .runtime_suspend = sh_eth_runtime_nop, 2734 .runtime_suspend = sh_eth_runtime_nop,
2837 .runtime_resume = sh_eth_runtime_nop, 2735 .runtime_resume = sh_eth_runtime_nop,
2838}; 2736};
2737#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
2738#else
2739#define SH_ETH_PM_OPS NULL
2740#endif
2741
2742static struct platform_device_id sh_eth_id_table[] = {
2743 { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
2744 { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
2745 { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
2746 { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
2747 { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
2748 { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
2749 { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
2750 { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
2751 { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
2752 { }
2753};
2754MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
2839 2755
2840static struct platform_driver sh_eth_driver = { 2756static struct platform_driver sh_eth_driver = {
2841 .probe = sh_eth_drv_probe, 2757 .probe = sh_eth_drv_probe,
2842 .remove = sh_eth_drv_remove, 2758 .remove = sh_eth_drv_remove,
2759 .id_table = sh_eth_id_table,
2843 .driver = { 2760 .driver = {
2844 .name = CARDNAME, 2761 .name = CARDNAME,
2845 .pm = &sh_eth_dev_pm_ops, 2762 .pm = SH_ETH_PM_OPS,
2846 }, 2763 },
2847}; 2764};
2848 2765
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 62689a5823be..99995bf38c40 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -166,19 +166,16 @@ enum {
166/* 166/*
167 * Register's bits 167 * Register's bits
168 */ 168 */
169#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) ||\ 169/* EDSR : sh7734, sh7757, sh7763, and r8a7740 only */
170 defined(CONFIG_ARCH_R8A7740)
171/* EDSR */
172enum EDSR_BIT { 170enum EDSR_BIT {
173 EDSR_ENT = 0x01, EDSR_ENR = 0x02, 171 EDSR_ENT = 0x01, EDSR_ENR = 0x02,
174}; 172};
175#define EDSR_ENALL (EDSR_ENT|EDSR_ENR) 173#define EDSR_ENALL (EDSR_ENT|EDSR_ENR)
176 174
177/* GECMR */ 175/* GECMR : sh7734, sh7763 and r8a7740 only */
178enum GECMR_BIT { 176enum GECMR_BIT {
179 GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01, 177 GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01,
180}; 178};
181#endif
182 179
183/* EDMR */ 180/* EDMR */
184enum DMAC_M_BIT { 181enum DMAC_M_BIT {
@@ -251,13 +248,19 @@ enum EESR_BIT {
251 EESR_CERF = 0x00000001, 248 EESR_CERF = 0x00000001,
252}; 249};
253 250
251#define EESR_RX_CHECK (EESR_FRC | /* Frame recv */ \
252 EESR_RMAF | /* Multicast address recv */ \
253 EESR_RRF | /* Bit frame recv */ \
254 EESR_RTLF | /* Long frame recv */ \
255 EESR_RTSF | /* Short frame recv */ \
256 EESR_PRE | /* PHY-LSI recv error */ \
257 EESR_CERF) /* Recv frame CRC error */
258
254#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ 259#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
255 EESR_RTO) 260 EESR_RTO)
256#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \ 261#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
257 EESR_RDE | EESR_RFRMER | EESR_ADE | \ 262 EESR_RDE | EESR_RFRMER | EESR_ADE | \
258 EESR_TFE | EESR_TDE | EESR_ECI) 263 EESR_TFE | EESR_TDE | EESR_ECI)
259#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
260 EESR_TFE)
261 264
262/* EESIPR */ 265/* EESIPR */
263enum DMAC_IM_BIT { 266enum DMAC_IM_BIT {
@@ -299,11 +302,11 @@ enum FCFTR_BIT {
299#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0) 302#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
300#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0) 303#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
301 304
302/* Transfer descriptor bit */ 305/* Transmit descriptor bit */
303enum TD_STS_BIT { 306enum TD_STS_BIT {
304 TD_TACT = 0x80000000, 307 TD_TACT = 0x80000000, TD_TDLE = 0x40000000,
305 TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000, 308 TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000,
306 TD_TFP0 = 0x10000000, 309 TD_TFE = 0x08000000, TD_TWBI = 0x04000000,
307}; 310};
308#define TDF1ST TD_TFP1 311#define TDF1ST TD_TFP1
309#define TDFEND TD_TFP0 312#define TDFEND TD_TFP0
@@ -463,9 +466,9 @@ struct sh_eth_cpu_data {
463 /* interrupt checking mask */ 466 /* interrupt checking mask */
464 unsigned long tx_check; 467 unsigned long tx_check;
465 unsigned long eesr_err_check; 468 unsigned long eesr_err_check;
466 unsigned long tx_error_check;
467 469
468 /* hardware features */ 470 /* hardware features */
471 unsigned long irq_flags; /* IRQ configuration flags */
469 unsigned no_psr:1; /* EtherC DO NOT have PSR */ 472 unsigned no_psr:1; /* EtherC DO NOT have PSR */
470 unsigned apr:1; /* EtherC have APR */ 473 unsigned apr:1; /* EtherC have APR */
471 unsigned mpr:1; /* EtherC have MPR */ 474 unsigned mpr:1; /* EtherC have MPR */
@@ -478,6 +481,7 @@ struct sh_eth_cpu_data {
478 unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ 481 unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
479 unsigned hw_crc:1; /* E-DMAC have CSMR */ 482 unsigned hw_crc:1; /* E-DMAC have CSMR */
480 unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ 483 unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
484 unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */
481}; 485};
482 486
483struct sh_eth_private { 487struct sh_eth_private {
@@ -499,6 +503,7 @@ struct sh_eth_private {
499 u32 cur_tx, dirty_tx; 503 u32 cur_tx, dirty_tx;
500 u32 rx_buf_sz; /* Based on MTU+slack. */ 504 u32 rx_buf_sz; /* Based on MTU+slack. */
501 int edmac_endian; 505 int edmac_endian;
506 struct napi_struct napi;
502 /* MII transceiver section. */ 507 /* MII transceiver section. */
503 u32 phy_id; /* PHY ID */ 508 u32 phy_id; /* PHY ID */
504 struct mii_bus *mii_bus; /* MDIO bus control */ 509 struct mii_bus *mii_bus; /* MDIO bus control */
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index b6739afeaca1..a99739c5142c 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -1040,7 +1040,6 @@ static int s6gmac_remove(struct platform_device *pdev)
1040 unregister_netdev(dev); 1040 unregister_netdev(dev);
1041 free_irq(dev->irq, dev); 1041 free_irq(dev->irq, dev);
1042 free_netdev(dev); 1042 free_netdev(dev);
1043 platform_set_drvdata(pdev, NULL);
1044 } 1043 }
1045 return 0; 1044 return 0;
1046} 1045}
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 0ad5694b41f8..856e523ac936 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -818,7 +818,6 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
818 dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, 818 dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
819 sp->srings_dma); 819 sp->srings_dma);
820 free_netdev(dev); 820 free_netdev(dev);
821 platform_set_drvdata(pdev, NULL);
822 821
823 return 0; 822 return 0;
824} 823}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4a14a940c65e..c72968840f1a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -21,8 +21,8 @@
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <linux/topology.h> 22#include <linux/topology.h>
23#include <linux/gfp.h> 23#include <linux/gfp.h>
24#include <linux/cpu_rmap.h>
25#include <linux/aer.h> 24#include <linux/aer.h>
25#include <linux/interrupt.h>
26#include "net_driver.h" 26#include "net_driver.h"
27#include "efx.h" 27#include "efx.h"
28#include "nic.h" 28#include "nic.h"
@@ -1283,29 +1283,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
1283 return count; 1283 return count;
1284} 1284}
1285 1285
1286static int
1287efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
1288{
1289#ifdef CONFIG_RFS_ACCEL
1290 unsigned int i;
1291 int rc;
1292
1293 efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
1294 if (!efx->net_dev->rx_cpu_rmap)
1295 return -ENOMEM;
1296 for (i = 0; i < efx->n_rx_channels; i++) {
1297 rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
1298 xentries[i].vector);
1299 if (rc) {
1300 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
1301 efx->net_dev->rx_cpu_rmap = NULL;
1302 return rc;
1303 }
1304 }
1305#endif
1306 return 0;
1307}
1308
1309/* Probe the number and type of interrupts we are able to obtain, and 1286/* Probe the number and type of interrupts we are able to obtain, and
1310 * the resulting numbers of channels and RX queues. 1287 * the resulting numbers of channels and RX queues.
1311 */ 1288 */
@@ -1359,11 +1336,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1359 efx->n_tx_channels = n_channels; 1336 efx->n_tx_channels = n_channels;
1360 efx->n_rx_channels = n_channels; 1337 efx->n_rx_channels = n_channels;
1361 } 1338 }
1362 rc = efx_init_rx_cpu_rmap(efx, xentries);
1363 if (rc) {
1364 pci_disable_msix(efx->pci_dev);
1365 return rc;
1366 }
1367 for (i = 0; i < efx->n_channels; i++) 1339 for (i = 0; i < efx->n_channels; i++)
1368 efx_get_channel(efx, i)->irq = 1340 efx_get_channel(efx, i)->irq =
1369 xentries[i].vector; 1341 xentries[i].vector;
@@ -1427,6 +1399,10 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1427 1399
1428 BUG_ON(efx->state == STATE_DISABLED); 1400 BUG_ON(efx->state == STATE_DISABLED);
1429 1401
1402 if (efx->eeh_disabled_legacy_irq) {
1403 enable_irq(efx->legacy_irq);
1404 efx->eeh_disabled_legacy_irq = false;
1405 }
1430 if (efx->legacy_irq) 1406 if (efx->legacy_irq)
1431 efx->legacy_irq_enabled = true; 1407 efx->legacy_irq_enabled = true;
1432 efx_nic_enable_interrupts(efx); 1408 efx_nic_enable_interrupts(efx);
@@ -2120,7 +2096,7 @@ static void efx_update_name(struct efx_nic *efx)
2120static int efx_netdev_event(struct notifier_block *this, 2096static int efx_netdev_event(struct notifier_block *this,
2121 unsigned long event, void *ptr) 2097 unsigned long event, void *ptr)
2122{ 2098{
2123 struct net_device *net_dev = ptr; 2099 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2124 2100
2125 if (net_dev->netdev_ops == &efx_netdev_ops && 2101 if (net_dev->netdev_ops == &efx_netdev_ops &&
2126 event == NETDEV_CHANGENAME) 2102 event == NETDEV_CHANGENAME)
@@ -2365,7 +2341,7 @@ out:
2365 * Returns 0 if the recovery mechanisms are unsuccessful. 2341 * Returns 0 if the recovery mechanisms are unsuccessful.
2366 * Returns a non-zero value otherwise. 2342 * Returns a non-zero value otherwise.
2367 */ 2343 */
2368static int efx_try_recovery(struct efx_nic *efx) 2344int efx_try_recovery(struct efx_nic *efx)
2369{ 2345{
2370#ifdef CONFIG_EEH 2346#ifdef CONFIG_EEH
2371 /* A PCI error can occur and not be seen by EEH because nothing 2347 /* A PCI error can occur and not be seen by EEH because nothing
@@ -2603,10 +2579,6 @@ static void efx_pci_remove_main(struct efx_nic *efx)
2603 BUG_ON(efx->state == STATE_READY); 2579 BUG_ON(efx->state == STATE_READY);
2604 cancel_work_sync(&efx->reset_work); 2580 cancel_work_sync(&efx->reset_work);
2605 2581
2606#ifdef CONFIG_RFS_ACCEL
2607 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
2608 efx->net_dev->rx_cpu_rmap = NULL;
2609#endif
2610 efx_stop_interrupts(efx, false); 2582 efx_stop_interrupts(efx, false);
2611 efx_nic_fini_interrupt(efx); 2583 efx_nic_fini_interrupt(efx);
2612 efx_fini_port(efx); 2584 efx_fini_port(efx);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 8372da239b43..bdb30bbb0c97 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -124,6 +124,7 @@ extern const struct ethtool_ops efx_ethtool_ops;
124extern int efx_reset(struct efx_nic *efx, enum reset_type method); 124extern int efx_reset(struct efx_nic *efx, enum reset_type method);
125extern void efx_reset_down(struct efx_nic *efx, enum reset_type method); 125extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
126extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); 126extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
127extern int efx_try_recovery(struct efx_nic *efx);
127 128
128/* Global */ 129/* Global */
129extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); 130extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 6e768175e7e0..1fc21458413d 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1114,6 +1114,20 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
1114 return 0; 1114 return 0;
1115} 1115}
1116 1116
1117int efx_ethtool_get_ts_info(struct net_device *net_dev,
1118 struct ethtool_ts_info *ts_info)
1119{
1120 struct efx_nic *efx = netdev_priv(net_dev);
1121
1122 /* Software capabilities */
1123 ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
1124 SOF_TIMESTAMPING_SOFTWARE);
1125 ts_info->phc_index = -1;
1126
1127 efx_ptp_get_ts_info(efx, ts_info);
1128 return 0;
1129}
1130
1117static int efx_ethtool_get_module_eeprom(struct net_device *net_dev, 1131static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
1118 struct ethtool_eeprom *ee, 1132 struct ethtool_eeprom *ee,
1119 u8 *data) 1133 u8 *data)
@@ -1176,7 +1190,7 @@ const struct ethtool_ops efx_ethtool_ops = {
1176 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, 1190 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
1177 .get_rxfh_indir = efx_ethtool_get_rxfh_indir, 1191 .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
1178 .set_rxfh_indir = efx_ethtool_set_rxfh_indir, 1192 .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
1179 .get_ts_info = efx_ptp_get_ts_info, 1193 .get_ts_info = efx_ethtool_get_ts_info,
1180 .get_module_info = efx_ethtool_get_module_info, 1194 .get_module_info = efx_ethtool_get_module_info,
1181 .get_module_eeprom = efx_ethtool_get_module_eeprom, 1195 .get_module_eeprom = efx_ethtool_get_module_eeprom,
1182}; 1196};
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 2397f0e8d3eb..b74a60ab9ac7 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -1185,8 +1185,21 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
1185 1185
1186 nhoff = skb_network_offset(skb); 1186 nhoff = skb_network_offset(skb);
1187 1187
1188 if (skb->protocol != htons(ETH_P_IP)) 1188 if (skb->protocol == htons(ETH_P_8021Q)) {
1189 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
1190 nhoff + sizeof(struct vlan_hdr));
1191 if (((const struct vlan_hdr *)skb->data + nhoff)->
1192 h_vlan_encapsulated_proto != htons(ETH_P_IP))
1193 return -EPROTONOSUPPORT;
1194
1195 /* This is IP over 802.1q VLAN. We can't filter on the
1196 * IP 5-tuple and the vlan together, so just strip the
1197 * vlan header and filter on the IP part.
1198 */
1199 nhoff += sizeof(struct vlan_hdr);
1200 } else if (skb->protocol != htons(ETH_P_IP)) {
1189 return -EPROTONOSUPPORT; 1201 return -EPROTONOSUPPORT;
1202 }
1190 1203
1191 /* RFS must validate the IP header length before calling us */ 1204 /* RFS must validate the IP header length before calling us */
1192 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); 1205 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 39d6bd77f015..f4c7e6b67743 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -243,6 +243,7 @@ struct efx_rx_buffer {
243#define EFX_RX_BUF_LAST_IN_PAGE 0x0001 243#define EFX_RX_BUF_LAST_IN_PAGE 0x0001
244#define EFX_RX_PKT_CSUMMED 0x0002 244#define EFX_RX_PKT_CSUMMED 0x0002
245#define EFX_RX_PKT_DISCARD 0x0004 245#define EFX_RX_PKT_DISCARD 0x0004
246#define EFX_RX_PKT_TCP 0x0040
246 247
247/** 248/**
248 * struct efx_rx_page_state - Page-based rx buffer state 249 * struct efx_rx_page_state - Page-based rx buffer state
@@ -784,9 +785,11 @@ struct efx_nic {
784 785
785 char name[IFNAMSIZ]; 786 char name[IFNAMSIZ];
786 struct pci_dev *pci_dev; 787 struct pci_dev *pci_dev;
788 unsigned int port_num;
787 const struct efx_nic_type *type; 789 const struct efx_nic_type *type;
788 int legacy_irq; 790 int legacy_irq;
789 bool legacy_irq_enabled; 791 bool legacy_irq_enabled;
792 bool eeh_disabled_legacy_irq;
790 struct workqueue_struct *workqueue; 793 struct workqueue_struct *workqueue;
791 char workqueue_name[16]; 794 char workqueue_name[16];
792 struct work_struct reset_work; 795 struct work_struct reset_work;
@@ -916,7 +919,7 @@ static inline int efx_dev_registered(struct efx_nic *efx)
916 919
917static inline unsigned int efx_port_num(struct efx_nic *efx) 920static inline unsigned int efx_port_num(struct efx_nic *efx)
918{ 921{
919 return efx->net_dev->dev_id; 922 return efx->port_num;
920} 923}
921 924
922/** 925/**
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index b0503cd8c2a0..56ed3bc71e00 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -14,6 +14,7 @@
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/seq_file.h> 16#include <linux/seq_file.h>
17#include <linux/cpu_rmap.h>
17#include "net_driver.h" 18#include "net_driver.h"
18#include "bitfield.h" 19#include "bitfield.h"
19#include "efx.h" 20#include "efx.h"
@@ -1080,12 +1081,21 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
1080 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 1081 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1081 1082
1082 if (likely(rx_ev_pkt_ok)) { 1083 if (likely(rx_ev_pkt_ok)) {
1083 /* If packet is marked as OK and packet type is TCP/IP or 1084 /* If packet is marked as OK then we can rely on the
1084 * UDP/IP, then we can rely on the hardware checksum. 1085 * hardware checksum and classification.
1085 */ 1086 */
1086 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 1087 flags = 0;
1087 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ? 1088 switch (rx_ev_hdr_type) {
1088 EFX_RX_PKT_CSUMMED : 0; 1089 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
1090 flags |= EFX_RX_PKT_TCP;
1091 /* fall through */
1092 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
1093 flags |= EFX_RX_PKT_CSUMMED;
1094 /* fall through */
1095 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
1096 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
1097 break;
1098 }
1089 } else { 1099 } else {
1090 flags = efx_handle_rx_not_ok(rx_queue, event); 1100 flags = efx_handle_rx_not_ok(rx_queue, event);
1091 } 1101 }
@@ -1579,6 +1589,16 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1579 efx_readd(efx, &reg, FR_BZ_INT_ISR0); 1589 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1580 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1590 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1581 1591
1592 /* Legacy interrupts are disabled too late by the EEH kernel
1593 * code. Disable them earlier.
1594 * If an EEH error occurred, the read will have returned all ones.
1595 */
1596 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
1597 !efx->eeh_disabled_legacy_irq) {
1598 disable_irq_nosync(efx->legacy_irq);
1599 efx->eeh_disabled_legacy_irq = true;
1600 }
1601
1582 /* Handle non-event-queue sources */ 1602 /* Handle non-event-queue sources */
1583 if (queues & (1U << efx->irq_level)) { 1603 if (queues & (1U << efx->irq_level)) {
1584 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1604 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
@@ -1687,6 +1707,7 @@ void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1687int efx_nic_init_interrupt(struct efx_nic *efx) 1707int efx_nic_init_interrupt(struct efx_nic *efx)
1688{ 1708{
1689 struct efx_channel *channel; 1709 struct efx_channel *channel;
1710 unsigned int n_irqs;
1690 int rc; 1711 int rc;
1691 1712
1692 if (!EFX_INT_MODE_USE_MSI(efx)) { 1713 if (!EFX_INT_MODE_USE_MSI(efx)) {
@@ -1707,7 +1728,19 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1707 return 0; 1728 return 0;
1708 } 1729 }
1709 1730
1731#ifdef CONFIG_RFS_ACCEL
1732 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
1733 efx->net_dev->rx_cpu_rmap =
1734 alloc_irq_cpu_rmap(efx->n_rx_channels);
1735 if (!efx->net_dev->rx_cpu_rmap) {
1736 rc = -ENOMEM;
1737 goto fail1;
1738 }
1739 }
1740#endif
1741
1710 /* Hook MSI or MSI-X interrupt */ 1742 /* Hook MSI or MSI-X interrupt */
1743 n_irqs = 0;
1711 efx_for_each_channel(channel, efx) { 1744 efx_for_each_channel(channel, efx) {
1712 rc = request_irq(channel->irq, efx_msi_interrupt, 1745 rc = request_irq(channel->irq, efx_msi_interrupt,
1713 IRQF_PROBE_SHARED, /* Not shared */ 1746 IRQF_PROBE_SHARED, /* Not shared */
@@ -1718,13 +1751,31 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1718 "failed to hook IRQ %d\n", channel->irq); 1751 "failed to hook IRQ %d\n", channel->irq);
1719 goto fail2; 1752 goto fail2;
1720 } 1753 }
1754 ++n_irqs;
1755
1756#ifdef CONFIG_RFS_ACCEL
1757 if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
1758 channel->channel < efx->n_rx_channels) {
1759 rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
1760 channel->irq);
1761 if (rc)
1762 goto fail2;
1763 }
1764#endif
1721 } 1765 }
1722 1766
1723 return 0; 1767 return 0;
1724 1768
1725 fail2: 1769 fail2:
1726 efx_for_each_channel(channel, efx) 1770#ifdef CONFIG_RFS_ACCEL
1771 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
1772 efx->net_dev->rx_cpu_rmap = NULL;
1773#endif
1774 efx_for_each_channel(channel, efx) {
1775 if (n_irqs-- == 0)
1776 break;
1727 free_irq(channel->irq, &efx->channel[channel->channel]); 1777 free_irq(channel->irq, &efx->channel[channel->channel]);
1778 }
1728 fail1: 1779 fail1:
1729 return rc; 1780 return rc;
1730} 1781}
@@ -1734,11 +1785,14 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
1734 struct efx_channel *channel; 1785 struct efx_channel *channel;
1735 efx_oword_t reg; 1786 efx_oword_t reg;
1736 1787
1788#ifdef CONFIG_RFS_ACCEL
1789 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
1790 efx->net_dev->rx_cpu_rmap = NULL;
1791#endif
1792
1737 /* Disable MSI/MSI-X interrupts */ 1793 /* Disable MSI/MSI-X interrupts */
1738 efx_for_each_channel(channel, efx) { 1794 efx_for_each_channel(channel, efx)
1739 if (channel->irq) 1795 free_irq(channel->irq, &efx->channel[channel->channel]);
1740 free_irq(channel->irq, &efx->channel[channel->channel]);
1741 }
1742 1796
1743 /* ACK legacy interrupt */ 1797 /* ACK legacy interrupt */
1744 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1798 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 1b0003323498..d63c2991a751 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -254,8 +254,8 @@ extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
254struct ethtool_ts_info; 254struct ethtool_ts_info;
255extern void efx_ptp_probe(struct efx_nic *efx); 255extern void efx_ptp_probe(struct efx_nic *efx);
256extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd); 256extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
257extern int efx_ptp_get_ts_info(struct net_device *net_dev, 257extern void efx_ptp_get_ts_info(struct efx_nic *efx,
258 struct ethtool_ts_info *ts_info); 258 struct ethtool_ts_info *ts_info);
259extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 259extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
260extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 260extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
261extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); 261extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 9a95abf2dedf..b495394a6dfa 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1203,18 +1203,16 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
1203 return 0; 1203 return 0;
1204} 1204}
1205 1205
1206int 1206void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
1207efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info)
1208{ 1207{
1209 struct efx_nic *efx = netdev_priv(net_dev);
1210 struct efx_ptp_data *ptp = efx->ptp_data; 1208 struct efx_ptp_data *ptp = efx->ptp_data;
1211 1209
1212 if (!ptp) 1210 if (!ptp)
1213 return -EOPNOTSUPP; 1211 return;
1214 1212
1215 ts_info->so_timestamping = (SOF_TIMESTAMPING_TX_HARDWARE | 1213 ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
1216 SOF_TIMESTAMPING_RX_HARDWARE | 1214 SOF_TIMESTAMPING_RX_HARDWARE |
1217 SOF_TIMESTAMPING_RAW_HARDWARE); 1215 SOF_TIMESTAMPING_RAW_HARDWARE);
1218 ts_info->phc_index = ptp_clock_index(ptp->phc_clock); 1216 ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
1219 ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON; 1217 ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
1220 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE | 1218 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
@@ -1224,7 +1222,6 @@ efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info)
1224 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | 1222 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
1225 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | 1223 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
1226 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); 1224 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
1227 return 0;
1228} 1225}
1229 1226
1230int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd) 1227int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index a7dfe36cabf4..6af9cfda50fb 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -36,7 +36,7 @@
36#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH) 36#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
37 37
38/* Size of buffer allocated for skb header area. */ 38/* Size of buffer allocated for skb header area. */
39#define EFX_SKB_HEADERS 64u 39#define EFX_SKB_HEADERS 128u
40 40
41/* This is the percentage fill level below which new RX descriptors 41/* This is the percentage fill level below which new RX descriptors
42 * will be added to the RX descriptor ring. 42 * will be added to the RX descriptor ring.
@@ -282,9 +282,9 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
282} 282}
283 283
284/* Recycle the pages that are used by buffers that have just been received. */ 284/* Recycle the pages that are used by buffers that have just been received. */
285static void efx_recycle_rx_buffers(struct efx_channel *channel, 285static void efx_recycle_rx_pages(struct efx_channel *channel,
286 struct efx_rx_buffer *rx_buf, 286 struct efx_rx_buffer *rx_buf,
287 unsigned int n_frags) 287 unsigned int n_frags)
288{ 288{
289 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); 289 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
290 290
@@ -294,6 +294,20 @@ static void efx_recycle_rx_buffers(struct efx_channel *channel,
294 } while (--n_frags); 294 } while (--n_frags);
295} 295}
296 296
297static void efx_discard_rx_packet(struct efx_channel *channel,
298 struct efx_rx_buffer *rx_buf,
299 unsigned int n_frags)
300{
301 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
302
303 efx_recycle_rx_pages(channel, rx_buf, n_frags);
304
305 do {
306 efx_free_rx_buffer(rx_buf);
307 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
308 } while (--n_frags);
309}
310
297/** 311/**
298 * efx_fast_push_rx_descriptors - push new RX descriptors quickly 312 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
299 * @rx_queue: RX descriptor queue 313 * @rx_queue: RX descriptor queue
@@ -533,8 +547,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
533 */ 547 */
534 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { 548 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
535 efx_rx_flush_packet(channel); 549 efx_rx_flush_packet(channel);
536 put_page(rx_buf->page); 550 efx_discard_rx_packet(channel, rx_buf, n_frags);
537 efx_recycle_rx_buffers(channel, rx_buf, n_frags);
538 return; 551 return;
539 } 552 }
540 553
@@ -570,9 +583,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
570 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); 583 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
571 } 584 }
572 585
573 /* All fragments have been DMA-synced, so recycle buffers and pages. */ 586 /* All fragments have been DMA-synced, so recycle pages. */
574 rx_buf = efx_rx_buffer(rx_queue, index); 587 rx_buf = efx_rx_buffer(rx_queue, index);
575 efx_recycle_rx_buffers(channel, rx_buf, n_frags); 588 efx_recycle_rx_pages(channel, rx_buf, n_frags);
576 589
577 /* Pipeline receives so that we give time for packet headers to be 590 /* Pipeline receives so that we give time for packet headers to be
578 * prefetched into cache. 591 * prefetched into cache.
@@ -598,6 +611,8 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
598 611
599 /* Set the SKB flags */ 612 /* Set the SKB flags */
600 skb_checksum_none_assert(skb); 613 skb_checksum_none_assert(skb);
614 if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
615 skb->ip_summed = CHECKSUM_UNNECESSARY;
601 616
602 if (channel->type->receive_skb) 617 if (channel->type->receive_skb)
603 if (channel->type->receive_skb(channel, skb)) 618 if (channel->type->receive_skb(channel, skb))
@@ -627,7 +642,7 @@ void __efx_rx_packet(struct efx_channel *channel)
627 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) 642 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
628 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; 643 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
629 644
630 if (!channel->type->receive_skb) 645 if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
631 efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); 646 efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
632 else 647 else
633 efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); 648 efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
@@ -675,7 +690,7 @@ static void efx_init_rx_recycle_ring(struct efx_nic *efx,
675#ifdef CONFIG_PPC64 690#ifdef CONFIG_PPC64
676 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; 691 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
677#else 692#else
678 if (efx->pci_dev->dev.iommu_group) 693 if (iommu_present(&pci_bus_type))
679 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; 694 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
680 else 695 else
681 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU; 696 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 51669244d154..8c91775e3c5f 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -304,7 +304,7 @@ static int siena_probe_nic(struct efx_nic *efx)
304 } 304 }
305 305
306 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 306 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
307 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 307 efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
308 308
309 efx_mcdi_init(efx); 309 efx_mcdi_init(efx);
310 310
diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig
index c1c4bb868a3b..e832f46660c9 100644
--- a/drivers/net/ethernet/sgi/Kconfig
+++ b/drivers/net/ethernet/sgi/Kconfig
@@ -22,7 +22,6 @@ config SGI_IOC3_ETH
22 bool "SGI IOC3 Ethernet" 22 bool "SGI IOC3 Ethernet"
23 depends on PCI && SGI_IP27 23 depends on PCI && SGI_IP27
24 select CRC32 24 select CRC32
25 select NET_CORE
26 select MII 25 select MII
27 ---help--- 26 ---help---
28 If you have a network (Ethernet) card of this type, say Y and read 27 If you have a network (Ethernet) card of this type, say Y and read
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 7ed08c32a9c5..ffa78432164d 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1398,16 +1398,6 @@ static struct pci_driver ioc3_driver = {
1398 .remove = ioc3_remove_one, 1398 .remove = ioc3_remove_one,
1399}; 1399};
1400 1400
1401static int __init ioc3_init_module(void)
1402{
1403 return pci_register_driver(&ioc3_driver);
1404}
1405
1406static void __exit ioc3_cleanup_module(void)
1407{
1408 pci_unregister_driver(&ioc3_driver);
1409}
1410
1411static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) 1401static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1412{ 1402{
1413 unsigned long data; 1403 unsigned long data;
@@ -1677,9 +1667,7 @@ static void ioc3_set_multicast_list(struct net_device *dev)
1677 netif_wake_queue(dev); /* Let us get going again. */ 1667 netif_wake_queue(dev); /* Let us get going again. */
1678} 1668}
1679 1669
1670module_pci_driver(ioc3_driver);
1680MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); 1671MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
1681MODULE_DESCRIPTION("SGI IOC3 Ethernet driver"); 1672MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
1682MODULE_LICENSE("GPL"); 1673MODULE_LICENSE("GPL");
1683
1684module_init(ioc3_init_module);
1685module_exit(ioc3_cleanup_module);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 4bdbaad9932d..9f5f35e041ac 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -863,7 +863,6 @@ static int __exit meth_remove(struct platform_device *pdev)
863 863
864 unregister_netdev(dev); 864 unregister_netdev(dev);
865 free_netdev(dev); 865 free_netdev(dev);
866 platform_set_drvdata(pdev, NULL);
867 866
868 return 0; 867 return 0;
869} 868}
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 28f7268f1b88..5eb933c97bba 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1578,19 +1578,7 @@ static struct pci_driver sc92031_pci_driver = {
1578 .resume = sc92031_resume, 1578 .resume = sc92031_resume,
1579}; 1579};
1580 1580
1581static int __init sc92031_init(void) 1581module_pci_driver(sc92031_pci_driver);
1582{
1583 return pci_register_driver(&sc92031_pci_driver);
1584}
1585
1586static void __exit sc92031_exit(void)
1587{
1588 pci_unregister_driver(&sc92031_pci_driver);
1589}
1590
1591module_init(sc92031_init);
1592module_exit(sc92031_exit);
1593
1594MODULE_LICENSE("GPL"); 1582MODULE_LICENSE("GPL");
1595MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>"); 1583MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
1596MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver"); 1584MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");
diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig
index f1135cc1bd48..68d052b09af1 100644
--- a/drivers/net/ethernet/sis/Kconfig
+++ b/drivers/net/ethernet/sis/Kconfig
@@ -22,7 +22,6 @@ config SIS900
22 tristate "SiS 900/7016 PCI Fast Ethernet Adapter support" 22 tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
23 depends on PCI 23 depends on PCI
24 select CRC32 24 select CRC32
25 select NET_CORE
26 select MII 25 select MII
27 ---help--- 26 ---help---
28 This is a driver for the Fast Ethernet PCI network cards based on 27 This is a driver for the Fast Ethernet PCI network cards based on
@@ -39,7 +38,6 @@ config SIS190
39 tristate "SiS190/SiS191 gigabit ethernet support" 38 tristate "SiS190/SiS191 gigabit ethernet support"
40 depends on PCI 39 depends on PCI
41 select CRC32 40 select CRC32
42 select NET_CORE
43 select MII 41 select MII
44 ---help--- 42 ---help---
45 Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or 43 Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 9a9c379420d1..02df0894690d 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1934,15 +1934,4 @@ static struct pci_driver sis190_pci_driver = {
1934 .remove = sis190_remove_one, 1934 .remove = sis190_remove_one,
1935}; 1935};
1936 1936
1937static int __init sis190_init_module(void) 1937module_pci_driver(sis190_pci_driver);
1938{
1939 return pci_register_driver(&sis190_pci_driver);
1940}
1941
1942static void __exit sis190_cleanup_module(void)
1943{
1944 pci_unregister_driver(&sis190_pci_driver);
1945}
1946
1947module_init(sis190_init_module);
1948module_exit(sis190_cleanup_module);
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index bb4c1674ff99..068fc44d37e1 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -37,7 +37,6 @@ config SMC9194
37config SMC91X 37config SMC91X
38 tristate "SMC 91C9x/91C1xxx support" 38 tristate "SMC 91C9x/91C1xxx support"
39 select CRC32 39 select CRC32
40 select NET_CORE
41 select MII 40 select MII
42 depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \ 41 depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
43 MN10300 || COLDFIRE || ARM64) 42 MN10300 || COLDFIRE || ARM64)
@@ -57,7 +56,6 @@ config PCMCIA_SMC91C92
57 tristate "SMC 91Cxx PCMCIA support" 56 tristate "SMC 91Cxx PCMCIA support"
58 depends on PCMCIA 57 depends on PCMCIA
59 select CRC32 58 select CRC32
60 select NET_CORE
61 select MII 59 select MII
62 ---help--- 60 ---help---
63 Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA 61 Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA
@@ -70,7 +68,6 @@ config EPIC100
70 tristate "SMC EtherPower II" 68 tristate "SMC EtherPower II"
71 depends on PCI 69 depends on PCI
72 select CRC32 70 select CRC32
73 select NET_CORE
74 select MII 71 select MII
75 ---help--- 72 ---help---
76 This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC, 73 This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC,
@@ -81,7 +78,6 @@ config EPIC100
81config SMC911X 78config SMC911X
82 tristate "SMSC LAN911[5678] support" 79 tristate "SMSC LAN911[5678] support"
83 select CRC32 80 select CRC32
84 select NET_CORE
85 select MII 81 select MII
86 depends on (ARM || SUPERH || MN10300) 82 depends on (ARM || SUPERH || MN10300)
87 ---help--- 83 ---help---
@@ -97,9 +93,8 @@ config SMC911X
97 93
98config SMSC911X 94config SMSC911X
99 tristate "SMSC LAN911x/LAN921x families embedded ethernet support" 95 tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
100 depends on (ARM || SUPERH || BLACKFIN || MIPS || MN10300) 96 depends on HAS_IOMEM
101 select CRC32 97 select CRC32
102 select NET_CORE
103 select MII 98 select MII
104 select PHYLIB 99 select PHYLIB
105 ---help--- 100 ---help---
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 9dd842dbb859..345558fe7367 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2087,7 +2087,6 @@ static int smc911x_drv_probe(struct platform_device *pdev)
2087 ndev->base_addr = res->start; 2087 ndev->base_addr = res->start;
2088 ret = smc911x_probe(ndev); 2088 ret = smc911x_probe(ndev);
2089 if (ret != 0) { 2089 if (ret != 0) {
2090 platform_set_drvdata(pdev, NULL);
2091 iounmap(addr); 2090 iounmap(addr);
2092release_both: 2091release_both:
2093 free_netdev(ndev); 2092 free_netdev(ndev);
@@ -2113,7 +2112,6 @@ static int smc911x_drv_remove(struct platform_device *pdev)
2113 struct resource *res; 2112 struct resource *res;
2114 2113
2115 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); 2114 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2116 platform_set_drvdata(pdev, NULL);
2117 2115
2118 unregister_netdev(ndev); 2116 unregister_netdev(ndev);
2119 2117
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index dfbf978315df..cde13be7c7de 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2299,7 +2299,6 @@ static int smc_drv_probe(struct platform_device *pdev)
2299 return 0; 2299 return 0;
2300 2300
2301 out_iounmap: 2301 out_iounmap:
2302 platform_set_drvdata(pdev, NULL);
2303 iounmap(addr); 2302 iounmap(addr);
2304 out_release_attrib: 2303 out_release_attrib:
2305 smc_release_attrib(pdev, ndev); 2304 smc_release_attrib(pdev, ndev);
@@ -2319,8 +2318,6 @@ static int smc_drv_remove(struct platform_device *pdev)
2319 struct smc_local *lp = netdev_priv(ndev); 2318 struct smc_local *lp = netdev_priv(ndev);
2320 struct resource *res; 2319 struct resource *res;
2321 2320
2322 platform_set_drvdata(pdev, NULL);
2323
2324 unregister_netdev(ndev); 2321 unregister_netdev(ndev);
2325 2322
2326 free_irq(ndev->irq, ndev); 2323 free_irq(ndev->irq, ndev);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 3663b9e04a31..a1419211585b 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2284,7 +2284,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
2284 mdiobus_unregister(pdata->mii_bus); 2284 mdiobus_unregister(pdata->mii_bus);
2285 mdiobus_free(pdata->mii_bus); 2285 mdiobus_free(pdata->mii_bus);
2286 2286
2287 platform_set_drvdata(pdev, NULL);
2288 unregister_netdev(dev); 2287 unregister_netdev(dev);
2289 free_irq(dev->irq, dev); 2288 free_irq(dev->irq, dev);
2290 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2289 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2539,7 +2538,6 @@ out_disable_resources:
2539out_enable_resources_fail: 2538out_enable_resources_fail:
2540 smsc911x_free_resources(pdev); 2539 smsc911x_free_resources(pdev);
2541out_request_resources_fail: 2540out_request_resources_fail:
2542 platform_set_drvdata(pdev, NULL);
2543 iounmap(pdata->ioaddr); 2541 iounmap(pdata->ioaddr);
2544 free_netdev(dev); 2542 free_netdev(dev);
2545out_release_io_1: 2543out_release_io_1:
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 43c1f3223322..6e52c0f74cd9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -1,7 +1,6 @@
1config STMMAC_ETH 1config STMMAC_ETH
2 tristate "STMicroelectronics 10/100/1000 Ethernet driver" 2 tristate "STMicroelectronics 10/100/1000 Ethernet driver"
3 depends on HAS_IOMEM && HAS_DMA 3 depends on HAS_IOMEM && HAS_DMA
4 select NET_CORE
5 select MII 4 select MII
6 select PHYLIB 5 select PHYLIB
7 select CRC32 6 select CRC32
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 95176979b2d2..7eb8babed2cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -38,16 +38,6 @@
38#include "descs.h" 38#include "descs.h"
39#include "mmc.h" 39#include "mmc.h"
40 40
41#undef CHIP_DEBUG_PRINT
42/* Turn-on extra printk debug for MAC core, dma and descriptors */
43/* #define CHIP_DEBUG_PRINT */
44
45#ifdef CHIP_DEBUG_PRINT
46#define CHIP_DBG(fmt, args...) printk(fmt, ## args)
47#else
48#define CHIP_DBG(fmt, args...) do { } while (0)
49#endif
50
51/* Synopsys Core versions */ 41/* Synopsys Core versions */
52#define DWMAC_CORE_3_40 0x34 42#define DWMAC_CORE_3_40 0x34
53#define DWMAC_CORE_3_50 0x35 43#define DWMAC_CORE_3_50 0x35
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 7e05e8d0f1c2..cdd926832e27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -91,8 +91,8 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
91 unsigned int value = 0; 91 unsigned int value = 0;
92 unsigned int perfect_addr_number; 92 unsigned int perfect_addr_number;
93 93
94 CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", 94 pr_debug("%s: # mcasts %d, # unicast %d\n", __func__,
95 __func__, netdev_mc_count(dev), netdev_uc_count(dev)); 95 netdev_mc_count(dev), netdev_uc_count(dev));
96 96
97 if (dev->flags & IFF_PROMISC) 97 if (dev->flags & IFF_PROMISC)
98 value = GMAC_FRAME_FILTER_PR; 98 value = GMAC_FRAME_FILTER_PR;
@@ -152,7 +152,7 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
152#endif 152#endif
153 writel(value, ioaddr + GMAC_FRAME_FILTER); 153 writel(value, ioaddr + GMAC_FRAME_FILTER);
154 154
155 CHIP_DBG(KERN_INFO "\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n", 155 pr_debug("\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
156 readl(ioaddr + GMAC_FRAME_FILTER), 156 readl(ioaddr + GMAC_FRAME_FILTER),
157 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); 157 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
158} 158}
@@ -162,18 +162,18 @@ static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
162{ 162{
163 unsigned int flow = 0; 163 unsigned int flow = 0;
164 164
165 CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n"); 165 pr_debug("GMAC Flow-Control:\n");
166 if (fc & FLOW_RX) { 166 if (fc & FLOW_RX) {
167 CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n"); 167 pr_debug("\tReceive Flow-Control ON\n");
168 flow |= GMAC_FLOW_CTRL_RFE; 168 flow |= GMAC_FLOW_CTRL_RFE;
169 } 169 }
170 if (fc & FLOW_TX) { 170 if (fc & FLOW_TX) {
171 CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n"); 171 pr_debug("\tTransmit Flow-Control ON\n");
172 flow |= GMAC_FLOW_CTRL_TFE; 172 flow |= GMAC_FLOW_CTRL_TFE;
173 } 173 }
174 174
175 if (duplex) { 175 if (duplex) {
176 CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time); 176 pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
177 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); 177 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
178 } 178 }
179 179
@@ -185,11 +185,11 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
185 unsigned int pmt = 0; 185 unsigned int pmt = 0;
186 186
187 if (mode & WAKE_MAGIC) { 187 if (mode & WAKE_MAGIC) {
188 CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); 188 pr_debug("GMAC: WOL Magic frame\n");
189 pmt |= power_down | magic_pkt_en; 189 pmt |= power_down | magic_pkt_en;
190 } 190 }
191 if (mode & WAKE_UCAST) { 191 if (mode & WAKE_UCAST) {
192 CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); 192 pr_debug("GMAC: WOL on global unicast\n");
193 pmt |= global_unicast; 193 pmt |= global_unicast;
194 } 194 }
195 195
@@ -203,23 +203,13 @@ static int dwmac1000_irq_status(void __iomem *ioaddr,
203 int ret = 0; 203 int ret = 0;
204 204
205 /* Not used events (e.g. MMC interrupts) are not handled. */ 205 /* Not used events (e.g. MMC interrupts) are not handled. */
206 if ((intr_status & mmc_tx_irq)) { 206 if ((intr_status & mmc_tx_irq))
207 CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
208 readl(ioaddr + GMAC_MMC_TX_INTR));
209 x->mmc_tx_irq_n++; 207 x->mmc_tx_irq_n++;
210 } 208 if (unlikely(intr_status & mmc_rx_irq))
211 if (unlikely(intr_status & mmc_rx_irq)) {
212 CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
213 readl(ioaddr + GMAC_MMC_RX_INTR));
214 x->mmc_rx_irq_n++; 209 x->mmc_rx_irq_n++;
215 } 210 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
216 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
217 CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
218 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
219 x->mmc_rx_csum_offload_irq_n++; 211 x->mmc_rx_csum_offload_irq_n++;
220 }
221 if (unlikely(intr_status & pmt_irq)) { 212 if (unlikely(intr_status & pmt_irq)) {
222 CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
223 /* clear the PMT bits 5 and 6 by reading the PMT status reg */ 213 /* clear the PMT bits 5 and 6 by reading the PMT status reg */
224 readl(ioaddr + GMAC_PMT); 214 readl(ioaddr + GMAC_PMT);
225 x->irq_receive_pmt_irq_n++; 215 x->irq_receive_pmt_irq_n++;
@@ -229,32 +219,22 @@ static int dwmac1000_irq_status(void __iomem *ioaddr,
229 /* Clean LPI interrupt by reading the Reg 12 */ 219 /* Clean LPI interrupt by reading the Reg 12 */
230 ret = readl(ioaddr + LPI_CTRL_STATUS); 220 ret = readl(ioaddr + LPI_CTRL_STATUS);
231 221
232 if (ret & LPI_CTRL_STATUS_TLPIEN) { 222 if (ret & LPI_CTRL_STATUS_TLPIEN)
233 CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
234 x->irq_tx_path_in_lpi_mode_n++; 223 x->irq_tx_path_in_lpi_mode_n++;
235 } 224 if (ret & LPI_CTRL_STATUS_TLPIEX)
236 if (ret & LPI_CTRL_STATUS_TLPIEX) {
237 CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
238 x->irq_tx_path_exit_lpi_mode_n++; 225 x->irq_tx_path_exit_lpi_mode_n++;
239 } 226 if (ret & LPI_CTRL_STATUS_RLPIEN)
240 if (ret & LPI_CTRL_STATUS_RLPIEN) {
241 CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
242 x->irq_rx_path_in_lpi_mode_n++; 227 x->irq_rx_path_in_lpi_mode_n++;
243 } 228 if (ret & LPI_CTRL_STATUS_RLPIEX)
244 if (ret & LPI_CTRL_STATUS_RLPIEX) {
245 CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
246 x->irq_rx_path_exit_lpi_mode_n++; 229 x->irq_rx_path_exit_lpi_mode_n++;
247 }
248 } 230 }
249 231
250 if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) { 232 if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
251 CHIP_DBG(KERN_INFO "GMAC PCS ANE IRQ\n");
252 readl(ioaddr + GMAC_AN_STATUS); 233 readl(ioaddr + GMAC_AN_STATUS);
253 x->irq_pcs_ane_n++; 234 x->irq_pcs_ane_n++;
254 } 235 }
255 if (intr_status & rgmii_irq) { 236 if (intr_status & rgmii_irq) {
256 u32 status = readl(ioaddr + GMAC_S_R_GMII); 237 u32 status = readl(ioaddr + GMAC_S_R_GMII);
257 CHIP_DBG(KERN_INFO "GMAC RGMII/SGMII interrupt\n");
258 x->irq_rgmii_n++; 238 x->irq_rgmii_n++;
259 239
260 /* Save and dump the link status. */ 240 /* Save and dump the link status. */
@@ -271,11 +251,12 @@ static int dwmac1000_irq_status(void __iomem *ioaddr,
271 x->pcs_speed = SPEED_10; 251 x->pcs_speed = SPEED_10;
272 252
273 x->pcs_link = 1; 253 x->pcs_link = 1;
274 pr_debug("Link is Up - %d/%s\n", (int)x->pcs_speed, 254 pr_debug("%s: Link is Up - %d/%s\n", __func__,
255 (int)x->pcs_speed,
275 x->pcs_duplex ? "Full" : "Half"); 256 x->pcs_duplex ? "Full" : "Half");
276 } else { 257 } else {
277 x->pcs_link = 0; 258 x->pcs_link = 0;
278 pr_debug("Link is Down\n"); 259 pr_debug("%s: Link is Down\n", __func__);
279 } 260 }
280 } 261 }
281 262
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 2c431b616058..0c2058a69fd2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -116,7 +116,7 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
116 u32 csr6 = readl(ioaddr + DMA_CONTROL); 116 u32 csr6 = readl(ioaddr + DMA_CONTROL);
117 117
118 if (txmode == SF_DMA_MODE) { 118 if (txmode == SF_DMA_MODE) {
119 CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n"); 119 pr_debug("GMAC: enable TX store and forward mode\n");
120 /* Transmit COE type 2 cannot be done in cut-through mode. */ 120 /* Transmit COE type 2 cannot be done in cut-through mode. */
121 csr6 |= DMA_CONTROL_TSF; 121 csr6 |= DMA_CONTROL_TSF;
122 /* Operating on second frame increase the performance 122 /* Operating on second frame increase the performance
@@ -124,8 +124,7 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
124 */ 124 */
125 csr6 |= DMA_CONTROL_OSF; 125 csr6 |= DMA_CONTROL_OSF;
126 } else { 126 } else {
127 CHIP_DBG(KERN_DEBUG "GMAC: disabling TX SF (threshold %d)\n", 127 pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
128 txmode);
129 csr6 &= ~DMA_CONTROL_TSF; 128 csr6 &= ~DMA_CONTROL_TSF;
130 csr6 &= DMA_CONTROL_TC_TX_MASK; 129 csr6 &= DMA_CONTROL_TC_TX_MASK;
131 /* Set the transmit threshold */ 130 /* Set the transmit threshold */
@@ -142,11 +141,10 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
142 } 141 }
143 142
144 if (rxmode == SF_DMA_MODE) { 143 if (rxmode == SF_DMA_MODE) {
145 CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n"); 144 pr_debug("GMAC: enable RX store and forward mode\n");
146 csr6 |= DMA_CONTROL_RSF; 145 csr6 |= DMA_CONTROL_RSF;
147 } else { 146 } else {
148 CHIP_DBG(KERN_DEBUG "GMAC: disable RX SF mode (threshold %d)\n", 147 pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
149 rxmode);
150 csr6 &= ~DMA_CONTROL_RSF; 148 csr6 &= ~DMA_CONTROL_RSF;
151 csr6 &= DMA_CONTROL_TC_RX_MASK; 149 csr6 &= DMA_CONTROL_TC_RX_MASK;
152 if (rxmode <= 32) 150 if (rxmode <= 32)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 007bb2be3f10..5857d677dac1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -135,10 +135,6 @@ static void dwmac100_set_filter(struct net_device *dev, int id)
135 } 135 }
136 136
137 writel(value, ioaddr + MAC_CONTROL); 137 writel(value, ioaddr + MAC_CONTROL);
138
139 CHIP_DBG(KERN_INFO "%s: Filter: 0x%08x Hash: HI 0x%08x, LO 0x%08x\n",
140 __func__, readl(ioaddr + MAC_CONTROL),
141 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
142} 138}
143 139
144static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex, 140static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 67551c154138..7d1dce9e7ffc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -90,14 +90,14 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
90{ 90{
91 int i; 91 int i;
92 92
93 CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n"); 93 pr_debug("DWMAC 100 DMA CSR\n");
94 for (i = 0; i < 9; i++) 94 for (i = 0; i < 9; i++)
95 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, 95 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
96 (DMA_BUS_MODE + i * 4), 96 (DMA_BUS_MODE + i * 4),
97 readl(ioaddr + DMA_BUS_MODE + i * 4)); 97 readl(ioaddr + DMA_BUS_MODE + i * 4));
98 CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n", 98
99 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR)); 99 pr_debug("\tCSR20 (0x%x): 0x%08x, CSR21 (0x%x): 0x%08x\n",
100 CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n", 100 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR),
101 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); 101 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
102} 102}
103 103
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 491d7e930603..484e3cf9c414 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -24,13 +24,6 @@
24#include "common.h" 24#include "common.h"
25#include "dwmac_dma.h" 25#include "dwmac_dma.h"
26 26
27#undef DWMAC_DMA_DEBUG
28#ifdef DWMAC_DMA_DEBUG
29#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args)
30#else
31#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
32#endif
33
34#define GMAC_HI_REG_AE 0x80000000 27#define GMAC_HI_REG_AE 0x80000000
35 28
36/* CSR1 enables the transmit DMA to check for new descriptor */ 29/* CSR1 enables the transmit DMA to check for new descriptor */
@@ -85,24 +78,24 @@ static void show_tx_process_state(unsigned int status)
85 78
86 switch (state) { 79 switch (state) {
87 case 0: 80 case 0:
88 pr_info("- TX (Stopped): Reset or Stop command\n"); 81 pr_debug("- TX (Stopped): Reset or Stop command\n");
89 break; 82 break;
90 case 1: 83 case 1:
91 pr_info("- TX (Running):Fetching the Tx desc\n"); 84 pr_debug("- TX (Running):Fetching the Tx desc\n");
92 break; 85 break;
93 case 2: 86 case 2:
94 pr_info("- TX (Running): Waiting for end of tx\n"); 87 pr_debug("- TX (Running): Waiting for end of tx\n");
95 break; 88 break;
96 case 3: 89 case 3:
97 pr_info("- TX (Running): Reading the data " 90 pr_debug("- TX (Running): Reading the data "
98 "and queuing the data into the Tx buf\n"); 91 "and queuing the data into the Tx buf\n");
99 break; 92 break;
100 case 6: 93 case 6:
101 pr_info("- TX (Suspended): Tx Buff Underflow " 94 pr_debug("- TX (Suspended): Tx Buff Underflow "
102 "or an unavailable Transmit descriptor\n"); 95 "or an unavailable Transmit descriptor\n");
103 break; 96 break;
104 case 7: 97 case 7:
105 pr_info("- TX (Running): Closing Tx descriptor\n"); 98 pr_debug("- TX (Running): Closing Tx descriptor\n");
106 break; 99 break;
107 default: 100 default:
108 break; 101 break;
@@ -116,29 +109,29 @@ static void show_rx_process_state(unsigned int status)
116 109
117 switch (state) { 110 switch (state) {
118 case 0: 111 case 0:
119 pr_info("- RX (Stopped): Reset or Stop command\n"); 112 pr_debug("- RX (Stopped): Reset or Stop command\n");
120 break; 113 break;
121 case 1: 114 case 1:
122 pr_info("- RX (Running): Fetching the Rx desc\n"); 115 pr_debug("- RX (Running): Fetching the Rx desc\n");
123 break; 116 break;
124 case 2: 117 case 2:
125 pr_info("- RX (Running):Checking for end of pkt\n"); 118 pr_debug("- RX (Running):Checking for end of pkt\n");
126 break; 119 break;
127 case 3: 120 case 3:
128 pr_info("- RX (Running): Waiting for Rx pkt\n"); 121 pr_debug("- RX (Running): Waiting for Rx pkt\n");
129 break; 122 break;
130 case 4: 123 case 4:
131 pr_info("- RX (Suspended): Unavailable Rx buf\n"); 124 pr_debug("- RX (Suspended): Unavailable Rx buf\n");
132 break; 125 break;
133 case 5: 126 case 5:
134 pr_info("- RX (Running): Closing Rx descriptor\n"); 127 pr_debug("- RX (Running): Closing Rx descriptor\n");
135 break; 128 break;
136 case 6: 129 case 6:
137 pr_info("- RX(Running): Flushing the current frame" 130 pr_debug("- RX(Running): Flushing the current frame"
138 " from the Rx buf\n"); 131 " from the Rx buf\n");
139 break; 132 break;
140 case 7: 133 case 7:
141 pr_info("- RX (Running): Queuing the Rx frame" 134 pr_debug("- RX (Running): Queuing the Rx frame"
142 " from the Rx buf into memory\n"); 135 " from the Rx buf into memory\n");
143 break; 136 break;
144 default: 137 default:
@@ -154,51 +147,37 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
154 /* read the status register (CSR5) */ 147 /* read the status register (CSR5) */
155 u32 intr_status = readl(ioaddr + DMA_STATUS); 148 u32 intr_status = readl(ioaddr + DMA_STATUS);
156 149
157 DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
158#ifdef DWMAC_DMA_DEBUG 150#ifdef DWMAC_DMA_DEBUG
159 /* It displays the DMA process states (CSR5 register) */ 151 /* Enable it to monitor DMA rx/tx status in case of critical problems */
152 pr_debug("%s: [CSR5: 0x%08x]\n", __func__, intr_status);
160 show_tx_process_state(intr_status); 153 show_tx_process_state(intr_status);
161 show_rx_process_state(intr_status); 154 show_rx_process_state(intr_status);
162#endif 155#endif
163 /* ABNORMAL interrupts */ 156 /* ABNORMAL interrupts */
164 if (unlikely(intr_status & DMA_STATUS_AIS)) { 157 if (unlikely(intr_status & DMA_STATUS_AIS)) {
165 DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
166 if (unlikely(intr_status & DMA_STATUS_UNF)) { 158 if (unlikely(intr_status & DMA_STATUS_UNF)) {
167 DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
168 ret = tx_hard_error_bump_tc; 159 ret = tx_hard_error_bump_tc;
169 x->tx_undeflow_irq++; 160 x->tx_undeflow_irq++;
170 } 161 }
171 if (unlikely(intr_status & DMA_STATUS_TJT)) { 162 if (unlikely(intr_status & DMA_STATUS_TJT))
172 DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
173 x->tx_jabber_irq++; 163 x->tx_jabber_irq++;
174 } 164
175 if (unlikely(intr_status & DMA_STATUS_OVF)) { 165 if (unlikely(intr_status & DMA_STATUS_OVF))
176 DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
177 x->rx_overflow_irq++; 166 x->rx_overflow_irq++;
178 } 167
179 if (unlikely(intr_status & DMA_STATUS_RU)) { 168 if (unlikely(intr_status & DMA_STATUS_RU))
180 DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
181 x->rx_buf_unav_irq++; 169 x->rx_buf_unav_irq++;
182 } 170 if (unlikely(intr_status & DMA_STATUS_RPS))
183 if (unlikely(intr_status & DMA_STATUS_RPS)) {
184 DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
185 x->rx_process_stopped_irq++; 171 x->rx_process_stopped_irq++;
186 } 172 if (unlikely(intr_status & DMA_STATUS_RWT))
187 if (unlikely(intr_status & DMA_STATUS_RWT)) {
188 DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
189 x->rx_watchdog_irq++; 173 x->rx_watchdog_irq++;
190 } 174 if (unlikely(intr_status & DMA_STATUS_ETI))
191 if (unlikely(intr_status & DMA_STATUS_ETI)) {
192 DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
193 x->tx_early_irq++; 175 x->tx_early_irq++;
194 }
195 if (unlikely(intr_status & DMA_STATUS_TPS)) { 176 if (unlikely(intr_status & DMA_STATUS_TPS)) {
196 DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
197 x->tx_process_stopped_irq++; 177 x->tx_process_stopped_irq++;
198 ret = tx_hard_error; 178 ret = tx_hard_error;
199 } 179 }
200 if (unlikely(intr_status & DMA_STATUS_FBI)) { 180 if (unlikely(intr_status & DMA_STATUS_FBI)) {
201 DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
202 x->fatal_bus_error_irq++; 181 x->fatal_bus_error_irq++;
203 ret = tx_hard_error; 182 ret = tx_hard_error;
204 } 183 }
@@ -224,12 +203,11 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
224 /* Optional hardware blocks, interrupts should be disabled */ 203 /* Optional hardware blocks, interrupts should be disabled */
225 if (unlikely(intr_status & 204 if (unlikely(intr_status &
226 (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) 205 (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
227 pr_info("%s: unexpected status %08x\n", __func__, intr_status); 206 pr_warn("%s: unexpected status %08x\n", __func__, intr_status);
228 207
229 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ 208 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
230 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); 209 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
231 210
232 DWMAC_LIB_DBG(KERN_INFO "\n\n");
233 return ret; 211 return ret;
234} 212}
235 213
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 0fbc8fafa706..7e6628a91514 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -33,54 +33,40 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
33 struct net_device_stats *stats = (struct net_device_stats *)data; 33 struct net_device_stats *stats = (struct net_device_stats *)data;
34 34
35 if (unlikely(p->des01.etx.error_summary)) { 35 if (unlikely(p->des01.etx.error_summary)) {
36 CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx); 36 if (unlikely(p->des01.etx.jabber_timeout))
37 if (unlikely(p->des01.etx.jabber_timeout)) {
38 CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
39 x->tx_jabber++; 37 x->tx_jabber++;
40 }
41 38
42 if (unlikely(p->des01.etx.frame_flushed)) { 39 if (unlikely(p->des01.etx.frame_flushed)) {
43 CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
44 x->tx_frame_flushed++; 40 x->tx_frame_flushed++;
45 dwmac_dma_flush_tx_fifo(ioaddr); 41 dwmac_dma_flush_tx_fifo(ioaddr);
46 } 42 }
47 43
48 if (unlikely(p->des01.etx.loss_carrier)) { 44 if (unlikely(p->des01.etx.loss_carrier)) {
49 CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
50 x->tx_losscarrier++; 45 x->tx_losscarrier++;
51 stats->tx_carrier_errors++; 46 stats->tx_carrier_errors++;
52 } 47 }
53 if (unlikely(p->des01.etx.no_carrier)) { 48 if (unlikely(p->des01.etx.no_carrier)) {
54 CHIP_DBG(KERN_ERR "\tno_carrier error\n");
55 x->tx_carrier++; 49 x->tx_carrier++;
56 stats->tx_carrier_errors++; 50 stats->tx_carrier_errors++;
57 } 51 }
58 if (unlikely(p->des01.etx.late_collision)) { 52 if (unlikely(p->des01.etx.late_collision))
59 CHIP_DBG(KERN_ERR "\tlate_collision error\n");
60 stats->collisions += p->des01.etx.collision_count; 53 stats->collisions += p->des01.etx.collision_count;
61 } 54
62 if (unlikely(p->des01.etx.excessive_collisions)) { 55 if (unlikely(p->des01.etx.excessive_collisions))
63 CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
64 stats->collisions += p->des01.etx.collision_count; 56 stats->collisions += p->des01.etx.collision_count;
65 } 57
66 if (unlikely(p->des01.etx.excessive_deferral)) { 58 if (unlikely(p->des01.etx.excessive_deferral))
67 CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
68 x->tx_deferred++; 59 x->tx_deferred++;
69 }
70 60
71 if (unlikely(p->des01.etx.underflow_error)) { 61 if (unlikely(p->des01.etx.underflow_error)) {
72 CHIP_DBG(KERN_ERR "\tunderflow error\n");
73 dwmac_dma_flush_tx_fifo(ioaddr); 62 dwmac_dma_flush_tx_fifo(ioaddr);
74 x->tx_underflow++; 63 x->tx_underflow++;
75 } 64 }
76 65
77 if (unlikely(p->des01.etx.ip_header_error)) { 66 if (unlikely(p->des01.etx.ip_header_error))
78 CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
79 x->tx_ip_header_error++; 67 x->tx_ip_header_error++;
80 }
81 68
82 if (unlikely(p->des01.etx.payload_error)) { 69 if (unlikely(p->des01.etx.payload_error)) {
83 CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
84 x->tx_payload_error++; 70 x->tx_payload_error++;
85 dwmac_dma_flush_tx_fifo(ioaddr); 71 dwmac_dma_flush_tx_fifo(ioaddr);
86 } 72 }
@@ -88,15 +74,12 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
88 ret = -1; 74 ret = -1;
89 } 75 }
90 76
91 if (unlikely(p->des01.etx.deferred)) { 77 if (unlikely(p->des01.etx.deferred))
92 CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
93 x->tx_deferred++; 78 x->tx_deferred++;
94 } 79
95#ifdef STMMAC_VLAN_TAG_USED 80#ifdef STMMAC_VLAN_TAG_USED
96 if (p->des01.etx.vlan_frame) { 81 if (p->des01.etx.vlan_frame)
97 CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
98 x->tx_vlan++; 82 x->tx_vlan++;
99 }
100#endif 83#endif
101 84
102 return ret; 85 return ret;
@@ -123,30 +106,20 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
123 * 0 1 1 | COE bypassed.. no IPv4/6 frame 106 * 0 1 1 | COE bypassed.. no IPv4/6 frame
124 * 0 1 0 | Reserved. 107 * 0 1 0 | Reserved.
125 */ 108 */
126 if (status == 0x0) { 109 if (status == 0x0)
127 CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
128 ret = llc_snap; 110 ret = llc_snap;
129 } else if (status == 0x4) { 111 else if (status == 0x4)
130 CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
131 ret = good_frame; 112 ret = good_frame;
132 } else if (status == 0x5) { 113 else if (status == 0x5)
133 CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
134 ret = csum_none; 114 ret = csum_none;
135 } else if (status == 0x6) { 115 else if (status == 0x6)
136 CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
137 ret = csum_none; 116 ret = csum_none;
138 } else if (status == 0x7) { 117 else if (status == 0x7)
139 CHIP_DBG(KERN_ERR
140 "RX Des0 status: IPv4/6 Header and Payload Error.\n");
141 ret = csum_none; 118 ret = csum_none;
142 } else if (status == 0x1) { 119 else if (status == 0x1)
143 CHIP_DBG(KERN_ERR
144 "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
145 ret = discard_frame; 120 ret = discard_frame;
146 } else if (status == 0x3) { 121 else if (status == 0x3)
147 CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
148 ret = discard_frame; 122 ret = discard_frame;
149 }
150 return ret; 123 return ret;
151} 124}
152 125
@@ -208,36 +181,26 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
208 struct net_device_stats *stats = (struct net_device_stats *)data; 181 struct net_device_stats *stats = (struct net_device_stats *)data;
209 182
210 if (unlikely(p->des01.erx.error_summary)) { 183 if (unlikely(p->des01.erx.error_summary)) {
211 CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
212 p->des01.erx);
213 if (unlikely(p->des01.erx.descriptor_error)) { 184 if (unlikely(p->des01.erx.descriptor_error)) {
214 CHIP_DBG(KERN_ERR "\tdescriptor error\n");
215 x->rx_desc++; 185 x->rx_desc++;
216 stats->rx_length_errors++; 186 stats->rx_length_errors++;
217 } 187 }
218 if (unlikely(p->des01.erx.overflow_error)) { 188 if (unlikely(p->des01.erx.overflow_error))
219 CHIP_DBG(KERN_ERR "\toverflow error\n");
220 x->rx_gmac_overflow++; 189 x->rx_gmac_overflow++;
221 }
222 190
223 if (unlikely(p->des01.erx.ipc_csum_error)) 191 if (unlikely(p->des01.erx.ipc_csum_error))
224 CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n"); 192 pr_err("\tIPC Csum Error/Giant frame\n");
225 193
226 if (unlikely(p->des01.erx.late_collision)) { 194 if (unlikely(p->des01.erx.late_collision)) {
227 CHIP_DBG(KERN_ERR "\tlate_collision error\n");
228 stats->collisions++;
229 stats->collisions++; 195 stats->collisions++;
230 } 196 }
231 if (unlikely(p->des01.erx.receive_watchdog)) { 197 if (unlikely(p->des01.erx.receive_watchdog))
232 CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
233 x->rx_watchdog++; 198 x->rx_watchdog++;
234 } 199
235 if (unlikely(p->des01.erx.error_gmii)) { 200 if (unlikely(p->des01.erx.error_gmii))
236 CHIP_DBG(KERN_ERR "\tReceive Error\n");
237 x->rx_mii++; 201 x->rx_mii++;
238 } 202
239 if (unlikely(p->des01.erx.crc_error)) { 203 if (unlikely(p->des01.erx.crc_error)) {
240 CHIP_DBG(KERN_ERR "\tCRC error\n");
241 x->rx_crc++; 204 x->rx_crc++;
242 stats->rx_crc_errors++; 205 stats->rx_crc_errors++;
243 } 206 }
@@ -251,30 +214,24 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
251 ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, 214 ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
252 p->des01.erx.frame_type, p->des01.erx.rx_mac_addr); 215 p->des01.erx.frame_type, p->des01.erx.rx_mac_addr);
253 216
254 if (unlikely(p->des01.erx.dribbling)) { 217 if (unlikely(p->des01.erx.dribbling))
255 CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
256 x->dribbling_bit++; 218 x->dribbling_bit++;
257 } 219
258 if (unlikely(p->des01.erx.sa_filter_fail)) { 220 if (unlikely(p->des01.erx.sa_filter_fail)) {
259 CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
260 x->sa_rx_filter_fail++; 221 x->sa_rx_filter_fail++;
261 ret = discard_frame; 222 ret = discard_frame;
262 } 223 }
263 if (unlikely(p->des01.erx.da_filter_fail)) { 224 if (unlikely(p->des01.erx.da_filter_fail)) {
264 CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n");
265 x->da_rx_filter_fail++; 225 x->da_rx_filter_fail++;
266 ret = discard_frame; 226 ret = discard_frame;
267 } 227 }
268 if (unlikely(p->des01.erx.length_error)) { 228 if (unlikely(p->des01.erx.length_error)) {
269 CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
270 x->rx_length++; 229 x->rx_length++;
271 ret = discard_frame; 230 ret = discard_frame;
272 } 231 }
273#ifdef STMMAC_VLAN_TAG_USED 232#ifdef STMMAC_VLAN_TAG_USED
274 if (p->des01.erx.vlan_tag) { 233 if (p->des01.erx.vlan_tag)
275 CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
276 x->rx_vlan++; 234 x->rx_vlan++;
277 }
278#endif 235#endif
279 236
280 return ret; 237 return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 11775b99afc5..35ad4f427ae2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -52,10 +52,8 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
52 ret = -1; 52 ret = -1;
53 } 53 }
54 54
55 if (p->des01.etx.vlan_frame) { 55 if (p->des01.etx.vlan_frame)
56 CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
57 x->tx_vlan++; 56 x->tx_vlan++;
58 }
59 57
60 if (unlikely(p->des01.tx.deferred)) 58 if (unlikely(p->des01.tx.deferred))
61 x->tx_deferred++; 59 x->tx_deferred++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e9eab29db7be..f2ccb36e8685 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -51,32 +51,6 @@
51#include "stmmac_ptp.h" 51#include "stmmac_ptp.h"
52#include "stmmac.h" 52#include "stmmac.h"
53 53
54#undef STMMAC_DEBUG
55/*#define STMMAC_DEBUG*/
56#ifdef STMMAC_DEBUG
57#define DBG(nlevel, klevel, fmt, args...) \
58 ((void)(netif_msg_##nlevel(priv) && \
59 printk(KERN_##klevel fmt, ## args)))
60#else
61#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
62#endif
63
64#undef STMMAC_RX_DEBUG
65/*#define STMMAC_RX_DEBUG*/
66#ifdef STMMAC_RX_DEBUG
67#define RX_DBG(fmt, args...) printk(fmt, ## args)
68#else
69#define RX_DBG(fmt, args...) do { } while (0)
70#endif
71
72#undef STMMAC_XMIT_DEBUG
73/*#define STMMAC_XMIT_DEBUG*/
74#ifdef STMMAC_XMIT_DEBUG
75#define TX_DBG(fmt, args...) printk(fmt, ## args)
76#else
77#define TX_DBG(fmt, args...) do { } while (0)
78#endif
79
80#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) 54#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
81#define JUMBO_LEN 9000 55#define JUMBO_LEN 9000
82 56
@@ -214,19 +188,17 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
214 } 188 }
215} 189}
216 190
217#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
218static void print_pkt(unsigned char *buf, int len) 191static void print_pkt(unsigned char *buf, int len)
219{ 192{
220 int j; 193 int j;
221 pr_info("len = %d byte, buf addr: 0x%p", len, buf); 194 pr_debug("len = %d byte, buf addr: 0x%p", len, buf);
222 for (j = 0; j < len; j++) { 195 for (j = 0; j < len; j++) {
223 if ((j % 16) == 0) 196 if ((j % 16) == 0)
224 pr_info("\n %03x:", j); 197 pr_debug("\n %03x:", j);
225 pr_info(" %02x", buf[j]); 198 pr_debug(" %02x", buf[j]);
226 } 199 }
227 pr_info("\n"); 200 pr_debug("\n");
228} 201}
229#endif
230 202
231/* minimum number of free TX descriptors required to wake up TX process */ 203/* minimum number of free TX descriptors required to wake up TX process */
232#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4) 204#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4)
@@ -696,9 +668,6 @@ static void stmmac_adjust_link(struct net_device *dev)
696 if (phydev == NULL) 668 if (phydev == NULL)
697 return; 669 return;
698 670
699 DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n",
700 phydev->addr, phydev->link);
701
702 spin_lock_irqsave(&priv->lock, flags); 671 spin_lock_irqsave(&priv->lock, flags);
703 672
704 if (phydev->link) { 673 if (phydev->link) {
@@ -773,8 +742,6 @@ static void stmmac_adjust_link(struct net_device *dev)
773 priv->eee_enabled = stmmac_eee_init(priv); 742 priv->eee_enabled = stmmac_eee_init(priv);
774 743
775 spin_unlock_irqrestore(&priv->lock, flags); 744 spin_unlock_irqrestore(&priv->lock, flags);
776
777 DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
778} 745}
779 746
780/** 747/**
@@ -789,13 +756,13 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
789 int interface = priv->plat->interface; 756 int interface = priv->plat->interface;
790 757
791 if (priv->dma_cap.pcs) { 758 if (priv->dma_cap.pcs) {
792 if ((interface & PHY_INTERFACE_MODE_RGMII) || 759 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
793 (interface & PHY_INTERFACE_MODE_RGMII_ID) || 760 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
794 (interface & PHY_INTERFACE_MODE_RGMII_RXID) || 761 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
795 (interface & PHY_INTERFACE_MODE_RGMII_TXID)) { 762 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
796 pr_debug("STMMAC: PCS RGMII support enable\n"); 763 pr_debug("STMMAC: PCS RGMII support enable\n");
797 priv->pcs = STMMAC_PCS_RGMII; 764 priv->pcs = STMMAC_PCS_RGMII;
798 } else if (interface & PHY_INTERFACE_MODE_SGMII) { 765 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
799 pr_debug("STMMAC: PCS SGMII support enable\n"); 766 pr_debug("STMMAC: PCS SGMII support enable\n");
800 priv->pcs = STMMAC_PCS_SGMII; 767 priv->pcs = STMMAC_PCS_SGMII;
801 } 768 }
@@ -1015,8 +982,9 @@ static void init_dma_desc_rings(struct net_device *dev)
1015 if (bfsize < BUF_SIZE_16KiB) 982 if (bfsize < BUF_SIZE_16KiB)
1016 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); 983 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1017 984
1018 DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", 985 if (netif_msg_probe(priv))
1019 txsize, rxsize, bfsize); 986 pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
987 txsize, rxsize, bfsize);
1020 988
1021 if (priv->extend_desc) { 989 if (priv->extend_desc) {
1022 priv->dma_erx = dma_alloc_coherent(priv->device, rxsize * 990 priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
@@ -1052,12 +1020,13 @@ static void init_dma_desc_rings(struct net_device *dev)
1052 GFP_KERNEL); 1020 GFP_KERNEL);
1053 priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), 1021 priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
1054 GFP_KERNEL); 1022 GFP_KERNEL);
1055 if (netif_msg_drv(priv)) 1023 if (netif_msg_probe(priv)) {
1056 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, 1024 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
1057 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); 1025 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
1058 1026
1059 /* RX INITIALIZATION */ 1027 /* RX INITIALIZATION */
1060 DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n"); 1028 pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
1029 }
1061 for (i = 0; i < rxsize; i++) { 1030 for (i = 0; i < rxsize; i++) {
1062 struct dma_desc *p; 1031 struct dma_desc *p;
1063 if (priv->extend_desc) 1032 if (priv->extend_desc)
@@ -1068,8 +1037,10 @@ static void init_dma_desc_rings(struct net_device *dev)
1068 if (stmmac_init_rx_buffers(priv, p, i)) 1037 if (stmmac_init_rx_buffers(priv, p, i))
1069 break; 1038 break;
1070 1039
1071 DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], 1040 if (netif_msg_probe(priv))
1072 priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]); 1041 pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
1042 priv->rx_skbuff[i]->data,
1043 (unsigned int)priv->rx_skbuff_dma[i]);
1073 } 1044 }
1074 priv->cur_rx = 0; 1045 priv->cur_rx = 0;
1075 priv->dirty_rx = (unsigned int)(i - rxsize); 1046 priv->dirty_rx = (unsigned int)(i - rxsize);
@@ -1244,8 +1215,9 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1244 1215
1245 stmmac_get_tx_hwtstamp(priv, entry, skb); 1216 stmmac_get_tx_hwtstamp(priv, entry, skb);
1246 } 1217 }
1247 TX_DBG("%s: curr %d, dirty %d\n", __func__, 1218 if (netif_msg_tx_done(priv))
1248 priv->cur_tx, priv->dirty_tx); 1219 pr_debug("%s: curr %d, dirty %d\n", __func__,
1220 priv->cur_tx, priv->dirty_tx);
1249 1221
1250 if (likely(priv->tx_skbuff_dma[entry])) { 1222 if (likely(priv->tx_skbuff_dma[entry])) {
1251 dma_unmap_single(priv->device, 1223 dma_unmap_single(priv->device,
@@ -1270,7 +1242,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1270 netif_tx_lock(priv->dev); 1242 netif_tx_lock(priv->dev);
1271 if (netif_queue_stopped(priv->dev) && 1243 if (netif_queue_stopped(priv->dev) &&
1272 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { 1244 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
1273 TX_DBG("%s: restart transmit\n", __func__); 1245 if (netif_msg_tx_done(priv))
1246 pr_debug("%s: restart transmit\n", __func__);
1274 netif_wake_queue(priv->dev); 1247 netif_wake_queue(priv->dev);
1275 } 1248 }
1276 netif_tx_unlock(priv->dev); 1249 netif_tx_unlock(priv->dev);
@@ -1579,7 +1552,7 @@ static int stmmac_open(struct net_device *dev)
1579 if (ret) { 1552 if (ret) {
1580 pr_err("%s: Cannot attach to PHY (error: %d)\n", 1553 pr_err("%s: Cannot attach to PHY (error: %d)\n",
1581 __func__, ret); 1554 __func__, ret);
1582 goto open_error; 1555 goto phy_error;
1583 } 1556 }
1584 } 1557 }
1585 1558
@@ -1593,7 +1566,7 @@ static int stmmac_open(struct net_device *dev)
1593 ret = stmmac_init_dma_engine(priv); 1566 ret = stmmac_init_dma_engine(priv);
1594 if (ret < 0) { 1567 if (ret < 0) {
1595 pr_err("%s: DMA initialization failed\n", __func__); 1568 pr_err("%s: DMA initialization failed\n", __func__);
1596 goto open_error; 1569 goto init_error;
1597 } 1570 }
1598 1571
1599 /* Copy the MAC addr into the HW */ 1572 /* Copy the MAC addr into the HW */
@@ -1612,7 +1585,7 @@ static int stmmac_open(struct net_device *dev)
1612 if (unlikely(ret < 0)) { 1585 if (unlikely(ret < 0)) {
1613 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", 1586 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1614 __func__, dev->irq, ret); 1587 __func__, dev->irq, ret);
1615 goto open_error; 1588 goto init_error;
1616 } 1589 }
1617 1590
1618 /* Request the Wake IRQ in case of another line is used for WoL */ 1591 /* Request the Wake IRQ in case of another line is used for WoL */
@@ -1622,7 +1595,7 @@ static int stmmac_open(struct net_device *dev)
1622 if (unlikely(ret < 0)) { 1595 if (unlikely(ret < 0)) {
1623 pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n", 1596 pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1624 __func__, priv->wol_irq, ret); 1597 __func__, priv->wol_irq, ret);
1625 goto open_error_wolirq; 1598 goto wolirq_error;
1626 } 1599 }
1627 } 1600 }
1628 1601
@@ -1633,7 +1606,7 @@ static int stmmac_open(struct net_device *dev)
1633 if (unlikely(ret < 0)) { 1606 if (unlikely(ret < 0)) {
1634 pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n", 1607 pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1635 __func__, priv->lpi_irq, ret); 1608 __func__, priv->lpi_irq, ret);
1636 goto open_error_lpiirq; 1609 goto lpiirq_error;
1637 } 1610 }
1638 } 1611 }
1639 1612
@@ -1659,7 +1632,7 @@ static int stmmac_open(struct net_device *dev)
1659 pr_warn("%s: failed debugFS registration\n", __func__); 1632 pr_warn("%s: failed debugFS registration\n", __func__);
1660#endif 1633#endif
1661 /* Start the ball rolling... */ 1634 /* Start the ball rolling... */
1662 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); 1635 pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
1663 priv->hw->dma->start_tx(priv->ioaddr); 1636 priv->hw->dma->start_tx(priv->ioaddr);
1664 priv->hw->dma->start_rx(priv->ioaddr); 1637 priv->hw->dma->start_rx(priv->ioaddr);
1665 1638
@@ -1691,17 +1664,17 @@ static int stmmac_open(struct net_device *dev)
1691 1664
1692 return 0; 1665 return 0;
1693 1666
1694open_error_lpiirq: 1667lpiirq_error:
1695 if (priv->wol_irq != dev->irq) 1668 if (priv->wol_irq != dev->irq)
1696 free_irq(priv->wol_irq, dev); 1669 free_irq(priv->wol_irq, dev);
1697 1670wolirq_error:
1698open_error_wolirq:
1699 free_irq(dev->irq, dev); 1671 free_irq(dev->irq, dev);
1700 1672
1701open_error: 1673init_error:
1674 free_dma_desc_resources(priv);
1702 if (priv->phydev) 1675 if (priv->phydev)
1703 phy_disconnect(priv->phydev); 1676 phy_disconnect(priv->phydev);
1704 1677phy_error:
1705 clk_disable_unprepare(priv->stmmac_clk); 1678 clk_disable_unprepare(priv->stmmac_clk);
1706 1679
1707 return ret; 1680 return ret;
@@ -1796,16 +1769,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1796 1769
1797 entry = priv->cur_tx % txsize; 1770 entry = priv->cur_tx % txsize;
1798 1771
1799#ifdef STMMAC_XMIT_DEBUG
1800 if ((skb->len > ETH_FRAME_LEN) || nfrags)
1801 pr_debug("%s: [entry %d]: skb addr %p len: %d nopagedlen: %d\n"
1802 "\tn_frags: %d - ip_summed: %d - %s gso\n"
1803 "\ttx_count_frames %d\n", __func__, entry,
1804 skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
1805 !skb_is_gso(skb) ? "isn't" : "is",
1806 priv->tx_count_frames);
1807#endif
1808
1809 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 1772 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
1810 1773
1811 if (priv->extend_desc) 1774 if (priv->extend_desc)
@@ -1815,12 +1778,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1815 1778
1816 first = desc; 1779 first = desc;
1817 1780
1818#ifdef STMMAC_XMIT_DEBUG
1819 if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
1820 pr_debug("\tskb len: %d, nopaged_len: %d,\n"
1821 "\t\tn_frags: %d, ip_summed: %d\n",
1822 skb->len, nopaged_len, nfrags, skb->ip_summed);
1823#endif
1824 priv->tx_skbuff[entry] = skb; 1781 priv->tx_skbuff[entry] = skb;
1825 1782
1826 /* To program the descriptors according to the size of the frame */ 1783 /* To program the descriptors according to the size of the frame */
@@ -1856,7 +1813,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1856 else 1813 else
1857 desc = priv->dma_tx + entry; 1814 desc = priv->dma_tx + entry;
1858 1815
1859 TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
1860 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, 1816 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
1861 DMA_TO_DEVICE); 1817 DMA_TO_DEVICE);
1862 priv->tx_skbuff_dma[entry] = desc->des2; 1818 priv->tx_skbuff_dma[entry] = desc->des2;
@@ -1880,8 +1836,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1880 if (priv->tx_coal_frames > priv->tx_count_frames) { 1836 if (priv->tx_coal_frames > priv->tx_count_frames) {
1881 priv->hw->desc->clear_tx_ic(desc); 1837 priv->hw->desc->clear_tx_ic(desc);
1882 priv->xstats.tx_reset_ic_bit++; 1838 priv->xstats.tx_reset_ic_bit++;
1883 TX_DBG("\t[entry %d]: tx_count_frames %d\n", entry,
1884 priv->tx_count_frames);
1885 mod_timer(&priv->txtimer, 1839 mod_timer(&priv->txtimer,
1886 STMMAC_COAL_TIMER(priv->tx_coal_timer)); 1840 STMMAC_COAL_TIMER(priv->tx_coal_timer));
1887 } else 1841 } else
@@ -1893,22 +1847,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1893 1847
1894 priv->cur_tx++; 1848 priv->cur_tx++;
1895 1849
1896#ifdef STMMAC_XMIT_DEBUG
1897 if (netif_msg_pktdata(priv)) { 1850 if (netif_msg_pktdata(priv)) {
1898 pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d", 1851 pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
1899 __func__, (priv->cur_tx % txsize), 1852 __func__, (priv->cur_tx % txsize),
1900 (priv->dirty_tx % txsize), entry, first, nfrags); 1853 (priv->dirty_tx % txsize), entry, first, nfrags);
1854
1901 if (priv->extend_desc) 1855 if (priv->extend_desc)
1902 stmmac_display_ring((void *)priv->dma_etx, txsize, 1); 1856 stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
1903 else 1857 else
1904 stmmac_display_ring((void *)priv->dma_tx, txsize, 0); 1858 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
1905 1859
1906 pr_info(">>> frame to be transmitted: "); 1860 pr_debug(">>> frame to be transmitted: ");
1907 print_pkt(skb->data, skb->len); 1861 print_pkt(skb->data, skb->len);
1908 } 1862 }
1909#endif
1910 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { 1863 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
1911 TX_DBG("%s: stop transmitted packets\n", __func__); 1864 if (netif_msg_hw(priv))
1865 pr_debug("%s: stop transmitted packets\n", __func__);
1912 netif_stop_queue(dev); 1866 netif_stop_queue(dev);
1913 } 1867 }
1914 1868
@@ -1968,7 +1922,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1968 1922
1969 priv->hw->ring->refill_desc3(priv, p); 1923 priv->hw->ring->refill_desc3(priv, p);
1970 1924
1971 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); 1925 if (netif_msg_rx_status(priv))
1926 pr_debug("\trefill entry #%d\n", entry);
1972 } 1927 }
1973 wmb(); 1928 wmb();
1974 priv->hw->desc->set_rx_owner(p); 1929 priv->hw->desc->set_rx_owner(p);
@@ -1991,15 +1946,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1991 unsigned int count = 0; 1946 unsigned int count = 0;
1992 int coe = priv->plat->rx_coe; 1947 int coe = priv->plat->rx_coe;
1993 1948
1994#ifdef STMMAC_RX_DEBUG 1949 if (netif_msg_rx_status(priv)) {
1995 if (netif_msg_hw(priv)) { 1950 pr_debug("%s: descriptor ring:\n", __func__);
1996 pr_debug(">>> stmmac_rx: descriptor ring:\n");
1997 if (priv->extend_desc) 1951 if (priv->extend_desc)
1998 stmmac_display_ring((void *)priv->dma_erx, rxsize, 1); 1952 stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
1999 else 1953 else
2000 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); 1954 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
2001 } 1955 }
2002#endif
2003 while (count < limit) { 1956 while (count < limit) {
2004 int status; 1957 int status;
2005 struct dma_desc *p; 1958 struct dma_desc *p;
@@ -2053,15 +2006,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2053 */ 2006 */
2054 if (unlikely(status != llc_snap)) 2007 if (unlikely(status != llc_snap))
2055 frame_len -= ETH_FCS_LEN; 2008 frame_len -= ETH_FCS_LEN;
2056#ifdef STMMAC_RX_DEBUG
2057 if (frame_len > ETH_FRAME_LEN)
2058 pr_debug("\tRX frame size %d, COE status: %d\n",
2059 frame_len, status);
2060 2009
2061 if (netif_msg_hw(priv)) 2010 if (netif_msg_rx_status(priv)) {
2062 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", 2011 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
2063 p, entry, p->des2); 2012 p, entry, p->des2);
2064#endif 2013 if (frame_len > ETH_FRAME_LEN)
2014 pr_debug("\tframe size %d, COE: %d\n",
2015 frame_len, status);
2016 }
2065 skb = priv->rx_skbuff[entry]; 2017 skb = priv->rx_skbuff[entry];
2066 if (unlikely(!skb)) { 2018 if (unlikely(!skb)) {
2067 pr_err("%s: Inconsistent Rx descriptor chain\n", 2019 pr_err("%s: Inconsistent Rx descriptor chain\n",
@@ -2078,12 +2030,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2078 dma_unmap_single(priv->device, 2030 dma_unmap_single(priv->device,
2079 priv->rx_skbuff_dma[entry], 2031 priv->rx_skbuff_dma[entry],
2080 priv->dma_buf_sz, DMA_FROM_DEVICE); 2032 priv->dma_buf_sz, DMA_FROM_DEVICE);
2081#ifdef STMMAC_RX_DEBUG 2033
2082 if (netif_msg_pktdata(priv)) { 2034 if (netif_msg_pktdata(priv)) {
2083 pr_info(" frame received (%dbytes)", frame_len); 2035 pr_debug("frame received (%dbytes)", frame_len);
2084 print_pkt(skb->data, frame_len); 2036 print_pkt(skb->data, frame_len);
2085 } 2037 }
2086#endif 2038
2087 skb->protocol = eth_type_trans(skb, priv->dev); 2039 skb->protocol = eth_type_trans(skb, priv->dev);
2088 2040
2089 if (unlikely(!coe)) 2041 if (unlikely(!coe))
@@ -2562,9 +2514,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2562 /* Get and dump the chip ID */ 2514 /* Get and dump the chip ID */
2563 priv->synopsys_id = stmmac_get_synopsys_id(priv); 2515 priv->synopsys_id = stmmac_get_synopsys_id(priv);
2564 2516
2565 /* To use alternate (extended) or normal descriptor structures */
2566 stmmac_selec_desc_mode(priv);
2567
2568 /* To use the chained or ring mode */ 2517 /* To use the chained or ring mode */
2569 if (chain_mode) { 2518 if (chain_mode) {
2570 priv->hw->chain = &chain_mode_ops; 2519 priv->hw->chain = &chain_mode_ops;
@@ -2599,6 +2548,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2599 } else 2548 } else
2600 pr_info(" No HW DMA feature register supported"); 2549 pr_info(" No HW DMA feature register supported");
2601 2550
2551 /* To use alternate (extended) or normal descriptor structures */
2552 stmmac_selec_desc_mode(priv);
2553
2602 ret = priv->hw->mac->rx_ipc(priv->ioaddr); 2554 ret = priv->hw->mac->rx_ipc(priv->ioaddr);
2603 if (!ret) { 2555 if (!ret) {
2604 pr_warn(" RX IPC Checksum Offload not configured.\n"); 2556 pr_warn(" RX IPC Checksum Offload not configured.\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index cc15039eaa47..fe7bc9903867 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -27,6 +27,9 @@
27#include <linux/mii.h> 27#include <linux/mii.h>
28#include <linux/phy.h> 28#include <linux/phy.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/of.h>
31#include <linux/of_gpio.h>
32
30#include <asm/io.h> 33#include <asm/io.h>
31 34
32#include "stmmac.h" 35#include "stmmac.h"
@@ -131,10 +134,46 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
131 struct net_device *ndev = bus->priv; 134 struct net_device *ndev = bus->priv;
132 struct stmmac_priv *priv = netdev_priv(ndev); 135 struct stmmac_priv *priv = netdev_priv(ndev);
133 unsigned int mii_address = priv->hw->mii.addr; 136 unsigned int mii_address = priv->hw->mii.addr;
137 struct stmmac_mdio_bus_data *data = priv->plat->mdio_bus_data;
138
139#ifdef CONFIG_OF
140 if (priv->device->of_node) {
141 int reset_gpio, active_low;
142
143 if (data->reset_gpio < 0) {
144 struct device_node *np = priv->device->of_node;
145 if (!np)
146 return 0;
147
148 data->reset_gpio = of_get_named_gpio(np,
149 "snps,reset-gpio", 0);
150 if (data->reset_gpio < 0)
151 return 0;
152
153 data->active_low = of_property_read_bool(np,
154 "snps,reset-active-low");
155 of_property_read_u32_array(np,
156 "snps,reset-delays-us", data->delays, 3);
157 }
158
159 reset_gpio = data->reset_gpio;
160 active_low = data->active_low;
161
162 if (!gpio_request(reset_gpio, "mdio-reset")) {
163 gpio_direction_output(reset_gpio, active_low ? 1 : 0);
164 udelay(data->delays[0]);
165 gpio_set_value(reset_gpio, active_low ? 0 : 1);
166 udelay(data->delays[1]);
167 gpio_set_value(reset_gpio, active_low ? 1 : 0);
168 udelay(data->delays[2]);
169 gpio_free(reset_gpio);
170 }
171 }
172#endif
134 173
135 if (priv->plat->mdio_bus_data->phy_reset) { 174 if (data->phy_reset) {
136 pr_debug("stmmac_mdio_reset: calling phy_reset\n"); 175 pr_debug("stmmac_mdio_reset: calling phy_reset\n");
137 priv->plat->mdio_bus_data->phy_reset(priv->plat->bsp_priv); 176 data->phy_reset(priv->plat->bsp_priv);
138 } 177 }
139 178
140 /* This is a workaround for problems with the STE101P PHY. 179 /* This is a workaround for problems with the STE101P PHY.
@@ -172,6 +211,11 @@ int stmmac_mdio_register(struct net_device *ndev)
172 else 211 else
173 irqlist = priv->mii_irq; 212 irqlist = priv->mii_irq;
174 213
214#ifdef CONFIG_OF
215 if (priv->device->of_node)
216 mdio_bus_data->reset_gpio = -1;
217#endif
218
175 new_bus->name = "stmmac"; 219 new_bus->name = "stmmac";
176 new_bus->read = &stmmac_mdio_read; 220 new_bus->read = &stmmac_mdio_read;
177 new_bus->write = &stmmac_mdio_write; 221 new_bus->write = &stmmac_mdio_write;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1d3780f55ba2..03de76c7a177 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -34,12 +34,20 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
34 const char **mac) 34 const char **mac)
35{ 35{
36 struct device_node *np = pdev->dev.of_node; 36 struct device_node *np = pdev->dev.of_node;
37 struct stmmac_dma_cfg *dma_cfg;
37 38
38 if (!np) 39 if (!np)
39 return -ENODEV; 40 return -ENODEV;
40 41
41 *mac = of_get_mac_address(np); 42 *mac = of_get_mac_address(np);
42 plat->interface = of_get_phy_mode(np); 43 plat->interface = of_get_phy_mode(np);
44
45 plat->bus_id = of_alias_get_id(np, "ethernet");
46 if (plat->bus_id < 0)
47 plat->bus_id = 0;
48
49 of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr);
50
43 plat->mdio_bus_data = devm_kzalloc(&pdev->dev, 51 plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
44 sizeof(struct stmmac_mdio_bus_data), 52 sizeof(struct stmmac_mdio_bus_data),
45 GFP_KERNEL); 53 GFP_KERNEL);
@@ -56,6 +64,22 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
56 plat->pmt = 1; 64 plat->pmt = 1;
57 } 65 }
58 66
67 if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
68 of_device_is_compatible(np, "snps,dwmac-3.710")) {
69 plat->enh_desc = 1;
70 plat->bugged_jumbo = 1;
71 plat->force_sf_dma_mode = 1;
72 }
73
74 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
75 if (!dma_cfg)
76 return -ENOMEM;
77
78 plat->dma_cfg = dma_cfg;
79 of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
80 dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
81 dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
82
59 return 0; 83 return 0;
60} 84}
61#else 85#else
@@ -92,8 +116,10 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
92 if (IS_ERR(addr)) 116 if (IS_ERR(addr))
93 return PTR_ERR(addr); 117 return PTR_ERR(addr);
94 118
119 plat_dat = pdev->dev.platform_data;
95 if (pdev->dev.of_node) { 120 if (pdev->dev.of_node) {
96 plat_dat = devm_kzalloc(&pdev->dev, 121 if (!plat_dat)
122 plat_dat = devm_kzalloc(&pdev->dev,
97 sizeof(struct plat_stmmacenet_data), 123 sizeof(struct plat_stmmacenet_data),
98 GFP_KERNEL); 124 GFP_KERNEL);
99 if (!plat_dat) { 125 if (!plat_dat) {
@@ -106,8 +132,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
106 pr_err("%s: main dt probe failed", __func__); 132 pr_err("%s: main dt probe failed", __func__);
107 return ret; 133 return ret;
108 } 134 }
109 } else {
110 plat_dat = pdev->dev.platform_data;
111 } 135 }
112 136
113 /* Custom initialisation (if needed)*/ 137 /* Custom initialisation (if needed)*/
@@ -171,8 +195,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
171 if (priv->plat->exit) 195 if (priv->plat->exit)
172 priv->plat->exit(pdev); 196 priv->plat->exit(pdev);
173 197
174 platform_set_drvdata(pdev, NULL);
175
176 return ret; 198 return ret;
177} 199}
178 200
@@ -230,7 +252,9 @@ static const struct dev_pm_ops stmmac_pltfr_pm_ops;
230 252
231static const struct of_device_id stmmac_dt_ids[] = { 253static const struct of_device_id stmmac_dt_ids[] = {
232 { .compatible = "st,spear600-gmac"}, 254 { .compatible = "st,spear600-gmac"},
255 { .compatible = "snps,dwmac-3.610"},
233 { .compatible = "snps,dwmac-3.70a"}, 256 { .compatible = "snps,dwmac-3.70a"},
257 { .compatible = "snps,dwmac-3.710"},
234 { .compatible = "snps,dwmac"}, 258 { .compatible = "snps,dwmac"},
235 { /* sentinel */ } 259 { /* sentinel */ }
236}; 260};
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 4c682a3d0424..759441b29e53 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -808,44 +808,43 @@ static int cas_reset_mii_phy(struct cas *cp)
808 return limit <= 0; 808 return limit <= 0;
809} 809}
810 810
811static int cas_saturn_firmware_init(struct cas *cp) 811static void cas_saturn_firmware_init(struct cas *cp)
812{ 812{
813 const struct firmware *fw; 813 const struct firmware *fw;
814 const char fw_name[] = "sun/cassini.bin"; 814 const char fw_name[] = "sun/cassini.bin";
815 int err; 815 int err;
816 816
817 if (PHY_NS_DP83065 != cp->phy_id) 817 if (PHY_NS_DP83065 != cp->phy_id)
818 return 0; 818 return;
819 819
820 err = request_firmware(&fw, fw_name, &cp->pdev->dev); 820 err = request_firmware(&fw, fw_name, &cp->pdev->dev);
821 if (err) { 821 if (err) {
822 pr_err("Failed to load firmware \"%s\"\n", 822 pr_err("Failed to load firmware \"%s\"\n",
823 fw_name); 823 fw_name);
824 return err; 824 return;
825 } 825 }
826 if (fw->size < 2) { 826 if (fw->size < 2) {
827 pr_err("bogus length %zu in \"%s\"\n", 827 pr_err("bogus length %zu in \"%s\"\n",
828 fw->size, fw_name); 828 fw->size, fw_name);
829 err = -EINVAL;
830 goto out; 829 goto out;
831 } 830 }
832 cp->fw_load_addr= fw->data[1] << 8 | fw->data[0]; 831 cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];
833 cp->fw_size = fw->size - 2; 832 cp->fw_size = fw->size - 2;
834 cp->fw_data = vmalloc(cp->fw_size); 833 cp->fw_data = vmalloc(cp->fw_size);
835 if (!cp->fw_data) { 834 if (!cp->fw_data)
836 err = -ENOMEM;
837 goto out; 835 goto out;
838 }
839 memcpy(cp->fw_data, &fw->data[2], cp->fw_size); 836 memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
840out: 837out:
841 release_firmware(fw); 838 release_firmware(fw);
842 return err;
843} 839}
844 840
845static void cas_saturn_firmware_load(struct cas *cp) 841static void cas_saturn_firmware_load(struct cas *cp)
846{ 842{
847 int i; 843 int i;
848 844
845 if (!cp->fw_data)
846 return;
847
849 cas_phy_powerdown(cp); 848 cas_phy_powerdown(cp);
850 849
851 /* expanded memory access mode */ 850 /* expanded memory access mode */
@@ -5083,8 +5082,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5083 if (cas_check_invariants(cp)) 5082 if (cas_check_invariants(cp))
5084 goto err_out_iounmap; 5083 goto err_out_iounmap;
5085 if (cp->cas_flags & CAS_FLAG_SATURN) 5084 if (cp->cas_flags & CAS_FLAG_SATURN)
5086 if (cas_saturn_firmware_init(cp)) 5085 cas_saturn_firmware_init(cp);
5087 goto err_out_iounmap;
5088 5086
5089 cp->init_block = (struct cas_init_block *) 5087 cp->init_block = (struct cas_init_block *)
5090 pci_alloc_consistent(pdev, sizeof(struct cas_init_block), 5088 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 95cff98d8a34..fa322409bff3 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -10108,7 +10108,7 @@ static int niu_of_probe(struct platform_device *op)
10108 goto err_out_iounmap; 10108 goto err_out_iounmap;
10109 } 10109 }
10110 10110
10111 dev_set_drvdata(&op->dev, dev); 10111 platform_set_drvdata(op, dev);
10112 10112
10113 niu_device_announce(np); 10113 niu_device_announce(np);
10114 10114
@@ -10145,7 +10145,7 @@ err_out:
10145 10145
10146static int niu_of_remove(struct platform_device *op) 10146static int niu_of_remove(struct platform_device *op)
10147{ 10147{
10148 struct net_device *dev = dev_get_drvdata(&op->dev); 10148 struct net_device *dev = platform_get_drvdata(op);
10149 10149
10150 if (dev) { 10150 if (dev) {
10151 struct niu *np = netdev_priv(dev); 10151 struct niu *np = netdev_priv(dev);
@@ -10175,7 +10175,6 @@ static int niu_of_remove(struct platform_device *op)
10175 niu_put_parent(np); 10175 niu_put_parent(np);
10176 10176
10177 free_netdev(dev); 10177 free_netdev(dev);
10178 dev_set_drvdata(&op->dev, NULL);
10179 } 10178 }
10180 return 0; 10179 return 0;
10181} 10180}
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 054975939a18..0d43fa9ff980 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -995,7 +995,6 @@ static void bigmac_set_multicast(struct net_device *dev)
995 struct bigmac *bp = netdev_priv(dev); 995 struct bigmac *bp = netdev_priv(dev);
996 void __iomem *bregs = bp->bregs; 996 void __iomem *bregs = bp->bregs;
997 struct netdev_hw_addr *ha; 997 struct netdev_hw_addr *ha;
998 int i;
999 u32 tmp, crc; 998 u32 tmp, crc;
1000 999
1001 /* Disable the receiver. The bit self-clears when 1000 /* Disable the receiver. The bit self-clears when
@@ -1017,10 +1016,7 @@ static void bigmac_set_multicast(struct net_device *dev)
1017 tmp |= BIGMAC_RXCFG_PMISC; 1016 tmp |= BIGMAC_RXCFG_PMISC;
1018 sbus_writel(tmp, bregs + BMAC_RXCFG); 1017 sbus_writel(tmp, bregs + BMAC_RXCFG);
1019 } else { 1018 } else {
1020 u16 hash_table[4]; 1019 u16 hash_table[4] = { 0 };
1021
1022 for (i = 0; i < 4; i++)
1023 hash_table[i] = 0;
1024 1020
1025 netdev_for_each_mc_addr(ha, dev) { 1021 netdev_for_each_mc_addr(ha, dev) {
1026 crc = ether_crc_le(6, ha->addr); 1022 crc = ether_crc_le(6, ha->addr);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 5f3f9d52757d..e62df2b81302 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -3028,15 +3028,4 @@ static struct pci_driver gem_driver = {
3028#endif /* CONFIG_PM */ 3028#endif /* CONFIG_PM */
3029}; 3029};
3030 3030
3031static int __init gem_init(void) 3031module_pci_driver(gem_driver);
3032{
3033 return pci_register_driver(&gem_driver);
3034}
3035
3036static void __exit gem_cleanup(void)
3037{
3038 pci_unregister_driver(&gem_driver);
3039}
3040
3041module_init(gem_init);
3042module_exit(gem_cleanup);
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 436fa9d5a071..171f5b0809c4 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2506,7 +2506,7 @@ static struct quattro *quattro_sbus_find(struct platform_device *child)
2506 struct quattro *qp; 2506 struct quattro *qp;
2507 2507
2508 op = to_platform_device(parent); 2508 op = to_platform_device(parent);
2509 qp = dev_get_drvdata(&op->dev); 2509 qp = platform_get_drvdata(op);
2510 if (qp) 2510 if (qp)
2511 return qp; 2511 return qp;
2512 2512
@@ -2521,7 +2521,7 @@ static struct quattro *quattro_sbus_find(struct platform_device *child)
2521 qp->next = qfe_sbus_list; 2521 qp->next = qfe_sbus_list;
2522 qfe_sbus_list = qp; 2522 qfe_sbus_list = qp;
2523 2523
2524 dev_set_drvdata(&op->dev, qp); 2524 platform_set_drvdata(op, qp);
2525 } 2525 }
2526 return qp; 2526 return qp;
2527} 2527}
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 8182591bc187..b072f4dba033 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -767,7 +767,7 @@ static struct sunqec *get_qec(struct platform_device *child)
767 struct platform_device *op = to_platform_device(child->dev.parent); 767 struct platform_device *op = to_platform_device(child->dev.parent);
768 struct sunqec *qecp; 768 struct sunqec *qecp;
769 769
770 qecp = dev_get_drvdata(&op->dev); 770 qecp = platform_get_drvdata(op);
771 if (!qecp) { 771 if (!qecp) {
772 qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL); 772 qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
773 if (qecp) { 773 if (qecp) {
@@ -801,7 +801,7 @@ static struct sunqec *get_qec(struct platform_device *child)
801 goto fail; 801 goto fail;
802 } 802 }
803 803
804 dev_set_drvdata(&op->dev, qecp); 804 platform_set_drvdata(op, qecp);
805 805
806 qecp->next_module = root_qec_dev; 806 qecp->next_module = root_qec_dev;
807 root_qec_dev = qecp; 807 root_qec_dev = qecp;
@@ -902,7 +902,7 @@ static int qec_ether_init(struct platform_device *op)
902 if (res) 902 if (res)
903 goto fail; 903 goto fail;
904 904
905 dev_set_drvdata(&op->dev, qe); 905 platform_set_drvdata(op, qe);
906 906
907 printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel, 907 printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
908 dev->dev_addr); 908 dev->dev_addr);
@@ -934,7 +934,7 @@ static int qec_sbus_probe(struct platform_device *op)
934 934
935static int qec_sbus_remove(struct platform_device *op) 935static int qec_sbus_remove(struct platform_device *op)
936{ 936{
937 struct sunqe *qp = dev_get_drvdata(&op->dev); 937 struct sunqe *qp = platform_get_drvdata(op);
938 struct net_device *net_dev = qp->dev; 938 struct net_device *net_dev = qp->dev;
939 939
940 unregister_netdev(net_dev); 940 unregister_netdev(net_dev);
@@ -948,8 +948,6 @@ static int qec_sbus_remove(struct platform_device *op)
948 948
949 free_netdev(net_dev); 949 free_netdev(net_dev);
950 950
951 dev_set_drvdata(&op->dev, NULL);
952
953 return 0; 951 return 0;
954} 952}
955 953
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1df0ff3839e8..3df56840a3b9 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1239,6 +1239,8 @@ static int vnet_port_remove(struct vio_dev *vdev)
1239 dev_set_drvdata(&vdev->dev, NULL); 1239 dev_set_drvdata(&vdev->dev, NULL);
1240 1240
1241 kfree(port); 1241 kfree(port);
1242
1243 unregister_netdev(vp->dev);
1242 } 1244 }
1243 return 0; 1245 return 0;
1244} 1246}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index d1a769f35f9d..05a1674e204f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -35,6 +35,7 @@
35#include <linux/if_vlan.h> 35#include <linux/if_vlan.h>
36 36
37#include <linux/platform_data/cpsw.h> 37#include <linux/platform_data/cpsw.h>
38#include <linux/pinctrl/consumer.h>
38 39
39#include "cpsw_ale.h" 40#include "cpsw_ale.h"
40#include "cpts.h" 41#include "cpts.h"
@@ -1554,6 +1555,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1554 if (mac_addr) 1555 if (mac_addr)
1555 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); 1556 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
1556 1557
1558 slave_data->phy_if = of_get_phy_mode(slave_node);
1559
1557 if (data->dual_emac) { 1560 if (data->dual_emac) {
1558 if (of_property_read_u32(slave_node, "dual_emac_res_vlan", 1561 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
1559 &prop)) { 1562 &prop)) {
@@ -1689,6 +1692,9 @@ static int cpsw_probe(struct platform_device *pdev)
1689 */ 1692 */
1690 pm_runtime_enable(&pdev->dev); 1693 pm_runtime_enable(&pdev->dev);
1691 1694
1695 /* Select default pin state */
1696 pinctrl_pm_select_default_state(&pdev->dev);
1697
1692 if (cpsw_probe_dt(&priv->data, pdev)) { 1698 if (cpsw_probe_dt(&priv->data, pdev)) {
1693 pr_err("cpsw: platform data missing\n"); 1699 pr_err("cpsw: platform data missing\n");
1694 ret = -ENODEV; 1700 ret = -ENODEV;
@@ -1698,10 +1704,10 @@ static int cpsw_probe(struct platform_device *pdev)
1698 1704
1699 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { 1705 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
1700 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); 1706 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
1701 pr_info("Detected MACID = %pM", priv->mac_addr); 1707 pr_info("Detected MACID = %pM\n", priv->mac_addr);
1702 } else { 1708 } else {
1703 eth_random_addr(priv->mac_addr); 1709 eth_random_addr(priv->mac_addr);
1704 pr_info("Random MACID = %pM", priv->mac_addr); 1710 pr_info("Random MACID = %pM\n", priv->mac_addr);
1705 } 1711 }
1706 1712
1707 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); 1713 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
@@ -1940,7 +1946,6 @@ static int cpsw_remove(struct platform_device *pdev)
1940 struct cpsw_priv *priv = netdev_priv(ndev); 1946 struct cpsw_priv *priv = netdev_priv(ndev);
1941 int i; 1947 int i;
1942 1948
1943 platform_set_drvdata(pdev, NULL);
1944 if (priv->data.dual_emac) 1949 if (priv->data.dual_emac)
1945 unregister_netdev(cpsw_get_slave_ndev(priv, 1)); 1950 unregister_netdev(cpsw_get_slave_ndev(priv, 1));
1946 unregister_netdev(ndev); 1951 unregister_netdev(ndev);
@@ -1981,6 +1986,9 @@ static int cpsw_suspend(struct device *dev)
1981 soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset); 1986 soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset);
1982 pm_runtime_put_sync(&pdev->dev); 1987 pm_runtime_put_sync(&pdev->dev);
1983 1988
1989 /* Select sleep pin state */
1990 pinctrl_pm_select_sleep_state(&pdev->dev);
1991
1984 return 0; 1992 return 0;
1985} 1993}
1986 1994
@@ -1990,6 +1998,10 @@ static int cpsw_resume(struct device *dev)
1990 struct net_device *ndev = platform_get_drvdata(pdev); 1998 struct net_device *ndev = platform_get_drvdata(pdev);
1991 1999
1992 pm_runtime_get_sync(&pdev->dev); 2000 pm_runtime_get_sync(&pdev->dev);
2001
2002 /* Select default pin state */
2003 pinctrl_pm_select_default_state(&pdev->dev);
2004
1993 if (netif_running(ndev)) 2005 if (netif_running(ndev))
1994 cpsw_ndo_open(ndev); 2006 cpsw_ndo_open(ndev);
1995 return 0; 2007 return 0;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 053c84fd0853..031ebc81b50c 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -64,6 +64,7 @@
64#define CPDMA_DESC_TO_PORT_EN BIT(20) 64#define CPDMA_DESC_TO_PORT_EN BIT(20)
65#define CPDMA_TO_PORT_SHIFT 16 65#define CPDMA_TO_PORT_SHIFT 16
66#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16)) 66#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16))
67#define CPDMA_DESC_CRC_LEN 4
67 68
68#define CPDMA_TEARDOWN_VALUE 0xfffffffc 69#define CPDMA_TEARDOWN_VALUE 0xfffffffc
69 70
@@ -805,6 +806,10 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
805 status = -EBUSY; 806 status = -EBUSY;
806 goto unlock_ret; 807 goto unlock_ret;
807 } 808 }
809
810 if (status & CPDMA_DESC_PASS_CRC)
811 outlen -= CPDMA_DESC_CRC_LEN;
812
808 status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE | 813 status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
809 CPDMA_DESC_PORT_MASK); 814 CPDMA_DESC_PORT_MASK);
810 815
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 860e15ddfbcb..07b176bcf929 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1532,7 +1532,7 @@ static int emac_dev_open(struct net_device *ndev)
1532 struct device *emac_dev = &ndev->dev; 1532 struct device *emac_dev = &ndev->dev;
1533 u32 cnt; 1533 u32 cnt;
1534 struct resource *res; 1534 struct resource *res;
1535 int q, m, ret; 1535 int ret;
1536 int i = 0; 1536 int i = 0;
1537 int k = 0; 1537 int k = 0;
1538 struct emac_priv *priv = netdev_priv(ndev); 1538 struct emac_priv *priv = netdev_priv(ndev);
@@ -1567,8 +1567,9 @@ static int emac_dev_open(struct net_device *ndev)
1567 1567
1568 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 1568 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
1569 for (i = res->start; i <= res->end; i++) { 1569 for (i = res->start; i <= res->end; i++) {
1570 if (request_irq(i, emac_irq, IRQF_DISABLED, 1570 if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
1571 ndev->name, ndev)) 1571 IRQF_DISABLED,
1572 ndev->name, ndev))
1572 goto rollback; 1573 goto rollback;
1573 } 1574 }
1574 k++; 1575 k++;
@@ -1641,15 +1642,7 @@ static int emac_dev_open(struct net_device *ndev)
1641 1642
1642rollback: 1643rollback:
1643 1644
1644 dev_err(emac_dev, "DaVinci EMAC: request_irq() failed"); 1645 dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
1645
1646 for (q = k; k >= 0; k--) {
1647 for (m = i; m >= res->start; m--)
1648 free_irq(m, ndev);
1649 res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1);
1650 m = res->end;
1651 }
1652
1653 ret = -EBUSY; 1646 ret = -EBUSY;
1654err: 1647err:
1655 pm_runtime_put(&priv->pdev->dev); 1648 pm_runtime_put(&priv->pdev->dev);
@@ -1667,9 +1660,6 @@ err:
1667 */ 1660 */
1668static int emac_dev_stop(struct net_device *ndev) 1661static int emac_dev_stop(struct net_device *ndev)
1669{ 1662{
1670 struct resource *res;
1671 int i = 0;
1672 int irq_num;
1673 struct emac_priv *priv = netdev_priv(ndev); 1663 struct emac_priv *priv = netdev_priv(ndev);
1674 struct device *emac_dev = &ndev->dev; 1664 struct device *emac_dev = &ndev->dev;
1675 1665
@@ -1685,13 +1675,6 @@ static int emac_dev_stop(struct net_device *ndev)
1685 if (priv->phydev) 1675 if (priv->phydev)
1686 phy_disconnect(priv->phydev); 1676 phy_disconnect(priv->phydev);
1687 1677
1688 /* Free IRQ */
1689 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
1690 for (irq_num = res->start; irq_num <= res->end; irq_num++)
1691 free_irq(irq_num, priv->ndev);
1692 i++;
1693 }
1694
1695 if (netif_msg_drv(priv)) 1678 if (netif_msg_drv(priv))
1696 dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); 1679 dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
1697 1680
@@ -1771,29 +1754,22 @@ static const struct net_device_ops emac_netdev_ops = {
1771#endif 1754#endif
1772}; 1755};
1773 1756
1774#ifdef CONFIG_OF 1757static struct emac_platform_data *
1775static struct emac_platform_data 1758davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1776 *davinci_emac_of_get_pdata(struct platform_device *pdev,
1777 struct emac_priv *priv)
1778{ 1759{
1779 struct device_node *np; 1760 struct device_node *np;
1780 struct emac_platform_data *pdata = NULL; 1761 struct emac_platform_data *pdata = NULL;
1781 const u8 *mac_addr; 1762 const u8 *mac_addr;
1782 u32 data;
1783 int ret;
1784 1763
1785 pdata = pdev->dev.platform_data; 1764 if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
1786 if (!pdata) { 1765 return pdev->dev.platform_data;
1787 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1766
1788 if (!pdata) 1767 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1789 goto nodata; 1768 if (!pdata)
1790 } 1769 return NULL;
1791 1770
1792 np = pdev->dev.of_node; 1771 np = pdev->dev.of_node;
1793 if (!np) 1772 pdata->version = EMAC_VERSION_2;
1794 goto nodata;
1795 else
1796 pdata->version = EMAC_VERSION_2;
1797 1773
1798 if (!is_valid_ether_addr(pdata->mac_addr)) { 1774 if (!is_valid_ether_addr(pdata->mac_addr)) {
1799 mac_addr = of_get_mac_address(np); 1775 mac_addr = of_get_mac_address(np);
@@ -1801,47 +1777,31 @@ static struct emac_platform_data
1801 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN); 1777 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
1802 } 1778 }
1803 1779
1804 ret = of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", &data); 1780 of_property_read_u32(np, "ti,davinci-ctrl-reg-offset",
1805 if (!ret) 1781 &pdata->ctrl_reg_offset);
1806 pdata->ctrl_reg_offset = data;
1807 1782
1808 ret = of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset", 1783 of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset",
1809 &data); 1784 &pdata->ctrl_mod_reg_offset);
1810 if (!ret)
1811 pdata->ctrl_mod_reg_offset = data;
1812 1785
1813 ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", &data); 1786 of_property_read_u32(np, "ti,davinci-ctrl-ram-offset",
1814 if (!ret) 1787 &pdata->ctrl_ram_offset);
1815 pdata->ctrl_ram_offset = data;
1816 1788
1817 ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-size", &data); 1789 of_property_read_u32(np, "ti,davinci-ctrl-ram-size",
1818 if (!ret) 1790 &pdata->ctrl_ram_size);
1819 pdata->ctrl_ram_size = data;
1820 1791
1821 ret = of_property_read_u32(np, "ti,davinci-rmii-en", &data); 1792 of_property_read_u8(np, "ti,davinci-rmii-en", &pdata->rmii_en);
1822 if (!ret)
1823 pdata->rmii_en = data;
1824 1793
1825 ret = of_property_read_u32(np, "ti,davinci-no-bd-ram", &data); 1794 pdata->no_bd_ram = of_property_read_bool(np, "ti,davinci-no-bd-ram");
1826 if (!ret)
1827 pdata->no_bd_ram = data;
1828 1795
1829 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 1796 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
1830 if (!priv->phy_node) 1797 if (!priv->phy_node)
1831 pdata->phy_id = ""; 1798 pdata->phy_id = "";
1832 1799
1833 pdev->dev.platform_data = pdata; 1800 pdev->dev.platform_data = pdata;
1834nodata: 1801
1835 return pdata; 1802 return pdata;
1836} 1803}
1837#else 1804
1838static struct emac_platform_data
1839 *davinci_emac_of_get_pdata(struct platform_device *pdev,
1840 struct emac_priv *priv)
1841{
1842 return pdev->dev.platform_data;
1843}
1844#endif
1845/** 1805/**
1846 * davinci_emac_probe - EMAC device probe 1806 * davinci_emac_probe - EMAC device probe
1847 * @pdev: The DaVinci EMAC device that we are removing 1807 * @pdev: The DaVinci EMAC device that we are removing
@@ -1856,7 +1816,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1856 struct resource *res; 1816 struct resource *res;
1857 struct net_device *ndev; 1817 struct net_device *ndev;
1858 struct emac_priv *priv; 1818 struct emac_priv *priv;
1859 unsigned long size, hw_ram_addr; 1819 unsigned long hw_ram_addr;
1860 struct emac_platform_data *pdata; 1820 struct emac_platform_data *pdata;
1861 struct device *emac_dev; 1821 struct device *emac_dev;
1862 struct cpdma_params dma_params; 1822 struct cpdma_params dma_params;
@@ -1907,25 +1867,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
1907 emac_dev = &ndev->dev; 1867 emac_dev = &ndev->dev;
1908 /* Get EMAC platform data */ 1868 /* Get EMAC platform data */
1909 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1869 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1910 if (!res) {
1911 dev_err(&pdev->dev,"error getting res\n");
1912 rc = -ENOENT;
1913 goto no_pdata;
1914 }
1915
1916 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; 1870 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
1917 size = resource_size(res); 1871 priv->remap_addr = devm_ioremap_resource(&pdev->dev, res);
1918 if (!devm_request_mem_region(&pdev->dev, res->start, 1872 if (IS_ERR(priv->remap_addr)) {
1919 size, ndev->name)) { 1873 rc = PTR_ERR(priv->remap_addr);
1920 dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
1921 rc = -ENXIO;
1922 goto no_pdata;
1923 }
1924
1925 priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size);
1926 if (!priv->remap_addr) {
1927 dev_err(&pdev->dev, "unable to map IO\n");
1928 rc = -ENOMEM;
1929 goto no_pdata; 1874 goto no_pdata;
1930 } 1875 }
1931 priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; 1876 priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
@@ -2037,8 +1982,6 @@ static int davinci_emac_remove(struct platform_device *pdev)
2037 1982
2038 dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); 1983 dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
2039 1984
2040 platform_set_drvdata(pdev, NULL);
2041
2042 if (priv->txchan) 1985 if (priv->txchan)
2043 cpdma_chan_destroy(priv->txchan); 1986 cpdma_chan_destroy(priv->txchan);
2044 if (priv->rxchan) 1987 if (priv->rxchan)
@@ -2078,11 +2021,13 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
2078 .resume = davinci_emac_resume, 2021 .resume = davinci_emac_resume,
2079}; 2022};
2080 2023
2024#if IS_ENABLED(CONFIG_OF)
2081static const struct of_device_id davinci_emac_of_match[] = { 2025static const struct of_device_id davinci_emac_of_match[] = {
2082 {.compatible = "ti,davinci-dm6467-emac", }, 2026 {.compatible = "ti,davinci-dm6467-emac", },
2083 {}, 2027 {},
2084}; 2028};
2085MODULE_DEVICE_TABLE(of, davinci_emac_of_match); 2029MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
2030#endif
2086 2031
2087/* davinci_emac_driver: EMAC platform driver structure */ 2032/* davinci_emac_driver: EMAC platform driver structure */
2088static struct platform_driver davinci_emac_driver = { 2033static struct platform_driver davinci_emac_driver = {
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index c47f0dbcebb5..16ddfc348062 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -38,6 +38,7 @@
38#include <linux/davinci_emac.h> 38#include <linux/davinci_emac.h>
39#include <linux/of.h> 39#include <linux/of.h>
40#include <linux/of_device.h> 40#include <linux/of_device.h>
41#include <linux/pinctrl/consumer.h>
41 42
42/* 43/*
43 * This timeout definition is a worst-case ultra defensive measure against 44 * This timeout definition is a worst-case ultra defensive measure against
@@ -291,6 +292,7 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
291 return 0; 292 return 0;
292} 293}
293 294
295#if IS_ENABLED(CONFIG_OF)
294static int davinci_mdio_probe_dt(struct mdio_platform_data *data, 296static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
295 struct platform_device *pdev) 297 struct platform_device *pdev)
296{ 298{
@@ -308,7 +310,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
308 310
309 return 0; 311 return 0;
310} 312}
311 313#endif
312 314
313static int davinci_mdio_probe(struct platform_device *pdev) 315static int davinci_mdio_probe(struct platform_device *pdev)
314{ 316{
@@ -347,6 +349,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
347 data->bus->parent = dev; 349 data->bus->parent = dev;
348 data->bus->priv = data; 350 data->bus->priv = data;
349 351
352 /* Select default pin state */
353 pinctrl_pm_select_default_state(&pdev->dev);
354
350 pm_runtime_enable(&pdev->dev); 355 pm_runtime_enable(&pdev->dev);
351 pm_runtime_get_sync(&pdev->dev); 356 pm_runtime_get_sync(&pdev->dev);
352 data->clk = clk_get(&pdev->dev, "fck"); 357 data->clk = clk_get(&pdev->dev, "fck");
@@ -453,6 +458,9 @@ static int davinci_mdio_suspend(struct device *dev)
453 spin_unlock(&data->lock); 458 spin_unlock(&data->lock);
454 pm_runtime_put_sync(data->dev); 459 pm_runtime_put_sync(data->dev);
455 460
461 /* Select sleep pin state */
462 pinctrl_pm_select_sleep_state(dev);
463
456 return 0; 464 return 0;
457} 465}
458 466
@@ -460,6 +468,9 @@ static int davinci_mdio_resume(struct device *dev)
460{ 468{
461 struct davinci_mdio_data *data = dev_get_drvdata(dev); 469 struct davinci_mdio_data *data = dev_get_drvdata(dev);
462 470
471 /* Select default pin state */
472 pinctrl_pm_select_default_state(dev);
473
463 pm_runtime_get_sync(data->dev); 474 pm_runtime_get_sync(data->dev);
464 475
465 spin_lock(&data->lock); 476 spin_lock(&data->lock);
@@ -477,11 +488,13 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = {
477 .resume_early = davinci_mdio_resume, 488 .resume_early = davinci_mdio_resume,
478}; 489};
479 490
491#if IS_ENABLED(CONFIG_OF)
480static const struct of_device_id davinci_mdio_of_mtable[] = { 492static const struct of_device_id davinci_mdio_of_mtable[] = {
481 { .compatible = "ti,davinci_mdio", }, 493 { .compatible = "ti,davinci_mdio", },
482 { /* sentinel */ }, 494 { /* sentinel */ },
483}; 495};
484MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); 496MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
497#endif
485 498
486static struct platform_driver davinci_mdio_driver = { 499static struct platform_driver davinci_mdio_driver = {
487 .driver = { 500 .driver = {
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 60c400f6d01f..591437e59b90 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -372,7 +372,7 @@ static int tlan_resume(struct pci_dev *pdev)
372 372
373 pci_set_power_state(pdev, PCI_D0); 373 pci_set_power_state(pdev, PCI_D0);
374 pci_restore_state(pdev); 374 pci_restore_state(pdev);
375 pci_enable_wake(pdev, 0, 0); 375 pci_enable_wake(pdev, PCI_D0, 0);
376 netif_device_attach(dev); 376 netif_device_attach(dev);
377 377
378 if (netif_running(dev)) 378 if (netif_running(dev))
@@ -533,7 +533,6 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
533 /* This is a hack. We need to know which board structure 533 /* This is a hack. We need to know which board structure
534 * is suited for this adapter */ 534 * is suited for this adapter */
535 device_id = inw(ioaddr + EISA_ID2); 535 device_id = inw(ioaddr + EISA_ID2);
536 priv->is_eisa = 1;
537 if (device_id == 0x20F1) { 536 if (device_id == 0x20F1) {
538 priv->adapter = &board_info[13]; /* NetFlex-3/E */ 537 priv->adapter = &board_info[13]; /* NetFlex-3/E */
539 priv->adapter_rev = 23; /* TLAN 2.3 */ 538 priv->adapter_rev = 23; /* TLAN 2.3 */
diff --git a/drivers/net/ethernet/ti/tlan.h b/drivers/net/ethernet/ti/tlan.h
index 5fc98a8e4889..2eb33a250788 100644
--- a/drivers/net/ethernet/ti/tlan.h
+++ b/drivers/net/ethernet/ti/tlan.h
@@ -207,7 +207,6 @@ struct tlan_priv {
207 u8 tlan_full_duplex; 207 u8 tlan_full_duplex;
208 spinlock_t lock; 208 spinlock_t lock;
209 u8 link; 209 u8 link;
210 u8 is_eisa;
211 struct work_struct tlan_tqueue; 210 struct work_struct tlan_tqueue;
212 u8 neg_be_verbose; 211 u8 neg_be_verbose;
213}; 212};
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index fe256094db35..a971b9cca564 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -2209,18 +2209,6 @@ MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
2209module_param_named(duplex, options.duplex, int, 0); 2209module_param_named(duplex, options.duplex, int, 0);
2210MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); 2210MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
2211 2211
2212static int __init tc35815_init_module(void) 2212module_pci_driver(tc35815_pci_driver);
2213{
2214 return pci_register_driver(&tc35815_pci_driver);
2215}
2216
2217static void __exit tc35815_cleanup_module(void)
2218{
2219 pci_unregister_driver(&tc35815_pci_driver);
2220}
2221
2222module_init(tc35815_init_module);
2223module_exit(tc35815_cleanup_module);
2224
2225MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver"); 2213MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver");
2226MODULE_LICENSE("GPL"); 2214MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 3c69a0460832..01bdc6ca0755 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1682,7 +1682,6 @@ static int tsi108_ether_remove(struct platform_device *pdev)
1682 1682
1683 unregister_netdev(dev); 1683 unregister_netdev(dev);
1684 tsi108_stop_ethernet(dev); 1684 tsi108_stop_ethernet(dev);
1685 platform_set_drvdata(pdev, NULL);
1686 iounmap(priv->regs); 1685 iounmap(priv->regs);
1687 iounmap(priv->phyregs); 1686 iounmap(priv->phyregs);
1688 free_netdev(dev); 1687 free_netdev(dev);
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 68a9ba66feba..8a049a2b4474 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -5,7 +5,6 @@
5config NET_VENDOR_VIA 5config NET_VENDOR_VIA
6 bool "VIA devices" 6 bool "VIA devices"
7 default y 7 default y
8 depends on PCI
9 ---help--- 8 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 9 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 10 and read the Ethernet-HOWTO, available from
@@ -22,7 +21,6 @@ config VIA_RHINE
22 tristate "VIA Rhine support" 21 tristate "VIA Rhine support"
23 depends on PCI 22 depends on PCI
24 select CRC32 23 select CRC32
25 select NET_CORE
26 select MII 24 select MII
27 ---help--- 25 ---help---
28 If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A), 26 If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A),
@@ -45,10 +43,9 @@ config VIA_RHINE_MMIO
45 43
46config VIA_VELOCITY 44config VIA_VELOCITY
47 tristate "VIA Velocity support" 45 tristate "VIA Velocity support"
48 depends on PCI 46 depends on (PCI || USE_OF)
49 select CRC32 47 select CRC32
50 select CRC_CCITT 48 select CRC_CCITT
51 select NET_CORE
52 select MII 49 select MII
53 ---help--- 50 ---help---
54 If you have a VIA "Velocity" based network card say Y here. 51 If you have a VIA "Velocity" based network card say Y here.
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index fb6248956ee2..1d6dc41f755d 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -46,6 +46,7 @@
46#include <linux/types.h> 46#include <linux/types.h>
47#include <linux/bitops.h> 47#include <linux/bitops.h>
48#include <linux/init.h> 48#include <linux/init.h>
49#include <linux/dma-mapping.h>
49#include <linux/mm.h> 50#include <linux/mm.h>
50#include <linux/errno.h> 51#include <linux/errno.h>
51#include <linux/ioport.h> 52#include <linux/ioport.h>
@@ -64,7 +65,11 @@
64#include <linux/if.h> 65#include <linux/if.h>
65#include <linux/uaccess.h> 66#include <linux/uaccess.h>
66#include <linux/proc_fs.h> 67#include <linux/proc_fs.h>
68#include <linux/of_address.h>
69#include <linux/of_device.h>
70#include <linux/of_irq.h>
67#include <linux/inetdevice.h> 71#include <linux/inetdevice.h>
72#include <linux/platform_device.h>
68#include <linux/reboot.h> 73#include <linux/reboot.h>
69#include <linux/ethtool.h> 74#include <linux/ethtool.h>
70#include <linux/mii.h> 75#include <linux/mii.h>
@@ -79,10 +84,24 @@
79 84
80#include "via-velocity.h" 85#include "via-velocity.h"
81 86
87enum velocity_bus_type {
88 BUS_PCI,
89 BUS_PLATFORM,
90};
82 91
83static int velocity_nics; 92static int velocity_nics;
84static int msglevel = MSG_LEVEL_INFO; 93static int msglevel = MSG_LEVEL_INFO;
85 94
95static void velocity_set_power_state(struct velocity_info *vptr, char state)
96{
97 void *addr = vptr->mac_regs;
98
99 if (vptr->pdev)
100 pci_set_power_state(vptr->pdev, state);
101 else
102 writeb(state, addr + 0x154);
103}
104
86/** 105/**
87 * mac_get_cam_mask - Read a CAM mask 106 * mac_get_cam_mask - Read a CAM mask
88 * @regs: register block for this velocity 107 * @regs: register block for this velocity
@@ -361,12 +380,23 @@ static struct velocity_info_tbl chip_info_table[] = {
361 * Describe the PCI device identifiers that we support in this 380 * Describe the PCI device identifiers that we support in this
362 * device driver. Used for hotplug autoloading. 381 * device driver. Used for hotplug autoloading.
363 */ 382 */
364static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = { 383
384static DEFINE_PCI_DEVICE_TABLE(velocity_pci_id_table) = {
365 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, 385 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
366 { } 386 { }
367}; 387};
368 388
369MODULE_DEVICE_TABLE(pci, velocity_id_table); 389MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
390
391/**
392 * Describe the OF device identifiers that we support in this
393 * device driver. Used for devicetree nodes.
394 */
395static struct of_device_id velocity_of_ids[] = {
396 { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
397 { /* Sentinel */ },
398};
399MODULE_DEVICE_TABLE(of, velocity_of_ids);
370 400
371/** 401/**
372 * get_chip_name - identifier to name 402 * get_chip_name - identifier to name
@@ -385,29 +415,6 @@ static const char *get_chip_name(enum chip_type chip_id)
385} 415}
386 416
387/** 417/**
388 * velocity_remove1 - device unplug
389 * @pdev: PCI device being removed
390 *
391 * Device unload callback. Called on an unplug or on module
392 * unload for each active device that is present. Disconnects
393 * the device from the network layer and frees all the resources
394 */
395static void velocity_remove1(struct pci_dev *pdev)
396{
397 struct net_device *dev = pci_get_drvdata(pdev);
398 struct velocity_info *vptr = netdev_priv(dev);
399
400 unregister_netdev(dev);
401 iounmap(vptr->mac_regs);
402 pci_release_regions(pdev);
403 pci_disable_device(pdev);
404 pci_set_drvdata(pdev, NULL);
405 free_netdev(dev);
406
407 velocity_nics--;
408}
409
410/**
411 * velocity_set_int_opt - parser for integer options 418 * velocity_set_int_opt - parser for integer options
412 * @opt: pointer to option value 419 * @opt: pointer to option value
413 * @val: value the user requested (or -1 for default) 420 * @val: value the user requested (or -1 for default)
@@ -998,9 +1005,9 @@ static void velocity_print_link_status(struct velocity_info *vptr)
998{ 1005{
999 1006
1000 if (vptr->mii_status & VELOCITY_LINK_FAIL) { 1007 if (vptr->mii_status & VELOCITY_LINK_FAIL) {
1001 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name); 1008 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name);
1002 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { 1009 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1003 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name); 1010 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name);
1004 1011
1005 if (vptr->mii_status & VELOCITY_SPEED_1000) 1012 if (vptr->mii_status & VELOCITY_SPEED_1000)
1006 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps"); 1013 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
@@ -1014,7 +1021,7 @@ static void velocity_print_link_status(struct velocity_info *vptr)
1014 else 1021 else
1015 VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n"); 1022 VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
1016 } else { 1023 } else {
1017 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name); 1024 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name);
1018 switch (vptr->options.spd_dpx) { 1025 switch (vptr->options.spd_dpx) {
1019 case SPD_DPX_1000_FULL: 1026 case SPD_DPX_1000_FULL:
1020 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n"); 1027 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
@@ -1180,6 +1187,17 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
1180 u16 BMCR; 1187 u16 BMCR;
1181 1188
1182 switch (PHYID_GET_PHY_ID(vptr->phy_id)) { 1189 switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1190 case PHYID_ICPLUS_IP101A:
1191 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
1192 MII_ADVERTISE, vptr->mac_regs);
1193 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1194 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
1195 vptr->mac_regs);
1196 else
1197 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
1198 vptr->mac_regs);
1199 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1200 break;
1183 case PHYID_CICADA_CS8201: 1201 case PHYID_CICADA_CS8201:
1184 /* 1202 /*
1185 * Reset to hardware default 1203 * Reset to hardware default
@@ -1311,6 +1329,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
1311 enum velocity_init_type type) 1329 enum velocity_init_type type)
1312{ 1330{
1313 struct mac_regs __iomem *regs = vptr->mac_regs; 1331 struct mac_regs __iomem *regs = vptr->mac_regs;
1332 struct net_device *netdev = vptr->netdev;
1314 int i, mii_status; 1333 int i, mii_status;
1315 1334
1316 mac_wol_reset(regs); 1335 mac_wol_reset(regs);
@@ -1319,7 +1338,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
1319 case VELOCITY_INIT_RESET: 1338 case VELOCITY_INIT_RESET:
1320 case VELOCITY_INIT_WOL: 1339 case VELOCITY_INIT_WOL:
1321 1340
1322 netif_stop_queue(vptr->dev); 1341 netif_stop_queue(netdev);
1323 1342
1324 /* 1343 /*
1325 * Reset RX to prevent RX pointer not on the 4X location 1344 * Reset RX to prevent RX pointer not on the 4X location
@@ -1332,7 +1351,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
1332 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { 1351 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1333 velocity_print_link_status(vptr); 1352 velocity_print_link_status(vptr);
1334 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) 1353 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1335 netif_wake_queue(vptr->dev); 1354 netif_wake_queue(netdev);
1336 } 1355 }
1337 1356
1338 enable_flow_control_ability(vptr); 1357 enable_flow_control_ability(vptr);
@@ -1352,9 +1371,11 @@ static void velocity_init_registers(struct velocity_info *vptr,
1352 velocity_soft_reset(vptr); 1371 velocity_soft_reset(vptr);
1353 mdelay(5); 1372 mdelay(5);
1354 1373
1355 mac_eeprom_reload(regs); 1374 if (!vptr->no_eeprom) {
1356 for (i = 0; i < 6; i++) 1375 mac_eeprom_reload(regs);
1357 writeb(vptr->dev->dev_addr[i], &(regs->PAR[i])); 1376 for (i = 0; i < 6; i++)
1377 writeb(netdev->dev_addr[i], regs->PAR + i);
1378 }
1358 1379
1359 /* 1380 /*
1360 * clear Pre_ACPI bit. 1381 * clear Pre_ACPI bit.
@@ -1377,7 +1398,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
1377 /* 1398 /*
1378 * Set packet filter: Receive directed and broadcast address 1399 * Set packet filter: Receive directed and broadcast address
1379 */ 1400 */
1380 velocity_set_multi(vptr->dev); 1401 velocity_set_multi(netdev);
1381 1402
1382 /* 1403 /*
1383 * Enable MII auto-polling 1404 * Enable MII auto-polling
@@ -1404,14 +1425,14 @@ static void velocity_init_registers(struct velocity_info *vptr,
1404 writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set); 1425 writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
1405 1426
1406 mii_status = velocity_get_opt_media_mode(vptr); 1427 mii_status = velocity_get_opt_media_mode(vptr);
1407 netif_stop_queue(vptr->dev); 1428 netif_stop_queue(netdev);
1408 1429
1409 mii_init(vptr, mii_status); 1430 mii_init(vptr, mii_status);
1410 1431
1411 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { 1432 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1412 velocity_print_link_status(vptr); 1433 velocity_print_link_status(vptr);
1413 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) 1434 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1414 netif_wake_queue(vptr->dev); 1435 netif_wake_queue(netdev);
1415 } 1436 }
1416 1437
1417 enable_flow_control_ability(vptr); 1438 enable_flow_control_ability(vptr);
@@ -1459,7 +1480,6 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
1459 struct velocity_opt *opt = &vptr->options; 1480 struct velocity_opt *opt = &vptr->options;
1460 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); 1481 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1461 const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc); 1482 const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1462 struct pci_dev *pdev = vptr->pdev;
1463 dma_addr_t pool_dma; 1483 dma_addr_t pool_dma;
1464 void *pool; 1484 void *pool;
1465 unsigned int i; 1485 unsigned int i;
@@ -1467,14 +1487,14 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
1467 /* 1487 /*
1468 * Allocate all RD/TD rings a single pool. 1488 * Allocate all RD/TD rings a single pool.
1469 * 1489 *
1470 * pci_alloc_consistent() fulfills the requirement for 64 bytes 1490 * dma_alloc_coherent() fulfills the requirement for 64 bytes
1471 * alignment 1491 * alignment
1472 */ 1492 */
1473 pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq + 1493 pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
1474 rx_ring_size, &pool_dma); 1494 rx_ring_size, &pool_dma, GFP_ATOMIC);
1475 if (!pool) { 1495 if (!pool) {
1476 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", 1496 dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
1477 vptr->dev->name); 1497 vptr->netdev->name);
1478 return -ENOMEM; 1498 return -ENOMEM;
1479 } 1499 }
1480 1500
@@ -1514,7 +1534,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1514 struct rx_desc *rd = &(vptr->rx.ring[idx]); 1534 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1515 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); 1535 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1516 1536
1517 rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64); 1537 rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
1518 if (rd_info->skb == NULL) 1538 if (rd_info->skb == NULL)
1519 return -ENOMEM; 1539 return -ENOMEM;
1520 1540
@@ -1524,8 +1544,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1524 */ 1544 */
1525 skb_reserve(rd_info->skb, 1545 skb_reserve(rd_info->skb,
1526 64 - ((unsigned long) rd_info->skb->data & 63)); 1546 64 - ((unsigned long) rd_info->skb->data & 63));
1527 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, 1547 rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
1528 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); 1548 vptr->rx.buf_sz, DMA_FROM_DEVICE);
1529 1549
1530 /* 1550 /*
1531 * Fill in the descriptor to match 1551 * Fill in the descriptor to match
@@ -1588,8 +1608,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
1588 1608
1589 if (!rd_info->skb) 1609 if (!rd_info->skb)
1590 continue; 1610 continue;
1591 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, 1611 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
1592 PCI_DMA_FROMDEVICE); 1612 DMA_FROM_DEVICE);
1593 rd_info->skb_dma = 0; 1613 rd_info->skb_dma = 0;
1594 1614
1595 dev_kfree_skb(rd_info->skb); 1615 dev_kfree_skb(rd_info->skb);
@@ -1620,7 +1640,7 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
1620 1640
1621 if (velocity_rx_refill(vptr) != vptr->options.numrx) { 1641 if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1622 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR 1642 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1623 "%s: failed to allocate RX buffer.\n", vptr->dev->name); 1643 "%s: failed to allocate RX buffer.\n", vptr->netdev->name);
1624 velocity_free_rd_ring(vptr); 1644 velocity_free_rd_ring(vptr);
1625 goto out; 1645 goto out;
1626 } 1646 }
@@ -1670,7 +1690,7 @@ static void velocity_free_dma_rings(struct velocity_info *vptr)
1670 const int size = vptr->options.numrx * sizeof(struct rx_desc) + 1690 const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1671 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; 1691 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1672 1692
1673 pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma); 1693 dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
1674} 1694}
1675 1695
1676static int velocity_init_rings(struct velocity_info *vptr, int mtu) 1696static int velocity_init_rings(struct velocity_info *vptr, int mtu)
@@ -1727,8 +1747,8 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
1727 pktlen = max_t(size_t, pktlen, 1747 pktlen = max_t(size_t, pktlen,
1728 td->td_buf[i].size & ~TD_QUEUE); 1748 td->td_buf[i].size & ~TD_QUEUE);
1729 1749
1730 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], 1750 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1731 le16_to_cpu(pktlen), PCI_DMA_TODEVICE); 1751 le16_to_cpu(pktlen), DMA_TO_DEVICE);
1732 } 1752 }
1733 } 1753 }
1734 dev_kfree_skb_irq(skb); 1754 dev_kfree_skb_irq(skb);
@@ -1750,8 +1770,8 @@ static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1750 if (td_info->skb) { 1770 if (td_info->skb) {
1751 for (i = 0; i < td_info->nskb_dma; i++) { 1771 for (i = 0; i < td_info->nskb_dma; i++) {
1752 if (td_info->skb_dma[i]) { 1772 if (td_info->skb_dma[i]) {
1753 pci_unmap_single(vptr->pdev, td_info->skb_dma[i], 1773 dma_unmap_single(vptr->dev, td_info->skb_dma[i],
1754 td_info->skb->len, PCI_DMA_TODEVICE); 1774 td_info->skb->len, DMA_TO_DEVICE);
1755 td_info->skb_dma[i] = 0; 1775 td_info->skb_dma[i] = 0;
1756 } 1776 }
1757 } 1777 }
@@ -1809,7 +1829,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
1809 printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0])); 1829 printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1810 BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR); 1830 BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1811 writew(TRDCSR_RUN, &regs->TDCSRClr); 1831 writew(TRDCSR_RUN, &regs->TDCSRClr);
1812 netif_stop_queue(vptr->dev); 1832 netif_stop_queue(vptr->netdev);
1813 1833
1814 /* FIXME: port over the pci_device_failed code and use it 1834 /* FIXME: port over the pci_device_failed code and use it
1815 here */ 1835 here */
@@ -1850,10 +1870,10 @@ static void velocity_error(struct velocity_info *vptr, int status)
1850 1870
1851 if (linked) { 1871 if (linked) {
1852 vptr->mii_status &= ~VELOCITY_LINK_FAIL; 1872 vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1853 netif_carrier_on(vptr->dev); 1873 netif_carrier_on(vptr->netdev);
1854 } else { 1874 } else {
1855 vptr->mii_status |= VELOCITY_LINK_FAIL; 1875 vptr->mii_status |= VELOCITY_LINK_FAIL;
1856 netif_carrier_off(vptr->dev); 1876 netif_carrier_off(vptr->netdev);
1857 } 1877 }
1858 1878
1859 velocity_print_link_status(vptr); 1879 velocity_print_link_status(vptr);
@@ -1867,9 +1887,9 @@ static void velocity_error(struct velocity_info *vptr, int status)
1867 enable_mii_autopoll(regs); 1887 enable_mii_autopoll(regs);
1868 1888
1869 if (vptr->mii_status & VELOCITY_LINK_FAIL) 1889 if (vptr->mii_status & VELOCITY_LINK_FAIL)
1870 netif_stop_queue(vptr->dev); 1890 netif_stop_queue(vptr->netdev);
1871 else 1891 else
1872 netif_wake_queue(vptr->dev); 1892 netif_wake_queue(vptr->netdev);
1873 1893
1874 } 1894 }
1875 if (status & ISR_MIBFI) 1895 if (status & ISR_MIBFI)
@@ -1894,7 +1914,7 @@ static int velocity_tx_srv(struct velocity_info *vptr)
1894 int idx; 1914 int idx;
1895 int works = 0; 1915 int works = 0;
1896 struct velocity_td_info *tdinfo; 1916 struct velocity_td_info *tdinfo;
1897 struct net_device_stats *stats = &vptr->dev->stats; 1917 struct net_device_stats *stats = &vptr->netdev->stats;
1898 1918
1899 for (qnum = 0; qnum < vptr->tx.numq; qnum++) { 1919 for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1900 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; 1920 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
@@ -1939,9 +1959,9 @@ static int velocity_tx_srv(struct velocity_info *vptr)
1939 * Look to see if we should kick the transmit network 1959 * Look to see if we should kick the transmit network
1940 * layer for more work. 1960 * layer for more work.
1941 */ 1961 */
1942 if (netif_queue_stopped(vptr->dev) && (full == 0) && 1962 if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
1943 (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { 1963 (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1944 netif_wake_queue(vptr->dev); 1964 netif_wake_queue(vptr->netdev);
1945 } 1965 }
1946 return works; 1966 return works;
1947} 1967}
@@ -1989,7 +2009,7 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1989 if (pkt_size < rx_copybreak) { 2009 if (pkt_size < rx_copybreak) {
1990 struct sk_buff *new_skb; 2010 struct sk_buff *new_skb;
1991 2011
1992 new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size); 2012 new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
1993 if (new_skb) { 2013 if (new_skb) {
1994 new_skb->ip_summed = rx_skb[0]->ip_summed; 2014 new_skb->ip_summed = rx_skb[0]->ip_summed;
1995 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); 2015 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
@@ -2029,15 +2049,14 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
2029 */ 2049 */
2030static int velocity_receive_frame(struct velocity_info *vptr, int idx) 2050static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2031{ 2051{
2032 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); 2052 struct net_device_stats *stats = &vptr->netdev->stats;
2033 struct net_device_stats *stats = &vptr->dev->stats;
2034 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); 2053 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2035 struct rx_desc *rd = &(vptr->rx.ring[idx]); 2054 struct rx_desc *rd = &(vptr->rx.ring[idx]);
2036 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 2055 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2037 struct sk_buff *skb; 2056 struct sk_buff *skb;
2038 2057
2039 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { 2058 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
2040 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name); 2059 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->netdev->name);
2041 stats->rx_length_errors++; 2060 stats->rx_length_errors++;
2042 return -EINVAL; 2061 return -EINVAL;
2043 } 2062 }
@@ -2047,8 +2066,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2047 2066
2048 skb = rd_info->skb; 2067 skb = rd_info->skb;
2049 2068
2050 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, 2069 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2051 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); 2070 vptr->rx.buf_sz, DMA_FROM_DEVICE);
2052 2071
2053 /* 2072 /*
2054 * Drop frame not meeting IEEE 802.3 2073 * Drop frame not meeting IEEE 802.3
@@ -2061,21 +2080,20 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2061 } 2080 }
2062 } 2081 }
2063 2082
2064 pci_action = pci_dma_sync_single_for_device;
2065
2066 velocity_rx_csum(rd, skb); 2083 velocity_rx_csum(rd, skb);
2067 2084
2068 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { 2085 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2069 velocity_iph_realign(vptr, skb, pkt_len); 2086 velocity_iph_realign(vptr, skb, pkt_len);
2070 pci_action = pci_unmap_single;
2071 rd_info->skb = NULL; 2087 rd_info->skb = NULL;
2088 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
2089 DMA_FROM_DEVICE);
2090 } else {
2091 dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
2092 vptr->rx.buf_sz, DMA_FROM_DEVICE);
2072 } 2093 }
2073 2094
2074 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
2075 PCI_DMA_FROMDEVICE);
2076
2077 skb_put(skb, pkt_len - 4); 2095 skb_put(skb, pkt_len - 4);
2078 skb->protocol = eth_type_trans(skb, vptr->dev); 2096 skb->protocol = eth_type_trans(skb, vptr->netdev);
2079 2097
2080 if (rd->rdesc0.RSR & RSR_DETAG) { 2098 if (rd->rdesc0.RSR & RSR_DETAG) {
2081 u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG)); 2099 u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
@@ -2100,7 +2118,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2100 */ 2118 */
2101static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) 2119static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2102{ 2120{
2103 struct net_device_stats *stats = &vptr->dev->stats; 2121 struct net_device_stats *stats = &vptr->netdev->stats;
2104 int rd_curr = vptr->rx.curr; 2122 int rd_curr = vptr->rx.curr;
2105 int works = 0; 2123 int works = 0;
2106 2124
@@ -2235,15 +2253,15 @@ static int velocity_open(struct net_device *dev)
2235 goto out; 2253 goto out;
2236 2254
2237 /* Ensure chip is running */ 2255 /* Ensure chip is running */
2238 pci_set_power_state(vptr->pdev, PCI_D0); 2256 velocity_set_power_state(vptr, PCI_D0);
2239 2257
2240 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 2258 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2241 2259
2242 ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED, 2260 ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
2243 dev->name, dev); 2261 dev->name, dev);
2244 if (ret < 0) { 2262 if (ret < 0) {
2245 /* Power down the chip */ 2263 /* Power down the chip */
2246 pci_set_power_state(vptr->pdev, PCI_D3hot); 2264 velocity_set_power_state(vptr, PCI_D3hot);
2247 velocity_free_rings(vptr); 2265 velocity_free_rings(vptr);
2248 goto out; 2266 goto out;
2249 } 2267 }
@@ -2292,7 +2310,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2292 2310
2293 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { 2311 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
2294 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", 2312 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
2295 vptr->dev->name); 2313 vptr->netdev->name);
2296 ret = -EINVAL; 2314 ret = -EINVAL;
2297 goto out_0; 2315 goto out_0;
2298 } 2316 }
@@ -2314,8 +2332,9 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2314 goto out_0; 2332 goto out_0;
2315 } 2333 }
2316 2334
2317 tmp_vptr->dev = dev; 2335 tmp_vptr->netdev = dev;
2318 tmp_vptr->pdev = vptr->pdev; 2336 tmp_vptr->pdev = vptr->pdev;
2337 tmp_vptr->dev = vptr->dev;
2319 tmp_vptr->options = vptr->options; 2338 tmp_vptr->options = vptr->options;
2320 tmp_vptr->tx.numq = vptr->tx.numq; 2339 tmp_vptr->tx.numq = vptr->tx.numq;
2321 2340
@@ -2415,7 +2434,7 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2415 saving then we need to bring the device back up to talk to it */ 2434 saving then we need to bring the device back up to talk to it */
2416 2435
2417 if (!netif_running(dev)) 2436 if (!netif_running(dev))
2418 pci_set_power_state(vptr->pdev, PCI_D0); 2437 velocity_set_power_state(vptr, PCI_D0);
2419 2438
2420 switch (cmd) { 2439 switch (cmd) {
2421 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 2440 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
@@ -2428,7 +2447,7 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2428 ret = -EOPNOTSUPP; 2447 ret = -EOPNOTSUPP;
2429 } 2448 }
2430 if (!netif_running(dev)) 2449 if (!netif_running(dev))
2431 pci_set_power_state(vptr->pdev, PCI_D3hot); 2450 velocity_set_power_state(vptr, PCI_D3hot);
2432 2451
2433 2452
2434 return ret; 2453 return ret;
@@ -2494,7 +2513,7 @@ static int velocity_close(struct net_device *dev)
2494 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) 2513 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2495 velocity_get_ip(vptr); 2514 velocity_get_ip(vptr);
2496 2515
2497 free_irq(vptr->pdev->irq, dev); 2516 free_irq(dev->irq, dev);
2498 2517
2499 velocity_free_rings(vptr); 2518 velocity_free_rings(vptr);
2500 2519
@@ -2550,7 +2569,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2550 * add it to the transmit ring. 2569 * add it to the transmit ring.
2551 */ 2570 */
2552 tdinfo->skb = skb; 2571 tdinfo->skb = skb;
2553 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); 2572 tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2573 DMA_TO_DEVICE);
2554 td_ptr->tdesc0.len = cpu_to_le16(pktlen); 2574 td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2555 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2575 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2556 td_ptr->td_buf[0].pa_high = 0; 2576 td_ptr->td_buf[0].pa_high = 0;
@@ -2560,7 +2580,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2560 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2580 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2561 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2581 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2562 2582
2563 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev, 2583 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2564 frag, 0, 2584 frag, 0,
2565 skb_frag_size(frag), 2585 skb_frag_size(frag),
2566 DMA_TO_DEVICE); 2586 DMA_TO_DEVICE);
@@ -2632,12 +2652,9 @@ static const struct net_device_ops velocity_netdev_ops = {
2632 * Set up the initial velocity_info struct for the device that has been 2652 * Set up the initial velocity_info struct for the device that has been
2633 * discovered. 2653 * discovered.
2634 */ 2654 */
2635static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, 2655static void velocity_init_info(struct velocity_info *vptr,
2636 const struct velocity_info_tbl *info) 2656 const struct velocity_info_tbl *info)
2637{ 2657{
2638 memset(vptr, 0, sizeof(struct velocity_info));
2639
2640 vptr->pdev = pdev;
2641 vptr->chip_id = info->chip_id; 2658 vptr->chip_id = info->chip_id;
2642 vptr->tx.numq = info->txqueue; 2659 vptr->tx.numq = info->txqueue;
2643 vptr->multicast_limit = MCAM_SIZE; 2660 vptr->multicast_limit = MCAM_SIZE;
@@ -2652,10 +2669,9 @@ static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
2652 * Retrieve the PCI configuration space data that interests us from 2669 * Retrieve the PCI configuration space data that interests us from
2653 * the kernel PCI layer 2670 * the kernel PCI layer
2654 */ 2671 */
2655static int velocity_get_pci_info(struct velocity_info *vptr, 2672static int velocity_get_pci_info(struct velocity_info *vptr)
2656 struct pci_dev *pdev)
2657{ 2673{
2658 vptr->rev_id = pdev->revision; 2674 struct pci_dev *pdev = vptr->pdev;
2659 2675
2660 pci_set_master(pdev); 2676 pci_set_master(pdev);
2661 2677
@@ -2678,7 +2694,37 @@ static int velocity_get_pci_info(struct velocity_info *vptr,
2678 dev_err(&pdev->dev, "region #1 is too small.\n"); 2694 dev_err(&pdev->dev, "region #1 is too small.\n");
2679 return -EINVAL; 2695 return -EINVAL;
2680 } 2696 }
2681 vptr->pdev = pdev; 2697
2698 return 0;
2699}
2700
2701/**
2702 * velocity_get_platform_info - retrieve platform info for device
2703 * @vptr: velocity device
2704 * @pdev: platform device it matches
2705 *
2706 * Retrieve the Platform configuration data that interests us
2707 */
2708static int velocity_get_platform_info(struct velocity_info *vptr)
2709{
2710 struct resource res;
2711 int ret;
2712
2713 if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2714 vptr->no_eeprom = 1;
2715
2716 ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2717 if (ret) {
2718 dev_err(vptr->dev, "unable to find memory address\n");
2719 return ret;
2720 }
2721
2722 vptr->memaddr = res.start;
2723
2724 if (resource_size(&res) < VELOCITY_IO_SIZE) {
2725 dev_err(vptr->dev, "memory region is too small.\n");
2726 return -EINVAL;
2727 }
2682 2728
2683 return 0; 2729 return 0;
2684} 2730}
@@ -2692,7 +2738,7 @@ static int velocity_get_pci_info(struct velocity_info *vptr,
2692 */ 2738 */
2693static void velocity_print_info(struct velocity_info *vptr) 2739static void velocity_print_info(struct velocity_info *vptr)
2694{ 2740{
2695 struct net_device *dev = vptr->dev; 2741 struct net_device *dev = vptr->netdev;
2696 2742
2697 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); 2743 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2698 printk(KERN_INFO "%s: Ethernet Address: %pM\n", 2744 printk(KERN_INFO "%s: Ethernet Address: %pM\n",
@@ -2707,21 +2753,22 @@ static u32 velocity_get_link(struct net_device *dev)
2707} 2753}
2708 2754
2709/** 2755/**
2710 * velocity_found1 - set up discovered velocity card 2756 * velocity_probe - set up discovered velocity device
2711 * @pdev: PCI device 2757 * @pdev: PCI device
2712 * @ent: PCI device table entry that matched 2758 * @ent: PCI device table entry that matched
2759 * @bustype: bus that device is connected to
2713 * 2760 *
2714 * Configure a discovered adapter from scratch. Return a negative 2761 * Configure a discovered adapter from scratch. Return a negative
2715 * errno error code on failure paths. 2762 * errno error code on failure paths.
2716 */ 2763 */
2717static int velocity_found1(struct pci_dev *pdev, 2764static int velocity_probe(struct device *dev, int irq,
2718 const struct pci_device_id *ent) 2765 const struct velocity_info_tbl *info,
2766 enum velocity_bus_type bustype)
2719{ 2767{
2720 static int first = 1; 2768 static int first = 1;
2721 struct net_device *dev; 2769 struct net_device *netdev;
2722 int i; 2770 int i;
2723 const char *drv_string; 2771 const char *drv_string;
2724 const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
2725 struct velocity_info *vptr; 2772 struct velocity_info *vptr;
2726 struct mac_regs __iomem *regs; 2773 struct mac_regs __iomem *regs;
2727 int ret = -ENOMEM; 2774 int ret = -ENOMEM;
@@ -2730,20 +2777,18 @@ static int velocity_found1(struct pci_dev *pdev,
2730 * can support more than MAX_UNITS. 2777 * can support more than MAX_UNITS.
2731 */ 2778 */
2732 if (velocity_nics >= MAX_UNITS) { 2779 if (velocity_nics >= MAX_UNITS) {
2733 dev_notice(&pdev->dev, "already found %d NICs.\n", 2780 dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2734 velocity_nics);
2735 return -ENODEV; 2781 return -ENODEV;
2736 } 2782 }
2737 2783
2738 dev = alloc_etherdev(sizeof(struct velocity_info)); 2784 netdev = alloc_etherdev(sizeof(struct velocity_info));
2739 if (!dev) 2785 if (!netdev)
2740 goto out; 2786 goto out;
2741 2787
2742 /* Chain it all together */ 2788 /* Chain it all together */
2743 2789
2744 SET_NETDEV_DEV(dev, &pdev->dev); 2790 SET_NETDEV_DEV(netdev, dev);
2745 vptr = netdev_priv(dev); 2791 vptr = netdev_priv(netdev);
2746
2747 2792
2748 if (first) { 2793 if (first) {
2749 printk(KERN_INFO "%s Ver. %s\n", 2794 printk(KERN_INFO "%s Ver. %s\n",
@@ -2753,41 +2798,41 @@ static int velocity_found1(struct pci_dev *pdev,
2753 first = 0; 2798 first = 0;
2754 } 2799 }
2755 2800
2756 velocity_init_info(pdev, vptr, info); 2801 netdev->irq = irq;
2757 2802 vptr->netdev = netdev;
2758 vptr->dev = dev; 2803 vptr->dev = dev;
2759 2804
2760 ret = pci_enable_device(pdev); 2805 velocity_init_info(vptr, info);
2761 if (ret < 0)
2762 goto err_free_dev;
2763 2806
2764 ret = velocity_get_pci_info(vptr, pdev); 2807 if (bustype == BUS_PCI) {
2765 if (ret < 0) { 2808 vptr->pdev = to_pci_dev(dev);
2766 /* error message already printed */
2767 goto err_disable;
2768 }
2769 2809
2770 ret = pci_request_regions(pdev, VELOCITY_NAME); 2810 ret = velocity_get_pci_info(vptr);
2771 if (ret < 0) { 2811 if (ret < 0)
2772 dev_err(&pdev->dev, "No PCI resources.\n"); 2812 goto err_free_dev;
2773 goto err_disable; 2813 } else {
2814 vptr->pdev = NULL;
2815 ret = velocity_get_platform_info(vptr);
2816 if (ret < 0)
2817 goto err_free_dev;
2774 } 2818 }
2775 2819
2776 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); 2820 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2777 if (regs == NULL) { 2821 if (regs == NULL) {
2778 ret = -EIO; 2822 ret = -EIO;
2779 goto err_release_res; 2823 goto err_free_dev;
2780 } 2824 }
2781 2825
2782 vptr->mac_regs = regs; 2826 vptr->mac_regs = regs;
2827 vptr->rev_id = readb(&regs->rev_id);
2783 2828
2784 mac_wol_reset(regs); 2829 mac_wol_reset(regs);
2785 2830
2786 for (i = 0; i < 6; i++) 2831 for (i = 0; i < 6; i++)
2787 dev->dev_addr[i] = readb(&regs->PAR[i]); 2832 netdev->dev_addr[i] = readb(&regs->PAR[i]);
2788 2833
2789 2834
2790 drv_string = dev_driver_string(&pdev->dev); 2835 drv_string = dev_driver_string(dev);
2791 2836
2792 velocity_get_options(&vptr->options, velocity_nics, drv_string); 2837 velocity_get_options(&vptr->options, velocity_nics, drv_string);
2793 2838
@@ -2808,46 +2853,125 @@ static int velocity_found1(struct pci_dev *pdev,
2808 2853
2809 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); 2854 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2810 2855
2811 dev->netdev_ops = &velocity_netdev_ops; 2856 netdev->netdev_ops = &velocity_netdev_ops;
2812 dev->ethtool_ops = &velocity_ethtool_ops; 2857 netdev->ethtool_ops = &velocity_ethtool_ops;
2813 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); 2858 netif_napi_add(netdev, &vptr->napi, velocity_poll,
2859 VELOCITY_NAPI_WEIGHT);
2814 2860
2815 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 2861 netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2816 NETIF_F_HW_VLAN_CTAG_TX; 2862 NETIF_F_HW_VLAN_CTAG_TX;
2817 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER | 2863 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2818 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_IP_CSUM; 2864 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2865 NETIF_F_IP_CSUM;
2819 2866
2820 ret = register_netdev(dev); 2867 ret = register_netdev(netdev);
2821 if (ret < 0) 2868 if (ret < 0)
2822 goto err_iounmap; 2869 goto err_iounmap;
2823 2870
2824 if (!velocity_get_link(dev)) { 2871 if (!velocity_get_link(netdev)) {
2825 netif_carrier_off(dev); 2872 netif_carrier_off(netdev);
2826 vptr->mii_status |= VELOCITY_LINK_FAIL; 2873 vptr->mii_status |= VELOCITY_LINK_FAIL;
2827 } 2874 }
2828 2875
2829 velocity_print_info(vptr); 2876 velocity_print_info(vptr);
2830 pci_set_drvdata(pdev, dev); 2877 dev_set_drvdata(vptr->dev, netdev);
2831 2878
2832 /* and leave the chip powered down */ 2879 /* and leave the chip powered down */
2833 2880
2834 pci_set_power_state(pdev, PCI_D3hot); 2881 velocity_set_power_state(vptr, PCI_D3hot);
2835 velocity_nics++; 2882 velocity_nics++;
2836out: 2883out:
2837 return ret; 2884 return ret;
2838 2885
2839err_iounmap: 2886err_iounmap:
2840 iounmap(regs); 2887 iounmap(regs);
2841err_release_res:
2842 pci_release_regions(pdev);
2843err_disable:
2844 pci_disable_device(pdev);
2845err_free_dev: 2888err_free_dev:
2846 free_netdev(dev); 2889 free_netdev(netdev);
2847 goto out; 2890 goto out;
2848} 2891}
2849 2892
2850#ifdef CONFIG_PM 2893/**
2894 * velocity_remove - device unplug
2895 * @dev: device being removed
2896 *
2897 * Device unload callback. Called on an unplug or on module
2898 * unload for each active device that is present. Disconnects
2899 * the device from the network layer and frees all the resources
2900 */
2901static int velocity_remove(struct device *dev)
2902{
2903 struct net_device *netdev = dev_get_drvdata(dev);
2904 struct velocity_info *vptr = netdev_priv(netdev);
2905
2906 unregister_netdev(netdev);
2907 iounmap(vptr->mac_regs);
2908 free_netdev(netdev);
2909 velocity_nics--;
2910
2911 return 0;
2912}
2913
2914static int velocity_pci_probe(struct pci_dev *pdev,
2915 const struct pci_device_id *ent)
2916{
2917 const struct velocity_info_tbl *info =
2918 &chip_info_table[ent->driver_data];
2919 int ret;
2920
2921 ret = pci_enable_device(pdev);
2922 if (ret < 0)
2923 return ret;
2924
2925 ret = pci_request_regions(pdev, VELOCITY_NAME);
2926 if (ret < 0) {
2927 dev_err(&pdev->dev, "No PCI resources.\n");
2928 goto fail1;
2929 }
2930
2931 ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2932 if (ret == 0)
2933 return 0;
2934
2935 pci_release_regions(pdev);
2936fail1:
2937 pci_disable_device(pdev);
2938 return ret;
2939}
2940
2941static void velocity_pci_remove(struct pci_dev *pdev)
2942{
2943 velocity_remove(&pdev->dev);
2944
2945 pci_release_regions(pdev);
2946 pci_disable_device(pdev);
2947}
2948
2949static int velocity_platform_probe(struct platform_device *pdev)
2950{
2951 const struct of_device_id *of_id;
2952 const struct velocity_info_tbl *info;
2953 int irq;
2954
2955 of_id = of_match_device(velocity_of_ids, &pdev->dev);
2956 if (!of_id)
2957 return -EINVAL;
2958 info = of_id->data;
2959
2960 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2961 if (!irq)
2962 return -EINVAL;
2963
2964 return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2965}
2966
2967static int velocity_platform_remove(struct platform_device *pdev)
2968{
2969 velocity_remove(&pdev->dev);
2970
2971 return 0;
2972}
2973
2974#ifdef CONFIG_PM_SLEEP
2851/** 2975/**
2852 * wol_calc_crc - WOL CRC 2976 * wol_calc_crc - WOL CRC
2853 * @pattern: data pattern 2977 * @pattern: data pattern
@@ -3004,32 +3128,35 @@ static void velocity_save_context(struct velocity_info *vptr, struct velocity_co
3004 3128
3005} 3129}
3006 3130
3007static int velocity_suspend(struct pci_dev *pdev, pm_message_t state) 3131static int velocity_suspend(struct device *dev)
3008{ 3132{
3009 struct net_device *dev = pci_get_drvdata(pdev); 3133 struct net_device *netdev = dev_get_drvdata(dev);
3010 struct velocity_info *vptr = netdev_priv(dev); 3134 struct velocity_info *vptr = netdev_priv(netdev);
3011 unsigned long flags; 3135 unsigned long flags;
3012 3136
3013 if (!netif_running(vptr->dev)) 3137 if (!netif_running(vptr->netdev))
3014 return 0; 3138 return 0;
3015 3139
3016 netif_device_detach(vptr->dev); 3140 netif_device_detach(vptr->netdev);
3017 3141
3018 spin_lock_irqsave(&vptr->lock, flags); 3142 spin_lock_irqsave(&vptr->lock, flags);
3019 pci_save_state(pdev); 3143 if (vptr->pdev)
3144 pci_save_state(vptr->pdev);
3020 3145
3021 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { 3146 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3022 velocity_get_ip(vptr); 3147 velocity_get_ip(vptr);
3023 velocity_save_context(vptr, &vptr->context); 3148 velocity_save_context(vptr, &vptr->context);
3024 velocity_shutdown(vptr); 3149 velocity_shutdown(vptr);
3025 velocity_set_wol(vptr); 3150 velocity_set_wol(vptr);
3026 pci_enable_wake(pdev, PCI_D3hot, 1); 3151 if (vptr->pdev)
3027 pci_set_power_state(pdev, PCI_D3hot); 3152 pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3153 velocity_set_power_state(vptr, PCI_D3hot);
3028 } else { 3154 } else {
3029 velocity_save_context(vptr, &vptr->context); 3155 velocity_save_context(vptr, &vptr->context);
3030 velocity_shutdown(vptr); 3156 velocity_shutdown(vptr);
3031 pci_disable_device(pdev); 3157 if (vptr->pdev)
3032 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3158 pci_disable_device(vptr->pdev);
3159 velocity_set_power_state(vptr, PCI_D3hot);
3033 } 3160 }
3034 3161
3035 spin_unlock_irqrestore(&vptr->lock, flags); 3162 spin_unlock_irqrestore(&vptr->lock, flags);
@@ -3071,19 +3198,22 @@ static void velocity_restore_context(struct velocity_info *vptr, struct velocity
3071 writeb(*((u8 *) (context->mac_reg + i)), ptr + i); 3198 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3072} 3199}
3073 3200
3074static int velocity_resume(struct pci_dev *pdev) 3201static int velocity_resume(struct device *dev)
3075{ 3202{
3076 struct net_device *dev = pci_get_drvdata(pdev); 3203 struct net_device *netdev = dev_get_drvdata(dev);
3077 struct velocity_info *vptr = netdev_priv(dev); 3204 struct velocity_info *vptr = netdev_priv(netdev);
3078 unsigned long flags; 3205 unsigned long flags;
3079 int i; 3206 int i;
3080 3207
3081 if (!netif_running(vptr->dev)) 3208 if (!netif_running(vptr->netdev))
3082 return 0; 3209 return 0;
3083 3210
3084 pci_set_power_state(pdev, PCI_D0); 3211 velocity_set_power_state(vptr, PCI_D0);
3085 pci_enable_wake(pdev, 0, 0); 3212
3086 pci_restore_state(pdev); 3213 if (vptr->pdev) {
3214 pci_enable_wake(vptr->pdev, PCI_D0, 0);
3215 pci_restore_state(vptr->pdev);
3216 }
3087 3217
3088 mac_wol_reset(vptr->mac_regs); 3218 mac_wol_reset(vptr->mac_regs);
3089 3219
@@ -3101,27 +3231,38 @@ static int velocity_resume(struct pci_dev *pdev)
3101 3231
3102 mac_enable_int(vptr->mac_regs); 3232 mac_enable_int(vptr->mac_regs);
3103 spin_unlock_irqrestore(&vptr->lock, flags); 3233 spin_unlock_irqrestore(&vptr->lock, flags);
3104 netif_device_attach(vptr->dev); 3234 netif_device_attach(vptr->netdev);
3105 3235
3106 return 0; 3236 return 0;
3107} 3237}
3108#endif 3238#endif /* CONFIG_PM_SLEEP */
3239
3240static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
3109 3241
3110/* 3242/*
3111 * Definition for our device driver. The PCI layer interface 3243 * Definition for our device driver. The PCI layer interface
3112 * uses this to handle all our card discover and plugging 3244 * uses this to handle all our card discover and plugging
3113 */ 3245 */
3114static struct pci_driver velocity_driver = { 3246static struct pci_driver velocity_pci_driver = {
3115 .name = VELOCITY_NAME, 3247 .name = VELOCITY_NAME,
3116 .id_table = velocity_id_table, 3248 .id_table = velocity_pci_id_table,
3117 .probe = velocity_found1, 3249 .probe = velocity_pci_probe,
3118 .remove = velocity_remove1, 3250 .remove = velocity_pci_remove,
3119#ifdef CONFIG_PM 3251 .driver = {
3120 .suspend = velocity_suspend, 3252 .pm = &velocity_pm_ops,
3121 .resume = velocity_resume, 3253 },
3122#endif
3123}; 3254};
3124 3255
3256static struct platform_driver velocity_platform_driver = {
3257 .probe = velocity_platform_probe,
3258 .remove = velocity_platform_remove,
3259 .driver = {
3260 .name = "via-velocity",
3261 .owner = THIS_MODULE,
3262 .of_match_table = velocity_of_ids,
3263 .pm = &velocity_pm_ops,
3264 },
3265};
3125 3266
3126/** 3267/**
3127 * velocity_ethtool_up - pre hook for ethtool 3268 * velocity_ethtool_up - pre hook for ethtool
@@ -3134,7 +3275,7 @@ static int velocity_ethtool_up(struct net_device *dev)
3134{ 3275{
3135 struct velocity_info *vptr = netdev_priv(dev); 3276 struct velocity_info *vptr = netdev_priv(dev);
3136 if (!netif_running(dev)) 3277 if (!netif_running(dev))
3137 pci_set_power_state(vptr->pdev, PCI_D0); 3278 velocity_set_power_state(vptr, PCI_D0);
3138 return 0; 3279 return 0;
3139} 3280}
3140 3281
@@ -3149,7 +3290,7 @@ static void velocity_ethtool_down(struct net_device *dev)
3149{ 3290{
3150 struct velocity_info *vptr = netdev_priv(dev); 3291 struct velocity_info *vptr = netdev_priv(dev);
3151 if (!netif_running(dev)) 3292 if (!netif_running(dev))
3152 pci_set_power_state(vptr->pdev, PCI_D3hot); 3293 velocity_set_power_state(vptr, PCI_D3hot);
3153} 3294}
3154 3295
3155static int velocity_get_settings(struct net_device *dev, 3296static int velocity_get_settings(struct net_device *dev,
@@ -3269,9 +3410,14 @@ static int velocity_set_settings(struct net_device *dev,
3269static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 3410static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3270{ 3411{
3271 struct velocity_info *vptr = netdev_priv(dev); 3412 struct velocity_info *vptr = netdev_priv(dev);
3413
3272 strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver)); 3414 strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3273 strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version)); 3415 strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3274 strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info)); 3416 if (vptr->pdev)
3417 strlcpy(info->bus_info, pci_name(vptr->pdev),
3418 sizeof(info->bus_info));
3419 else
3420 strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
3275} 3421}
3276 3422
3277static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 3423static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -3561,13 +3707,20 @@ static void velocity_unregister_notifier(void)
3561 */ 3707 */
3562static int __init velocity_init_module(void) 3708static int __init velocity_init_module(void)
3563{ 3709{
3564 int ret; 3710 int ret_pci, ret_platform;
3565 3711
3566 velocity_register_notifier(); 3712 velocity_register_notifier();
3567 ret = pci_register_driver(&velocity_driver); 3713
3568 if (ret < 0) 3714 ret_pci = pci_register_driver(&velocity_pci_driver);
3715 ret_platform = platform_driver_register(&velocity_platform_driver);
3716
3717 /* if both_registers failed, remove the notifier */
3718 if ((ret_pci < 0) && (ret_platform < 0)) {
3569 velocity_unregister_notifier(); 3719 velocity_unregister_notifier();
3570 return ret; 3720 return ret_pci;
3721 }
3722
3723 return 0;
3571} 3724}
3572 3725
3573/** 3726/**
@@ -3581,7 +3734,9 @@ static int __init velocity_init_module(void)
3581static void __exit velocity_cleanup_module(void) 3734static void __exit velocity_cleanup_module(void)
3582{ 3735{
3583 velocity_unregister_notifier(); 3736 velocity_unregister_notifier();
3584 pci_unregister_driver(&velocity_driver); 3737
3738 pci_unregister_driver(&velocity_pci_driver);
3739 platform_driver_unregister(&velocity_platform_driver);
3585} 3740}
3586 3741
3587module_init(velocity_init_module); 3742module_init(velocity_init_module);
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index 4cb9f13485e9..9453bfa9324a 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -1265,7 +1265,7 @@ struct velocity_context {
1265#define PHYID_VT3216_64BIT 0x000FC600UL 1265#define PHYID_VT3216_64BIT 0x000FC600UL
1266#define PHYID_MARVELL_1000 0x01410C50UL 1266#define PHYID_MARVELL_1000 0x01410C50UL
1267#define PHYID_MARVELL_1000S 0x01410C40UL 1267#define PHYID_MARVELL_1000S 0x01410C40UL
1268 1268#define PHYID_ICPLUS_IP101A 0x02430C54UL
1269#define PHYID_REV_ID_MASK 0x0000000FUL 1269#define PHYID_REV_ID_MASK 0x0000000FUL
1270 1270
1271#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK) 1271#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK)
@@ -1434,8 +1434,10 @@ struct velocity_opt {
1434#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) 1434#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
1435 1435
1436struct velocity_info { 1436struct velocity_info {
1437 struct device *dev;
1437 struct pci_dev *pdev; 1438 struct pci_dev *pdev;
1438 struct net_device *dev; 1439 struct net_device *netdev;
1440 int no_eeprom;
1439 1441
1440 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 1442 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
1441 u8 ip_addr[4]; 1443 u8 ip_addr[4];
@@ -1514,7 +1516,7 @@ static inline int velocity_get_ip(struct velocity_info *vptr)
1514 int res = -ENOENT; 1516 int res = -ENOENT;
1515 1517
1516 rcu_read_lock(); 1518 rcu_read_lock();
1517 in_dev = __in_dev_get_rcu(vptr->dev); 1519 in_dev = __in_dev_get_rcu(vptr->netdev);
1518 if (in_dev != NULL) { 1520 if (in_dev != NULL) {
1519 ifa = (struct in_ifaddr *) in_dev->ifa_list; 1521 ifa = (struct in_ifaddr *) in_dev->ifa_list;
1520 if (ifa != NULL) { 1522 if (ifa != NULL) {
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a518dcab396e..30fed08d1674 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -734,7 +734,6 @@ err_hw_probe:
734 unregister_netdev(ndev); 734 unregister_netdev(ndev);
735err_register: 735err_register:
736 free_netdev(ndev); 736 free_netdev(ndev);
737 platform_set_drvdata(pdev, NULL);
738 return err; 737 return err;
739} 738}
740 739
@@ -750,7 +749,6 @@ static int w5100_remove(struct platform_device *pdev)
750 749
751 unregister_netdev(ndev); 750 unregister_netdev(ndev);
752 free_netdev(ndev); 751 free_netdev(ndev);
753 platform_set_drvdata(pdev, NULL);
754 return 0; 752 return 0;
755} 753}
756 754
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 6e00e3f94ce4..e92884564e1e 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -646,7 +646,6 @@ err_hw_probe:
646 unregister_netdev(ndev); 646 unregister_netdev(ndev);
647err_register: 647err_register:
648 free_netdev(ndev); 648 free_netdev(ndev);
649 platform_set_drvdata(pdev, NULL);
650 return err; 649 return err;
651} 650}
652 651
@@ -662,7 +661,6 @@ static int w5300_remove(struct platform_device *pdev)
662 661
663 unregister_netdev(ndev); 662 unregister_netdev(ndev);
664 free_netdev(ndev); 663 free_netdev(ndev);
665 platform_set_drvdata(pdev, NULL);
666 return 0; 664 return 0;
667} 665}
668 666
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 122d60c0481b..7b90a5eba099 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -5,7 +5,7 @@
5config NET_VENDOR_XILINX 5config NET_VENDOR_XILINX
6 bool "Xilinx devices" 6 bool "Xilinx devices"
7 default y 7 default y
8 depends on PPC || PPC32 || MICROBLAZE 8 depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 11 and read the Ethernet-HOWTO, available from
@@ -20,7 +20,7 @@ if NET_VENDOR_XILINX
20 20
21config XILINX_EMACLITE 21config XILINX_EMACLITE
22 tristate "Xilinx 10/100 Ethernet Lite support" 22 tristate "Xilinx 10/100 Ethernet Lite support"
23 depends on (PPC32 || MICROBLAZE) 23 depends on (PPC32 || MICROBLAZE || ARCH_ZYNQ)
24 select PHYLIB 24 select PHYLIB
25 ---help--- 25 ---help---
26 This driver supports the 10/100 Ethernet Lite from Xilinx. 26 This driver supports the 10/100 Ethernet Lite from Xilinx.
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 57c2e5ef2804..58eb4488beff 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1007,7 +1007,7 @@ static int temac_of_probe(struct platform_device *op)
1007 return -ENOMEM; 1007 return -ENOMEM;
1008 1008
1009 ether_setup(ndev); 1009 ether_setup(ndev);
1010 dev_set_drvdata(&op->dev, ndev); 1010 platform_set_drvdata(op, ndev);
1011 SET_NETDEV_DEV(ndev, &op->dev); 1011 SET_NETDEV_DEV(ndev, &op->dev);
1012 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1012 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
1013 ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; 1013 ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
@@ -1136,7 +1136,7 @@ static int temac_of_probe(struct platform_device *op)
1136 1136
1137static int temac_of_remove(struct platform_device *op) 1137static int temac_of_remove(struct platform_device *op)
1138{ 1138{
1139 struct net_device *ndev = dev_get_drvdata(&op->dev); 1139 struct net_device *ndev = platform_get_drvdata(op);
1140 struct temac_local *lp = netdev_priv(ndev); 1140 struct temac_local *lp = netdev_priv(ndev);
1141 1141
1142 temac_mdio_teardown(lp); 1142 temac_mdio_teardown(lp);
@@ -1145,7 +1145,6 @@ static int temac_of_remove(struct platform_device *op)
1145 if (lp->phy_node) 1145 if (lp->phy_node)
1146 of_node_put(lp->phy_node); 1146 of_node_put(lp->phy_node);
1147 lp->phy_node = NULL; 1147 lp->phy_node = NULL;
1148 dev_set_drvdata(&op->dev, NULL);
1149 iounmap(lp->regs); 1148 iounmap(lp->regs);
1150 if (lp->sdma_regs) 1149 if (lp->sdma_regs)
1151 iounmap(lp->sdma_regs); 1150 iounmap(lp->sdma_regs);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 24748e8367a1..fb7d1c28a2ea 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1484,7 +1484,7 @@ static int axienet_of_probe(struct platform_device *op)
1484 return -ENOMEM; 1484 return -ENOMEM;
1485 1485
1486 ether_setup(ndev); 1486 ether_setup(ndev);
1487 dev_set_drvdata(&op->dev, ndev); 1487 platform_set_drvdata(op, ndev);
1488 1488
1489 SET_NETDEV_DEV(ndev, &op->dev); 1489 SET_NETDEV_DEV(ndev, &op->dev);
1490 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1490 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
@@ -1622,7 +1622,7 @@ nodev:
1622 1622
1623static int axienet_of_remove(struct platform_device *op) 1623static int axienet_of_remove(struct platform_device *op)
1624{ 1624{
1625 struct net_device *ndev = dev_get_drvdata(&op->dev); 1625 struct net_device *ndev = platform_get_drvdata(op);
1626 struct axienet_local *lp = netdev_priv(ndev); 1626 struct axienet_local *lp = netdev_priv(ndev);
1627 1627
1628 axienet_mdio_teardown(lp); 1628 axienet_mdio_teardown(lp);
@@ -1632,8 +1632,6 @@ static int axienet_of_remove(struct platform_device *op)
1632 of_node_put(lp->phy_node); 1632 of_node_put(lp->phy_node);
1633 lp->phy_node = NULL; 1633 lp->phy_node = NULL;
1634 1634
1635 dev_set_drvdata(&op->dev, NULL);
1636
1637 iounmap(lp->regs); 1635 iounmap(lp->regs);
1638 if (lp->dma_regs) 1636 if (lp->dma_regs)
1639 iounmap(lp->dma_regs); 1637 iounmap(lp->dma_regs);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index b7268b3dae77..fd4dbdae5331 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -2,9 +2,9 @@
2 * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device. 2 * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
3 * 3 *
4 * This is a new flat driver which is based on the original emac_lite 4 * This is a new flat driver which is based on the original emac_lite
5 * driver from John Williams <john.williams@petalogix.com>. 5 * driver from John Williams <john.williams@xilinx.com>.
6 * 6 *
7 * 2007-2009 (c) Xilinx, Inc. 7 * 2007 - 2013 (c) Xilinx, Inc.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the 10 * under the terms of the GNU General Public License as published by the
@@ -159,34 +159,32 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
159 u32 reg_data; 159 u32 reg_data;
160 160
161 /* Enable the Tx interrupts for the first Buffer */ 161 /* Enable the Tx interrupts for the first Buffer */
162 reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET); 162 reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
163 out_be32(drvdata->base_addr + XEL_TSR_OFFSET, 163 __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
164 reg_data | XEL_TSR_XMIT_IE_MASK); 164 drvdata->base_addr + XEL_TSR_OFFSET);
165 165
166 /* Enable the Tx interrupts for the second Buffer if 166 /* Enable the Tx interrupts for the second Buffer if
167 * configured in HW */ 167 * configured in HW */
168 if (drvdata->tx_ping_pong != 0) { 168 if (drvdata->tx_ping_pong != 0) {
169 reg_data = in_be32(drvdata->base_addr + 169 reg_data = __raw_readl(drvdata->base_addr +
170 XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); 170 XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
171 out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + 171 __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
172 XEL_TSR_OFFSET, 172 drvdata->base_addr + XEL_BUFFER_OFFSET +
173 reg_data | XEL_TSR_XMIT_IE_MASK); 173 XEL_TSR_OFFSET);
174 } 174 }
175 175
176 /* Enable the Rx interrupts for the first buffer */ 176 /* Enable the Rx interrupts for the first buffer */
177 out_be32(drvdata->base_addr + XEL_RSR_OFFSET, 177 __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
178 XEL_RSR_RECV_IE_MASK);
179 178
180 /* Enable the Rx interrupts for the second Buffer if 179 /* Enable the Rx interrupts for the second Buffer if
181 * configured in HW */ 180 * configured in HW */
182 if (drvdata->rx_ping_pong != 0) { 181 if (drvdata->rx_ping_pong != 0) {
183 out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + 182 __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr +
184 XEL_RSR_OFFSET, 183 XEL_BUFFER_OFFSET + XEL_RSR_OFFSET);
185 XEL_RSR_RECV_IE_MASK);
186 } 184 }
187 185
188 /* Enable the Global Interrupt Enable */ 186 /* Enable the Global Interrupt Enable */
189 out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK); 187 __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
190} 188}
191 189
192/** 190/**
@@ -201,37 +199,37 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
201 u32 reg_data; 199 u32 reg_data;
202 200
203 /* Disable the Global Interrupt Enable */ 201 /* Disable the Global Interrupt Enable */
204 out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK); 202 __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
205 203
206 /* Disable the Tx interrupts for the first buffer */ 204 /* Disable the Tx interrupts for the first buffer */
207 reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET); 205 reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
208 out_be32(drvdata->base_addr + XEL_TSR_OFFSET, 206 __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
209 reg_data & (~XEL_TSR_XMIT_IE_MASK)); 207 drvdata->base_addr + XEL_TSR_OFFSET);
210 208
211 /* Disable the Tx interrupts for the second Buffer 209 /* Disable the Tx interrupts for the second Buffer
212 * if configured in HW */ 210 * if configured in HW */
213 if (drvdata->tx_ping_pong != 0) { 211 if (drvdata->tx_ping_pong != 0) {
214 reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + 212 reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
215 XEL_TSR_OFFSET); 213 XEL_TSR_OFFSET);
216 out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + 214 __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
217 XEL_TSR_OFFSET, 215 drvdata->base_addr + XEL_BUFFER_OFFSET +
218 reg_data & (~XEL_TSR_XMIT_IE_MASK)); 216 XEL_TSR_OFFSET);
219 } 217 }
220 218
221 /* Disable the Rx interrupts for the first buffer */ 219 /* Disable the Rx interrupts for the first buffer */
222 reg_data = in_be32(drvdata->base_addr + XEL_RSR_OFFSET); 220 reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
223 out_be32(drvdata->base_addr + XEL_RSR_OFFSET, 221 __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
224 reg_data & (~XEL_RSR_RECV_IE_MASK)); 222 drvdata->base_addr + XEL_RSR_OFFSET);
225 223
226 /* Disable the Rx interrupts for the second buffer 224 /* Disable the Rx interrupts for the second buffer
227 * if configured in HW */ 225 * if configured in HW */
228 if (drvdata->rx_ping_pong != 0) { 226 if (drvdata->rx_ping_pong != 0) {
229 227
230 reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + 228 reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
231 XEL_RSR_OFFSET); 229 XEL_RSR_OFFSET);
232 out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + 230 __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
233 XEL_RSR_OFFSET, 231 drvdata->base_addr + XEL_BUFFER_OFFSET +
234 reg_data & (~XEL_RSR_RECV_IE_MASK)); 232 XEL_RSR_OFFSET);
235 } 233 }
236} 234}
237 235
@@ -351,7 +349,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
351 byte_count = ETH_FRAME_LEN; 349 byte_count = ETH_FRAME_LEN;
352 350
353 /* Check if the expected buffer is available */ 351 /* Check if the expected buffer is available */
354 reg_data = in_be32(addr + XEL_TSR_OFFSET); 352 reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
355 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | 353 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
356 XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { 354 XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
357 355
@@ -364,7 +362,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
364 362
365 addr = (void __iomem __force *)((u32 __force)addr ^ 363 addr = (void __iomem __force *)((u32 __force)addr ^
366 XEL_BUFFER_OFFSET); 364 XEL_BUFFER_OFFSET);
367 reg_data = in_be32(addr + XEL_TSR_OFFSET); 365 reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
368 366
369 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | 367 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
370 XEL_TSR_XMIT_ACTIVE_MASK)) != 0) 368 XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -375,15 +373,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
375 /* Write the frame to the buffer */ 373 /* Write the frame to the buffer */
376 xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); 374 xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
377 375
378 out_be32(addr + XEL_TPLR_OFFSET, (byte_count & XEL_TPLR_LENGTH_MASK)); 376 __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
377 addr + XEL_TPLR_OFFSET);
379 378
380 /* Update the Tx Status Register to indicate that there is a 379 /* Update the Tx Status Register to indicate that there is a
381 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which 380 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
382 * is used by the interrupt handler to check whether a frame 381 * is used by the interrupt handler to check whether a frame
383 * has been transmitted */ 382 * has been transmitted */
384 reg_data = in_be32(addr + XEL_TSR_OFFSET); 383 reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
385 reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); 384 reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
386 out_be32(addr + XEL_TSR_OFFSET, reg_data); 385 __raw_writel(reg_data, addr + XEL_TSR_OFFSET);
387 386
388 return 0; 387 return 0;
389} 388}
@@ -408,7 +407,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
408 addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); 407 addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
409 408
410 /* Verify which buffer has valid data */ 409 /* Verify which buffer has valid data */
411 reg_data = in_be32(addr + XEL_RSR_OFFSET); 410 reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
412 411
413 if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { 412 if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
414 if (drvdata->rx_ping_pong != 0) 413 if (drvdata->rx_ping_pong != 0)
@@ -425,14 +424,14 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
425 return 0; /* No data was available */ 424 return 0; /* No data was available */
426 425
427 /* Verify that buffer has valid data */ 426 /* Verify that buffer has valid data */
428 reg_data = in_be32(addr + XEL_RSR_OFFSET); 427 reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
429 if ((reg_data & XEL_RSR_RECV_DONE_MASK) != 428 if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
430 XEL_RSR_RECV_DONE_MASK) 429 XEL_RSR_RECV_DONE_MASK)
431 return 0; /* No data was available */ 430 return 0; /* No data was available */
432 } 431 }
433 432
434 /* Get the protocol type of the ethernet frame that arrived */ 433 /* Get the protocol type of the ethernet frame that arrived */
435 proto_type = ((ntohl(in_be32(addr + XEL_HEADER_OFFSET + 434 proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
436 XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & 435 XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
437 XEL_RPLR_LENGTH_MASK); 436 XEL_RPLR_LENGTH_MASK);
438 437
@@ -441,7 +440,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
441 if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 440 if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
442 441
443 if (proto_type == ETH_P_IP) { 442 if (proto_type == ETH_P_IP) {
444 length = ((ntohl(in_be32(addr + 443 length = ((ntohl(__raw_readl(addr +
445 XEL_HEADER_IP_LENGTH_OFFSET + 444 XEL_HEADER_IP_LENGTH_OFFSET +
446 XEL_RXBUFF_OFFSET)) >> 445 XEL_RXBUFF_OFFSET)) >>
447 XEL_HEADER_SHIFT) & 446 XEL_HEADER_SHIFT) &
@@ -463,9 +462,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
463 data, length); 462 data, length);
464 463
465 /* Acknowledge the frame */ 464 /* Acknowledge the frame */
466 reg_data = in_be32(addr + XEL_RSR_OFFSET); 465 reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
467 reg_data &= ~XEL_RSR_RECV_DONE_MASK; 466 reg_data &= ~XEL_RSR_RECV_DONE_MASK;
468 out_be32(addr + XEL_RSR_OFFSET, reg_data); 467 __raw_writel(reg_data, addr + XEL_RSR_OFFSET);
469 468
470 return length; 469 return length;
471} 470}
@@ -492,14 +491,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
492 491
493 xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); 492 xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
494 493
495 out_be32(addr + XEL_TPLR_OFFSET, ETH_ALEN); 494 __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
496 495
497 /* Update the MAC address in the EmacLite */ 496 /* Update the MAC address in the EmacLite */
498 reg_data = in_be32(addr + XEL_TSR_OFFSET); 497 reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
499 out_be32(addr + XEL_TSR_OFFSET, reg_data | XEL_TSR_PROG_MAC_ADDR); 498 __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
500 499
501 /* Wait for EmacLite to finish with the MAC address update */ 500 /* Wait for EmacLite to finish with the MAC address update */
502 while ((in_be32(addr + XEL_TSR_OFFSET) & 501 while ((__raw_readl(addr + XEL_TSR_OFFSET) &
503 XEL_TSR_PROG_MAC_ADDR) != 0) 502 XEL_TSR_PROG_MAC_ADDR) != 0)
504 ; 503 ;
505} 504}
@@ -669,31 +668,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
669 u32 tx_status; 668 u32 tx_status;
670 669
671 /* Check if there is Rx Data available */ 670 /* Check if there is Rx Data available */
672 if ((in_be32(base_addr + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK) || 671 if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
673 (in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) 672 XEL_RSR_RECV_DONE_MASK) ||
673 (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
674 & XEL_RSR_RECV_DONE_MASK)) 674 & XEL_RSR_RECV_DONE_MASK))
675 675
676 xemaclite_rx_handler(dev); 676 xemaclite_rx_handler(dev);
677 677
678 /* Check if the Transmission for the first buffer is completed */ 678 /* Check if the Transmission for the first buffer is completed */
679 tx_status = in_be32(base_addr + XEL_TSR_OFFSET); 679 tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
680 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && 680 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
681 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { 681 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
682 682
683 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; 683 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
684 out_be32(base_addr + XEL_TSR_OFFSET, tx_status); 684 __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
685 685
686 tx_complete = true; 686 tx_complete = true;
687 } 687 }
688 688
689 /* Check if the Transmission for the second buffer is completed */ 689 /* Check if the Transmission for the second buffer is completed */
690 tx_status = in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); 690 tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
691 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && 691 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
692 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { 692 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
693 693
694 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; 694 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
695 out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 695 __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
696 tx_status); 696 XEL_TSR_OFFSET);
697 697
698 tx_complete = true; 698 tx_complete = true;
699 } 699 }
@@ -726,7 +726,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
726 /* wait for the MDIO interface to not be busy or timeout 726 /* wait for the MDIO interface to not be busy or timeout
727 after some time. 727 after some time.
728 */ 728 */
729 while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) & 729 while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
730 XEL_MDIOCTRL_MDIOSTS_MASK) { 730 XEL_MDIOCTRL_MDIOSTS_MASK) {
731 if (end - jiffies <= 0) { 731 if (end - jiffies <= 0) {
732 WARN_ON(1); 732 WARN_ON(1);
@@ -762,17 +762,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
762 * MDIO Address register. Set the Status bit in the MDIO Control 762 * MDIO Address register. Set the Status bit in the MDIO Control
763 * register to start a MDIO read transaction. 763 * register to start a MDIO read transaction.
764 */ 764 */
765 ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET); 765 ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
766 out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET, 766 __raw_writel(XEL_MDIOADDR_OP_MASK |
767 XEL_MDIOADDR_OP_MASK | 767 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
768 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg)); 768 lp->base_addr + XEL_MDIOADDR_OFFSET);
769 out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET, 769 __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
770 ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK); 770 lp->base_addr + XEL_MDIOCTRL_OFFSET);
771 771
772 if (xemaclite_mdio_wait(lp)) 772 if (xemaclite_mdio_wait(lp))
773 return -ETIMEDOUT; 773 return -ETIMEDOUT;
774 774
775 rc = in_be32(lp->base_addr + XEL_MDIORD_OFFSET); 775 rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
776 776
777 dev_dbg(&lp->ndev->dev, 777 dev_dbg(&lp->ndev->dev,
778 "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", 778 "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -809,13 +809,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
809 * Data register. Finally, set the Status bit in the MDIO Control 809 * Data register. Finally, set the Status bit in the MDIO Control
810 * register to start a MDIO write transaction. 810 * register to start a MDIO write transaction.
811 */ 811 */
812 ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET); 812 ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
813 out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET, 813 __raw_writel(~XEL_MDIOADDR_OP_MASK &
814 ~XEL_MDIOADDR_OP_MASK & 814 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
815 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg)); 815 lp->base_addr + XEL_MDIOADDR_OFFSET);
816 out_be32(lp->base_addr + XEL_MDIOWR_OFFSET, val); 816 __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
817 out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET, 817 __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
818 ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK); 818 lp->base_addr + XEL_MDIOCTRL_OFFSET);
819 819
820 return 0; 820 return 0;
821} 821}
@@ -848,24 +848,39 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
848 int rc; 848 int rc;
849 struct resource res; 849 struct resource res;
850 struct device_node *np = of_get_parent(lp->phy_node); 850 struct device_node *np = of_get_parent(lp->phy_node);
851 struct device_node *npp;
851 852
852 /* Don't register the MDIO bus if the phy_node or its parent node 853 /* Don't register the MDIO bus if the phy_node or its parent node
853 * can't be found. 854 * can't be found.
854 */ 855 */
855 if (!np) 856 if (!np) {
857 dev_err(dev, "Failed to register mdio bus.\n");
856 return -ENODEV; 858 return -ENODEV;
859 }
860 npp = of_get_parent(np);
861
862 of_address_to_resource(npp, 0, &res);
863 if (lp->ndev->mem_start != res.start) {
864 struct phy_device *phydev;
865 phydev = of_phy_find_device(lp->phy_node);
866 if (!phydev)
867 dev_info(dev,
868 "MDIO of the phy is not registered yet\n");
869 return 0;
870 }
857 871
858 /* Enable the MDIO bus by asserting the enable bit in MDIO Control 872 /* Enable the MDIO bus by asserting the enable bit in MDIO Control
859 * register. 873 * register.
860 */ 874 */
861 out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET, 875 __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
862 XEL_MDIOCTRL_MDIOEN_MASK); 876 lp->base_addr + XEL_MDIOCTRL_OFFSET);
863 877
864 bus = mdiobus_alloc(); 878 bus = mdiobus_alloc();
865 if (!bus) 879 if (!bus) {
880 dev_err(dev, "Failed to allocate mdiobus\n");
866 return -ENOMEM; 881 return -ENOMEM;
882 }
867 883
868 of_address_to_resource(np, 0, &res);
869 snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", 884 snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
870 (unsigned long long)res.start); 885 (unsigned long long)res.start);
871 bus->priv = lp; 886 bus->priv = lp;
@@ -879,8 +894,10 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
879 lp->mii_bus = bus; 894 lp->mii_bus = bus;
880 895
881 rc = of_mdiobus_register(bus, np); 896 rc = of_mdiobus_register(bus, np);
882 if (rc) 897 if (rc) {
898 dev_err(dev, "Failed to register mdio bus.\n");
883 goto err_register; 899 goto err_register;
900 }
884 901
885 return 0; 902 return 0;
886 903
@@ -896,7 +913,7 @@ err_register:
896 * There's nothing in the Emaclite device to be configured when the link 913 * There's nothing in the Emaclite device to be configured when the link
897 * state changes. We just print the status. 914 * state changes. We just print the status.
898 */ 915 */
899void xemaclite_adjust_link(struct net_device *ndev) 916static void xemaclite_adjust_link(struct net_device *ndev)
900{ 917{
901 struct net_local *lp = netdev_priv(ndev); 918 struct net_local *lp = netdev_priv(ndev);
902 struct phy_device *phy = lp->phy_dev; 919 struct phy_device *phy = lp->phy_dev;
@@ -1058,13 +1075,14 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
1058 * This function un maps the IO region of the Emaclite device and frees the net 1075 * This function un maps the IO region of the Emaclite device and frees the net
1059 * device. 1076 * device.
1060 */ 1077 */
1061static void xemaclite_remove_ndev(struct net_device *ndev) 1078static void xemaclite_remove_ndev(struct net_device *ndev,
1079 struct platform_device *pdev)
1062{ 1080{
1063 if (ndev) { 1081 if (ndev) {
1064 struct net_local *lp = netdev_priv(ndev); 1082 struct net_local *lp = netdev_priv(ndev);
1065 1083
1066 if (lp->base_addr) 1084 if (lp->base_addr)
1067 iounmap((void __iomem __force *) (lp->base_addr)); 1085 devm_iounmap(&pdev->dev, lp->base_addr);
1068 free_netdev(ndev); 1086 free_netdev(ndev);
1069 } 1087 }
1070} 1088}
@@ -1110,8 +1128,7 @@ static struct net_device_ops xemaclite_netdev_ops;
1110 */ 1128 */
1111static int xemaclite_of_probe(struct platform_device *ofdev) 1129static int xemaclite_of_probe(struct platform_device *ofdev)
1112{ 1130{
1113 struct resource r_irq; /* Interrupt resources */ 1131 struct resource *res;
1114 struct resource r_mem; /* IO mem resources */
1115 struct net_device *ndev = NULL; 1132 struct net_device *ndev = NULL;
1116 struct net_local *lp = NULL; 1133 struct net_local *lp = NULL;
1117 struct device *dev = &ofdev->dev; 1134 struct device *dev = &ofdev->dev;
@@ -1121,20 +1138,6 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1121 1138
1122 dev_info(dev, "Device Tree Probing\n"); 1139 dev_info(dev, "Device Tree Probing\n");
1123 1140
1124 /* Get iospace for the device */
1125 rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
1126 if (rc) {
1127 dev_err(dev, "invalid address\n");
1128 return rc;
1129 }
1130
1131 /* Get IRQ for the device */
1132 rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
1133 if (!rc) {
1134 dev_err(dev, "no IRQ found\n");
1135 return rc;
1136 }
1137
1138 /* Create an ethernet device instance */ 1141 /* Create an ethernet device instance */
1139 ndev = alloc_etherdev(sizeof(struct net_local)); 1142 ndev = alloc_etherdev(sizeof(struct net_local));
1140 if (!ndev) 1143 if (!ndev)
@@ -1143,30 +1146,28 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1143 dev_set_drvdata(dev, ndev); 1146 dev_set_drvdata(dev, ndev);
1144 SET_NETDEV_DEV(ndev, &ofdev->dev); 1147 SET_NETDEV_DEV(ndev, &ofdev->dev);
1145 1148
1146 ndev->irq = r_irq.start;
1147 ndev->mem_start = r_mem.start;
1148 ndev->mem_end = r_mem.end;
1149
1150 lp = netdev_priv(ndev); 1149 lp = netdev_priv(ndev);
1151 lp->ndev = ndev; 1150 lp->ndev = ndev;
1152 1151
1153 if (!request_mem_region(ndev->mem_start, 1152 /* Get IRQ for the device */
1154 ndev->mem_end - ndev->mem_start + 1, 1153 res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
1155 DRIVER_NAME)) { 1154 if (!res) {
1156 dev_err(dev, "Couldn't lock memory region at %p\n", 1155 dev_err(dev, "no IRQ found\n");
1157 (void *)ndev->mem_start); 1156 goto error;
1158 rc = -EBUSY;
1159 goto error2;
1160 } 1157 }
1161 1158
1162 /* Get the virtual base address for the device */ 1159 ndev->irq = res->start;
1163 lp->base_addr = ioremap(r_mem.start, resource_size(&r_mem)); 1160
1164 if (NULL == lp->base_addr) { 1161 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
1165 dev_err(dev, "EmacLite: Could not allocate iomem\n"); 1162 lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
1166 rc = -EIO; 1163 if (IS_ERR(lp->base_addr)) {
1167 goto error1; 1164 rc = PTR_ERR(lp->base_addr);
1165 goto error;
1168 } 1166 }
1169 1167
1168 ndev->mem_start = res->start;
1169 ndev->mem_end = res->end;
1170
1170 spin_lock_init(&lp->reset_lock); 1171 spin_lock_init(&lp->reset_lock);
1171 lp->next_tx_buf_to_use = 0x0; 1172 lp->next_tx_buf_to_use = 0x0;
1172 lp->next_rx_buf_to_use = 0x0; 1173 lp->next_rx_buf_to_use = 0x0;
@@ -1181,8 +1182,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1181 dev_warn(dev, "No MAC address found\n"); 1182 dev_warn(dev, "No MAC address found\n");
1182 1183
1183 /* Clear the Tx CSR's in case this is a restart */ 1184 /* Clear the Tx CSR's in case this is a restart */
1184 out_be32(lp->base_addr + XEL_TSR_OFFSET, 0); 1185 __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
1185 out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0); 1186 __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
1186 1187
1187 /* Set the MAC address in the EmacLite device */ 1188 /* Set the MAC address in the EmacLite device */
1188 xemaclite_update_address(lp, ndev->dev_addr); 1189 xemaclite_update_address(lp, ndev->dev_addr);
@@ -1203,7 +1204,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1203 if (rc) { 1204 if (rc) {
1204 dev_err(dev, 1205 dev_err(dev,
1205 "Cannot register network device, aborting\n"); 1206 "Cannot register network device, aborting\n");
1206 goto error1; 1207 goto error;
1207 } 1208 }
1208 1209
1209 dev_info(dev, 1210 dev_info(dev,
@@ -1212,11 +1213,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1212 (unsigned int __force)lp->base_addr, ndev->irq); 1213 (unsigned int __force)lp->base_addr, ndev->irq);
1213 return 0; 1214 return 0;
1214 1215
1215error1: 1216error:
1216 release_mem_region(ndev->mem_start, resource_size(&r_mem)); 1217 xemaclite_remove_ndev(ndev, ofdev);
1217
1218error2:
1219 xemaclite_remove_ndev(ndev);
1220 return rc; 1218 return rc;
1221} 1219}
1222 1220
@@ -1251,9 +1249,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
1251 of_node_put(lp->phy_node); 1249 of_node_put(lp->phy_node);
1252 lp->phy_node = NULL; 1250 lp->phy_node = NULL;
1253 1251
1254 release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1); 1252 xemaclite_remove_ndev(ndev, of_dev);
1255
1256 xemaclite_remove_ndev(ndev);
1257 dev_set_drvdata(dev, NULL); 1253 dev_set_drvdata(dev, NULL);
1258 1254
1259 return 0; 1255 return 0;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 6958a5e87703..3d689fcb7917 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1472,7 +1472,6 @@ err_phy_dis:
1472 phy_disconnect(port->phydev); 1472 phy_disconnect(port->phydev);
1473err_free_mem: 1473err_free_mem:
1474 npe_port_tab[NPE_ID(port->id)] = NULL; 1474 npe_port_tab[NPE_ID(port->id)] = NULL;
1475 platform_set_drvdata(pdev, NULL);
1476 release_resource(port->mem_res); 1475 release_resource(port->mem_res);
1477err_npe_rel: 1476err_npe_rel:
1478 npe_release(port->npe); 1477 npe_release(port->npe);
@@ -1489,7 +1488,6 @@ static int eth_remove_one(struct platform_device *pdev)
1489 unregister_netdev(dev); 1488 unregister_netdev(dev);
1490 phy_disconnect(port->phydev); 1489 phy_disconnect(port->phydev);
1491 npe_port_tab[NPE_ID(port->id)] = NULL; 1490 npe_port_tab[NPE_ID(port->id)] = NULL;
1492 platform_set_drvdata(pdev, NULL);
1493 npe_release(port->npe); 1491 npe_release(port->npe);
1494 release_resource(port->mem_res); 1492 release_resource(port->mem_res);
1495 free_netdev(dev); 1493 free_netdev(dev);
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index d5bd563ac131..f5d7305a5784 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -2246,15 +2246,4 @@ static struct pci_driver skfddi_pci_driver = {
2246 .remove = skfp_remove_one, 2246 .remove = skfp_remove_one,
2247}; 2247};
2248 2248
2249static int __init skfd_init(void) 2249module_pci_driver(skfddi_pci_driver);
2250{
2251 return pci_register_driver(&skfddi_pci_driver);
2252}
2253
2254static void __exit skfd_exit(void)
2255{
2256 pci_unregister_driver(&skfddi_pci_driver);
2257}
2258
2259module_init(skfd_init);
2260module_exit(skfd_exit);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 02de6c891670..f91bf0ddf031 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -103,7 +103,7 @@ static struct packet_type bpq_packet_type __read_mostly = {
103}; 103};
104 104
105static struct notifier_block bpq_dev_notifier = { 105static struct notifier_block bpq_dev_notifier = {
106 .notifier_call =bpq_device_event, 106 .notifier_call = bpq_device_event,
107}; 107};
108 108
109 109
@@ -544,9 +544,10 @@ static void bpq_free_device(struct net_device *ndev)
544/* 544/*
545 * Handle device status changes. 545 * Handle device status changes.
546 */ 546 */
547static int bpq_device_event(struct notifier_block *this,unsigned long event, void *ptr) 547static int bpq_device_event(struct notifier_block *this,
548 unsigned long event, void *ptr)
548{ 549{
549 struct net_device *dev = (struct net_device *)ptr; 550 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
550 551
551 if (!net_eq(dev_net(dev), &init_net)) 552 if (!net_eq(dev_net(dev), &init_net))
552 return NOTIFY_DONE; 553 return NOTIFY_DONE;
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 3c4d6274bb9b..00ed75155ce8 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1686,15 +1686,4 @@ static struct pci_driver rr_driver = {
1686 .remove = rr_remove_one, 1686 .remove = rr_remove_one,
1687}; 1687};
1688 1688
1689static int __init rr_init_module(void) 1689module_pci_driver(rr_driver);
1690{
1691 return pci_register_driver(&rr_driver);
1692}
1693
1694static void __exit rr_cleanup_module(void)
1695{
1696 pci_unregister_driver(&rr_driver);
1697}
1698
1699module_init(rr_init_module);
1700module_exit(rr_cleanup_module);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 22b4527321b1..c74f384c87d5 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -794,7 +794,6 @@ static int bfin_sir_remove(struct platform_device *pdev)
794 kfree(self->rx_buff.head); 794 kfree(self->rx_buff.head);
795 free_netdev(dev); 795 free_netdev(dev);
796 kfree(sir_port); 796 kfree(sir_port);
797 platform_set_drvdata(pdev, NULL);
798 797
799 return 0; 798 return 0;
800} 799}
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9448587de453..4455425f1c77 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -838,7 +838,6 @@ static int sh_irda_remove(struct platform_device *pdev)
838 sh_irda_remove_iobuf(self); 838 sh_irda_remove_iobuf(self);
839 iounmap(self->membase); 839 iounmap(self->membase);
840 free_netdev(ndev); 840 free_netdev(ndev);
841 platform_set_drvdata(pdev, NULL);
842 841
843 return 0; 842 return 0;
844} 843}
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 24aefcd84065..89682b49900f 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -796,7 +796,6 @@ static int sh_sir_remove(struct platform_device *pdev)
796 sh_sir_remove_iobuf(self); 796 sh_sir_remove_iobuf(self);
797 iounmap(self->membase); 797 iounmap(self->membase);
798 free_netdev(ndev); 798 free_netdev(ndev);
799 platform_set_drvdata(pdev, NULL);
800 799
801 return 0; 800 return 0;
802} 801}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6e91931a1c2c..18373b6ae37d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -638,6 +638,14 @@ static int macvlan_ethtool_get_settings(struct net_device *dev,
638 return __ethtool_get_settings(vlan->lowerdev, cmd); 638 return __ethtool_get_settings(vlan->lowerdev, cmd);
639} 639}
640 640
641static netdev_features_t macvlan_fix_features(struct net_device *dev,
642 netdev_features_t features)
643{
644 struct macvlan_dev *vlan = netdev_priv(dev);
645
646 return features & (vlan->set_features | ~MACVLAN_FEATURES);
647}
648
641static const struct ethtool_ops macvlan_ethtool_ops = { 649static const struct ethtool_ops macvlan_ethtool_ops = {
642 .get_link = ethtool_op_get_link, 650 .get_link = ethtool_op_get_link,
643 .get_settings = macvlan_ethtool_get_settings, 651 .get_settings = macvlan_ethtool_get_settings,
@@ -651,6 +659,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
651 .ndo_stop = macvlan_stop, 659 .ndo_stop = macvlan_stop,
652 .ndo_start_xmit = macvlan_start_xmit, 660 .ndo_start_xmit = macvlan_start_xmit,
653 .ndo_change_mtu = macvlan_change_mtu, 661 .ndo_change_mtu = macvlan_change_mtu,
662 .ndo_fix_features = macvlan_fix_features,
654 .ndo_change_rx_flags = macvlan_change_rx_flags, 663 .ndo_change_rx_flags = macvlan_change_rx_flags,
655 .ndo_set_mac_address = macvlan_set_mac_address, 664 .ndo_set_mac_address = macvlan_set_mac_address,
656 .ndo_set_rx_mode = macvlan_set_mac_lists, 665 .ndo_set_rx_mode = macvlan_set_mac_lists,
@@ -791,6 +800,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
791 vlan->port = port; 800 vlan->port = port;
792 vlan->receive = receive; 801 vlan->receive = receive;
793 vlan->forward = forward; 802 vlan->forward = forward;
803 vlan->set_features = MACVLAN_FEATURES;
794 804
795 vlan->mode = MACVLAN_MODE_VEPA; 805 vlan->mode = MACVLAN_MODE_VEPA;
796 if (data && data[IFLA_MACVLAN_MODE]) 806 if (data && data[IFLA_MACVLAN_MODE])
@@ -927,7 +937,7 @@ static struct rtnl_link_ops macvlan_link_ops = {
927static int macvlan_device_event(struct notifier_block *unused, 937static int macvlan_device_event(struct notifier_block *unused,
928 unsigned long event, void *ptr) 938 unsigned long event, void *ptr)
929{ 939{
930 struct net_device *dev = ptr; 940 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
931 struct macvlan_dev *vlan, *next; 941 struct macvlan_dev *vlan, *next;
932 struct macvlan_port *port; 942 struct macvlan_port *port;
933 LIST_HEAD(list_kill); 943 LIST_HEAD(list_kill);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index b6dd6a75919a..f2c4a3b218fc 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -31,10 +31,6 @@
31 * macvtap_proto is used to allocate queues through the sock allocation 31 * macvtap_proto is used to allocate queues through the sock allocation
32 * mechanism. 32 * mechanism.
33 * 33 *
34 * TODO: multiqueue support is currently not implemented, even though
35 * macvtap is basically prepared for that. We will need to add this
36 * here as well as in virtio-net and qemu to get line rate on 10gbit
37 * adapters from a guest.
38 */ 34 */
39struct macvtap_queue { 35struct macvtap_queue {
40 struct sock sk; 36 struct sock sk;
@@ -44,6 +40,9 @@ struct macvtap_queue {
44 struct macvlan_dev __rcu *vlan; 40 struct macvlan_dev __rcu *vlan;
45 struct file *file; 41 struct file *file;
46 unsigned int flags; 42 unsigned int flags;
43 u16 queue_index;
44 bool enabled;
45 struct list_head next;
47}; 46};
48 47
49static struct proto macvtap_proto = { 48static struct proto macvtap_proto = {
@@ -66,11 +65,14 @@ static struct cdev macvtap_cdev;
66 65
67static const struct proto_ops macvtap_socket_ops; 66static const struct proto_ops macvtap_socket_ops;
68 67
68#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
69 NETIF_F_TSO6 | NETIF_F_UFO)
70#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
69/* 71/*
70 * RCU usage: 72 * RCU usage:
71 * The macvtap_queue and the macvlan_dev are loosely coupled, the 73 * The macvtap_queue and the macvlan_dev are loosely coupled, the
72 * pointers from one to the other can only be read while rcu_read_lock 74 * pointers from one to the other can only be read while rcu_read_lock
73 * or macvtap_lock is held. 75 * or rtnl is held.
74 * 76 *
75 * Both the file and the macvlan_dev hold a reference on the macvtap_queue 77 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
76 * through sock_hold(&q->sk). When the macvlan_dev goes away first, 78 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
@@ -82,54 +84,84 @@ static const struct proto_ops macvtap_socket_ops;
82 * file or the dev. The data structure is freed through __sk_free 84 * file or the dev. The data structure is freed through __sk_free
83 * when both our references and any pending SKBs are gone. 85 * when both our references and any pending SKBs are gone.
84 */ 86 */
85static DEFINE_SPINLOCK(macvtap_lock);
86 87
87/* 88static int macvtap_enable_queue(struct net_device *dev, struct file *file,
88 * get_slot: return a [unused/occupied] slot in vlan->taps[]: 89 struct macvtap_queue *q)
89 * - if 'q' is NULL, return the first empty slot;
90 * - otherwise, return the slot this pointer occupies.
91 */
92static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
93{ 90{
94 int i; 91 struct macvlan_dev *vlan = netdev_priv(dev);
92 int err = -EINVAL;
95 93
96 for (i = 0; i < MAX_MACVTAP_QUEUES; i++) { 94 ASSERT_RTNL();
97 if (rcu_dereference_protected(vlan->taps[i], 95
98 lockdep_is_held(&macvtap_lock)) == q) 96 if (q->enabled)
99 return i; 97 goto out;
100 }
101 98
102 /* Should never happen */ 99 err = 0;
103 BUG_ON(1); 100 rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
101 q->queue_index = vlan->numvtaps;
102 q->enabled = true;
103
104 vlan->numvtaps++;
105out:
106 return err;
104} 107}
105 108
106static int macvtap_set_queue(struct net_device *dev, struct file *file, 109static int macvtap_set_queue(struct net_device *dev, struct file *file,
107 struct macvtap_queue *q) 110 struct macvtap_queue *q)
108{ 111{
109 struct macvlan_dev *vlan = netdev_priv(dev); 112 struct macvlan_dev *vlan = netdev_priv(dev);
110 int index;
111 int err = -EBUSY; 113 int err = -EBUSY;
112 114
113 spin_lock(&macvtap_lock); 115 rtnl_lock();
114 if (vlan->numvtaps == MAX_MACVTAP_QUEUES) 116 if (vlan->numqueues == MAX_MACVTAP_QUEUES)
115 goto out; 117 goto out;
116 118
117 err = 0; 119 err = 0;
118 index = get_slot(vlan, NULL);
119 rcu_assign_pointer(q->vlan, vlan); 120 rcu_assign_pointer(q->vlan, vlan);
120 rcu_assign_pointer(vlan->taps[index], q); 121 rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
121 sock_hold(&q->sk); 122 sock_hold(&q->sk);
122 123
123 q->file = file; 124 q->file = file;
125 q->queue_index = vlan->numvtaps;
126 q->enabled = true;
124 file->private_data = q; 127 file->private_data = q;
128 list_add_tail(&q->next, &vlan->queue_list);
125 129
126 vlan->numvtaps++; 130 vlan->numvtaps++;
131 vlan->numqueues++;
127 132
128out: 133out:
129 spin_unlock(&macvtap_lock); 134 rtnl_unlock();
130 return err; 135 return err;
131} 136}
132 137
138static int macvtap_disable_queue(struct macvtap_queue *q)
139{
140 struct macvlan_dev *vlan;
141 struct macvtap_queue *nq;
142
143 ASSERT_RTNL();
144 if (!q->enabled)
145 return -EINVAL;
146
147 vlan = rtnl_dereference(q->vlan);
148
149 if (vlan) {
150 int index = q->queue_index;
151 BUG_ON(index >= vlan->numvtaps);
152 nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
153 nq->queue_index = index;
154
155 rcu_assign_pointer(vlan->taps[index], nq);
156 RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
157 q->enabled = false;
158
159 vlan->numvtaps--;
160 }
161
162 return 0;
163}
164
133/* 165/*
134 * The file owning the queue got closed, give up both 166 * The file owning the queue got closed, give up both
135 * the reference that the files holds as well as the 167 * the reference that the files holds as well as the
@@ -142,19 +174,20 @@ static void macvtap_put_queue(struct macvtap_queue *q)
142{ 174{
143 struct macvlan_dev *vlan; 175 struct macvlan_dev *vlan;
144 176
145 spin_lock(&macvtap_lock); 177 rtnl_lock();
146 vlan = rcu_dereference_protected(q->vlan, 178 vlan = rtnl_dereference(q->vlan);
147 lockdep_is_held(&macvtap_lock)); 179
148 if (vlan) { 180 if (vlan) {
149 int index = get_slot(vlan, q); 181 if (q->enabled)
182 BUG_ON(macvtap_disable_queue(q));
150 183
151 RCU_INIT_POINTER(vlan->taps[index], NULL); 184 vlan->numqueues--;
152 RCU_INIT_POINTER(q->vlan, NULL); 185 RCU_INIT_POINTER(q->vlan, NULL);
153 sock_put(&q->sk); 186 sock_put(&q->sk);
154 --vlan->numvtaps; 187 list_del_init(&q->next);
155 } 188 }
156 189
157 spin_unlock(&macvtap_lock); 190 rtnl_unlock();
158 191
159 synchronize_rcu(); 192 synchronize_rcu();
160 sock_put(&q->sk); 193 sock_put(&q->sk);
@@ -172,7 +205,12 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
172{ 205{
173 struct macvlan_dev *vlan = netdev_priv(dev); 206 struct macvlan_dev *vlan = netdev_priv(dev);
174 struct macvtap_queue *tap = NULL; 207 struct macvtap_queue *tap = NULL;
175 int numvtaps = vlan->numvtaps; 208 /* Access to taps array is protected by rcu, but access to numvtaps
209 * isn't. Below we use it to lookup a queue, but treat it as a hint
210 * and validate that the result isn't NULL - in case we are
211 * racing against queue removal.
212 */
213 int numvtaps = ACCESS_ONCE(vlan->numvtaps);
176 __u32 rxq; 214 __u32 rxq;
177 215
178 if (!numvtaps) 216 if (!numvtaps)
@@ -182,8 +220,7 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
182 rxq = skb_get_rxhash(skb); 220 rxq = skb_get_rxhash(skb);
183 if (rxq) { 221 if (rxq) {
184 tap = rcu_dereference(vlan->taps[rxq % numvtaps]); 222 tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
185 if (tap) 223 goto out;
186 goto out;
187 } 224 }
188 225
189 if (likely(skb_rx_queue_recorded(skb))) { 226 if (likely(skb_rx_queue_recorded(skb))) {
@@ -193,17 +230,10 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
193 rxq -= numvtaps; 230 rxq -= numvtaps;
194 231
195 tap = rcu_dereference(vlan->taps[rxq]); 232 tap = rcu_dereference(vlan->taps[rxq]);
196 if (tap) 233 goto out;
197 goto out;
198 }
199
200 /* Everything failed - find first available queue */
201 for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
202 tap = rcu_dereference(vlan->taps[rxq]);
203 if (tap)
204 break;
205 } 234 }
206 235
236 tap = rcu_dereference(vlan->taps[0]);
207out: 237out:
208 return tap; 238 return tap;
209} 239}
@@ -216,27 +246,24 @@ out:
216static void macvtap_del_queues(struct net_device *dev) 246static void macvtap_del_queues(struct net_device *dev)
217{ 247{
218 struct macvlan_dev *vlan = netdev_priv(dev); 248 struct macvlan_dev *vlan = netdev_priv(dev);
219 struct macvtap_queue *q, *qlist[MAX_MACVTAP_QUEUES]; 249 struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
220 int i, j = 0; 250 int i, j = 0;
221 251
222 /* macvtap_put_queue can free some slots, so go through all slots */ 252 ASSERT_RTNL();
223 spin_lock(&macvtap_lock); 253 list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
224 for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) { 254 list_del_init(&q->next);
225 q = rcu_dereference_protected(vlan->taps[i], 255 qlist[j++] = q;
226 lockdep_is_held(&macvtap_lock)); 256 RCU_INIT_POINTER(q->vlan, NULL);
227 if (q) { 257 if (q->enabled)
228 qlist[j++] = q;
229 RCU_INIT_POINTER(vlan->taps[i], NULL);
230 RCU_INIT_POINTER(q->vlan, NULL);
231 vlan->numvtaps--; 258 vlan->numvtaps--;
232 } 259 vlan->numqueues--;
233 } 260 }
234 BUG_ON(vlan->numvtaps != 0); 261 for (i = 0; i < vlan->numvtaps; i++)
262 RCU_INIT_POINTER(vlan->taps[i], NULL);
263 BUG_ON(vlan->numvtaps);
264 BUG_ON(vlan->numqueues);
235 /* guarantee that any future macvtap_set_queue will fail */ 265 /* guarantee that any future macvtap_set_queue will fail */
236 vlan->numvtaps = MAX_MACVTAP_QUEUES; 266 vlan->numvtaps = MAX_MACVTAP_QUEUES;
237 spin_unlock(&macvtap_lock);
238
239 synchronize_rcu();
240 267
241 for (--j; j >= 0; j--) 268 for (--j; j >= 0; j--)
242 sock_put(&qlist[j]->sk); 269 sock_put(&qlist[j]->sk);
@@ -249,14 +276,44 @@ static void macvtap_del_queues(struct net_device *dev)
249 */ 276 */
250static int macvtap_forward(struct net_device *dev, struct sk_buff *skb) 277static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
251{ 278{
279 struct macvlan_dev *vlan = netdev_priv(dev);
252 struct macvtap_queue *q = macvtap_get_queue(dev, skb); 280 struct macvtap_queue *q = macvtap_get_queue(dev, skb);
281 netdev_features_t features;
253 if (!q) 282 if (!q)
254 goto drop; 283 goto drop;
255 284
256 if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len) 285 if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
257 goto drop; 286 goto drop;
258 287
259 skb_queue_tail(&q->sk.sk_receive_queue, skb); 288 skb->dev = dev;
289 /* Apply the forward feature mask so that we perform segmentation
290 * according to users wishes.
291 */
292 features = netif_skb_features(skb) & vlan->tap_features;
293 if (netif_needs_gso(skb, features)) {
294 struct sk_buff *segs = __skb_gso_segment(skb, features, false);
295
296 if (IS_ERR(segs))
297 goto drop;
298
299 if (!segs) {
300 skb_queue_tail(&q->sk.sk_receive_queue, skb);
301 goto wake_up;
302 }
303
304 kfree_skb(skb);
305 while (segs) {
306 struct sk_buff *nskb = segs->next;
307
308 segs->next = NULL;
309 skb_queue_tail(&q->sk.sk_receive_queue, segs);
310 segs = nskb;
311 }
312 } else {
313 skb_queue_tail(&q->sk.sk_receive_queue, skb);
314 }
315
316wake_up:
260 wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND); 317 wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
261 return NET_RX_SUCCESS; 318 return NET_RX_SUCCESS;
262 319
@@ -322,6 +379,14 @@ static int macvtap_newlink(struct net *src_net,
322 struct nlattr *tb[], 379 struct nlattr *tb[],
323 struct nlattr *data[]) 380 struct nlattr *data[])
324{ 381{
382 struct macvlan_dev *vlan = netdev_priv(dev);
383 INIT_LIST_HEAD(&vlan->queue_list);
384
385 /* Since macvlan supports all offloads by default, make
386 * tap support all offloads also.
387 */
388 vlan->tap_features = TUN_OFFLOADS;
389
325 /* Don't put anything that may fail after macvlan_common_newlink 390 /* Don't put anything that may fail after macvlan_common_newlink
326 * because we can't undo what it does. 391 * because we can't undo what it does.
327 */ 392 */
@@ -385,7 +450,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
385 if (!q) 450 if (!q)
386 goto out; 451 goto out;
387 452
388 q->sock.wq = &q->wq; 453 RCU_INIT_POINTER(q->sock.wq, &q->wq);
389 init_waitqueue_head(&q->wq.wait); 454 init_waitqueue_head(&q->wq.wait);
390 q->sock.type = SOCK_RAW; 455 q->sock.type = SOCK_RAW;
391 q->sock.state = SS_CONNECTED; 456 q->sock.state = SS_CONNECTED;
@@ -729,8 +794,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
729 794
730 skb_probe_transport_header(skb, ETH_HLEN); 795 skb_probe_transport_header(skb, ETH_HLEN);
731 796
732 rcu_read_lock_bh(); 797 rcu_read_lock();
733 vlan = rcu_dereference_bh(q->vlan); 798 vlan = rcu_dereference(q->vlan);
734 /* copy skb_ubuf_info for callback when skb has no error */ 799 /* copy skb_ubuf_info for callback when skb has no error */
735 if (zerocopy) { 800 if (zerocopy) {
736 skb_shinfo(skb)->destructor_arg = m->msg_control; 801 skb_shinfo(skb)->destructor_arg = m->msg_control;
@@ -741,7 +806,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
741 macvlan_start_xmit(skb, vlan->dev); 806 macvlan_start_xmit(skb, vlan->dev);
742 else 807 else
743 kfree_skb(skb); 808 kfree_skb(skb);
744 rcu_read_unlock_bh(); 809 rcu_read_unlock();
745 810
746 return total_len; 811 return total_len;
747 812
@@ -749,11 +814,11 @@ err_kfree:
749 kfree_skb(skb); 814 kfree_skb(skb);
750 815
751err: 816err:
752 rcu_read_lock_bh(); 817 rcu_read_lock();
753 vlan = rcu_dereference_bh(q->vlan); 818 vlan = rcu_dereference(q->vlan);
754 if (vlan) 819 if (vlan)
755 vlan->dev->stats.tx_dropped++; 820 vlan->dev->stats.tx_dropped++;
756 rcu_read_unlock_bh(); 821 rcu_read_unlock();
757 822
758 return err; 823 return err;
759} 824}
@@ -829,11 +894,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
829 copied += len; 894 copied += len;
830 895
831done: 896done:
832 rcu_read_lock_bh(); 897 rcu_read_lock();
833 vlan = rcu_dereference_bh(q->vlan); 898 vlan = rcu_dereference(q->vlan);
834 if (vlan) 899 if (vlan)
835 macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); 900 macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
836 rcu_read_unlock_bh(); 901 rcu_read_unlock();
837 902
838 return ret ? ret : copied; 903 return ret ? ret : copied;
839} 904}
@@ -847,7 +912,9 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
847 ssize_t ret = 0; 912 ssize_t ret = 0;
848 913
849 while (len) { 914 while (len) {
850 prepare_to_wait(sk_sleep(&q->sk), &wait, TASK_INTERRUPTIBLE); 915 if (!noblock)
916 prepare_to_wait(sk_sleep(&q->sk), &wait,
917 TASK_INTERRUPTIBLE);
851 918
852 /* Read frames from the queue */ 919 /* Read frames from the queue */
853 skb = skb_dequeue(&q->sk.sk_receive_queue); 920 skb = skb_dequeue(&q->sk.sk_receive_queue);
@@ -869,7 +936,8 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
869 break; 936 break;
870 } 937 }
871 938
872 finish_wait(sk_sleep(&q->sk), &wait); 939 if (!noblock)
940 finish_wait(sk_sleep(&q->sk), &wait);
873 return ret; 941 return ret;
874} 942}
875 943
@@ -892,6 +960,96 @@ out:
892 return ret; 960 return ret;
893} 961}
894 962
963static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
964{
965 struct macvlan_dev *vlan;
966
967 ASSERT_RTNL();
968 vlan = rtnl_dereference(q->vlan);
969 if (vlan)
970 dev_hold(vlan->dev);
971
972 return vlan;
973}
974
975static void macvtap_put_vlan(struct macvlan_dev *vlan)
976{
977 dev_put(vlan->dev);
978}
979
980static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
981{
982 struct macvtap_queue *q = file->private_data;
983 struct macvlan_dev *vlan;
984 int ret;
985
986 vlan = macvtap_get_vlan(q);
987 if (!vlan)
988 return -EINVAL;
989
990 if (flags & IFF_ATTACH_QUEUE)
991 ret = macvtap_enable_queue(vlan->dev, file, q);
992 else if (flags & IFF_DETACH_QUEUE)
993 ret = macvtap_disable_queue(q);
994 else
995 ret = -EINVAL;
996
997 macvtap_put_vlan(vlan);
998 return ret;
999}
1000
1001static int set_offload(struct macvtap_queue *q, unsigned long arg)
1002{
1003 struct macvlan_dev *vlan;
1004 netdev_features_t features;
1005 netdev_features_t feature_mask = 0;
1006
1007 vlan = rtnl_dereference(q->vlan);
1008 if (!vlan)
1009 return -ENOLINK;
1010
1011 features = vlan->dev->features;
1012
1013 if (arg & TUN_F_CSUM) {
1014 feature_mask = NETIF_F_HW_CSUM;
1015
1016 if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
1017 if (arg & TUN_F_TSO_ECN)
1018 feature_mask |= NETIF_F_TSO_ECN;
1019 if (arg & TUN_F_TSO4)
1020 feature_mask |= NETIF_F_TSO;
1021 if (arg & TUN_F_TSO6)
1022 feature_mask |= NETIF_F_TSO6;
1023 }
1024
1025 if (arg & TUN_F_UFO)
1026 feature_mask |= NETIF_F_UFO;
1027 }
1028
1029 /* tun/tap driver inverts the usage for TSO offloads, where
1030 * setting the TSO bit means that the userspace wants to
1031 * accept TSO frames and turning it off means that user space
1032 * does not support TSO.
1033 * For macvtap, we have to invert it to mean the same thing.
1034 * When user space turns off TSO, we turn off GSO/LRO so that
1035 * user-space will not receive TSO frames.
1036 */
1037 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
1038 features |= RX_OFFLOADS;
1039 else
1040 features &= ~RX_OFFLOADS;
1041
1042 /* tap_features are the same as features on tun/tap and
1043 * reflect user expectations.
1044 */
1045 vlan->tap_features = vlan->dev->features &
1046 (feature_mask | ~TUN_OFFLOADS);
1047 vlan->set_features = features;
1048 netdev_update_features(vlan->dev);
1049
1050 return 0;
1051}
1052
895/* 1053/*
896 * provide compatibility with generic tun/tap interface 1054 * provide compatibility with generic tun/tap interface
897 */ 1055 */
@@ -915,7 +1073,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
915 return -EFAULT; 1073 return -EFAULT;
916 1074
917 ret = 0; 1075 ret = 0;
918 if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP)) 1076 if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
1077 (IFF_NO_PI | IFF_TAP))
919 ret = -EINVAL; 1078 ret = -EINVAL;
920 else 1079 else
921 q->flags = u; 1080 q->flags = u;
@@ -923,24 +1082,31 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
923 return ret; 1082 return ret;
924 1083
925 case TUNGETIFF: 1084 case TUNGETIFF:
926 rcu_read_lock_bh(); 1085 rtnl_lock();
927 vlan = rcu_dereference_bh(q->vlan); 1086 vlan = macvtap_get_vlan(q);
928 if (vlan) 1087 if (!vlan) {
929 dev_hold(vlan->dev); 1088 rtnl_unlock();
930 rcu_read_unlock_bh();
931
932 if (!vlan)
933 return -ENOLINK; 1089 return -ENOLINK;
1090 }
934 1091
935 ret = 0; 1092 ret = 0;
936 if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || 1093 if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
937 put_user(q->flags, &ifr->ifr_flags)) 1094 put_user(q->flags, &ifr->ifr_flags))
938 ret = -EFAULT; 1095 ret = -EFAULT;
939 dev_put(vlan->dev); 1096 macvtap_put_vlan(vlan);
1097 rtnl_unlock();
940 return ret; 1098 return ret;
941 1099
1100 case TUNSETQUEUE:
1101 if (get_user(u, &ifr->ifr_flags))
1102 return -EFAULT;
1103 rtnl_lock();
1104 ret = macvtap_ioctl_set_queue(file, u);
1105 rtnl_unlock();
1106
942 case TUNGETFEATURES: 1107 case TUNGETFEATURES:
943 if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR, up)) 1108 if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
1109 IFF_MULTI_QUEUE, up))
944 return -EFAULT; 1110 return -EFAULT;
945 return 0; 1111 return 0;
946 1112
@@ -976,7 +1142,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
976 got enabled for forwarded frames */ 1142 got enabled for forwarded frames */
977 if (!(q->flags & IFF_VNET_HDR)) 1143 if (!(q->flags & IFF_VNET_HDR))
978 return -EINVAL; 1144 return -EINVAL;
979 return 0; 1145 rtnl_lock();
1146 ret = set_offload(q, arg);
1147 rtnl_unlock();
1148 return ret;
980 1149
981 default: 1150 default:
982 return -EINVAL; 1151 return -EINVAL;
@@ -1055,7 +1224,7 @@ EXPORT_SYMBOL_GPL(macvtap_get_socket);
1055static int macvtap_device_event(struct notifier_block *unused, 1224static int macvtap_device_event(struct notifier_block *unused,
1056 unsigned long event, void *ptr) 1225 unsigned long event, void *ptr)
1057{ 1226{
1058 struct net_device *dev = ptr; 1227 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1059 struct macvlan_dev *vlan; 1228 struct macvlan_dev *vlan;
1060 struct device *classdev; 1229 struct device *classdev;
1061 dev_t devt; 1230 dev_t devt;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4f777ed9b089..4822aafe638b 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -654,12 +654,11 @@ static struct configfs_subsystem netconsole_subsys = {
654 654
655/* Handle network interface device notifications */ 655/* Handle network interface device notifications */
656static int netconsole_netdev_event(struct notifier_block *this, 656static int netconsole_netdev_event(struct notifier_block *this,
657 unsigned long event, 657 unsigned long event, void *ptr)
658 void *ptr)
659{ 658{
660 unsigned long flags; 659 unsigned long flags;
661 struct netconsole_target *nt; 660 struct netconsole_target *nt;
662 struct net_device *dev = ptr; 661 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
663 bool stopped = false; 662 bool stopped = false;
664 663
665 if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER || 664 if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
new file mode 100644
index 000000000000..b57ce5f48962
--- /dev/null
+++ b/drivers/net/nlmon.c
@@ -0,0 +1,181 @@
1#include <linux/module.h>
2#include <linux/kernel.h>
3#include <linux/netdevice.h>
4#include <linux/netlink.h>
5#include <net/net_namespace.h>
6#include <linux/if_arp.h>
7#include <net/rtnetlink.h>
8
9struct pcpu_lstats {
10 u64 packets;
11 u64 bytes;
12 struct u64_stats_sync syncp;
13};
14
15static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
16{
17 int len = skb->len;
18 struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
19
20 u64_stats_update_begin(&stats->syncp);
21 stats->bytes += len;
22 stats->packets++;
23 u64_stats_update_end(&stats->syncp);
24
25 dev_kfree_skb(skb);
26
27 return NETDEV_TX_OK;
28}
29
30static int nlmon_is_valid_mtu(int new_mtu)
31{
32 /* Note that in netlink we do not really have an upper limit. On
33 * default, we use NLMSG_GOODSIZE. Here at least we should make
34 * sure that it's at least the header size.
35 */
36 return new_mtu >= (int) sizeof(struct nlmsghdr);
37}
38
39static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
40{
41 if (!nlmon_is_valid_mtu(new_mtu))
42 return -EINVAL;
43
44 dev->mtu = new_mtu;
45 return 0;
46}
47
48static int nlmon_dev_init(struct net_device *dev)
49{
50 dev->lstats = alloc_percpu(struct pcpu_lstats);
51
52 return dev->lstats == NULL ? -ENOMEM : 0;
53}
54
55static void nlmon_dev_uninit(struct net_device *dev)
56{
57 free_percpu(dev->lstats);
58}
59
60struct nlmon {
61 struct netlink_tap nt;
62};
63
64static int nlmon_open(struct net_device *dev)
65{
66 struct nlmon *nlmon = netdev_priv(dev);
67
68 nlmon->nt.dev = dev;
69 nlmon->nt.module = THIS_MODULE;
70 return netlink_add_tap(&nlmon->nt);
71}
72
73static int nlmon_close(struct net_device *dev)
74{
75 struct nlmon *nlmon = netdev_priv(dev);
76
77 return netlink_remove_tap(&nlmon->nt);
78}
79
80static struct rtnl_link_stats64 *
81nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
82{
83 int i;
84 u64 bytes = 0, packets = 0;
85
86 for_each_possible_cpu(i) {
87 const struct pcpu_lstats *nl_stats;
88 u64 tbytes, tpackets;
89 unsigned int start;
90
91 nl_stats = per_cpu_ptr(dev->lstats, i);
92
93 do {
94 start = u64_stats_fetch_begin_bh(&nl_stats->syncp);
95 tbytes = nl_stats->bytes;
96 tpackets = nl_stats->packets;
97 } while (u64_stats_fetch_retry_bh(&nl_stats->syncp, start));
98
99 packets += tpackets;
100 bytes += tbytes;
101 }
102
103 stats->rx_packets = packets;
104 stats->tx_packets = 0;
105
106 stats->rx_bytes = bytes;
107 stats->tx_bytes = 0;
108
109 return stats;
110}
111
112static u32 always_on(struct net_device *dev)
113{
114 return 1;
115}
116
117static const struct ethtool_ops nlmon_ethtool_ops = {
118 .get_link = always_on,
119};
120
121static const struct net_device_ops nlmon_ops = {
122 .ndo_init = nlmon_dev_init,
123 .ndo_uninit = nlmon_dev_uninit,
124 .ndo_open = nlmon_open,
125 .ndo_stop = nlmon_close,
126 .ndo_start_xmit = nlmon_xmit,
127 .ndo_get_stats64 = nlmon_get_stats64,
128 .ndo_change_mtu = nlmon_change_mtu,
129};
130
131static void nlmon_setup(struct net_device *dev)
132{
133 dev->type = ARPHRD_NETLINK;
134 dev->tx_queue_len = 0;
135
136 dev->netdev_ops = &nlmon_ops;
137 dev->ethtool_ops = &nlmon_ethtool_ops;
138 dev->destructor = free_netdev;
139
140 dev->features = NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
141 dev->flags = IFF_NOARP;
142
143 /* That's rather a softlimit here, which, of course,
144 * can be altered. Not a real MTU, but what is to be
145 * expected in most cases.
146 */
147 dev->mtu = NLMSG_GOODSIZE;
148}
149
150static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
151{
152 if (tb[IFLA_ADDRESS])
153 return -EINVAL;
154 return 0;
155}
156
157static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
158 .kind = "nlmon",
159 .priv_size = sizeof(struct nlmon),
160 .setup = nlmon_setup,
161 .validate = nlmon_validate,
162};
163
164static __init int nlmon_register(void)
165{
166 return rtnl_link_register(&nlmon_link_ops);
167}
168
169static __exit void nlmon_unregister(void)
170{
171 rtnl_link_unregister(&nlmon_link_ops);
172}
173
174module_init(nlmon_register);
175module_exit(nlmon_unregister);
176
177MODULE_LICENSE("GPL v2");
178MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
179MODULE_AUTHOR("Mathieu Geli <geli@enseirb.fr>");
180MODULE_DESCRIPTION("Netlink monitoring device");
181MODULE_ALIAS_RTNL_LINK("nlmon");
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1e11f2bfd9ce..3a316b30089f 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -144,6 +144,16 @@ config MDIO_OCTEON
144 144
145 If in doubt, say Y. 145 If in doubt, say Y.
146 146
147config MDIO_SUN4I
148 tristate "Allwinner sun4i MDIO interface support"
149 depends on ARCH_SUNXI
150 select REGULATOR
151 select REGULATOR_FIXED_VOLTAGE
152 help
153 This driver supports the MDIO interface found in the network
154 interface units of the Allwinner SoC that have an EMAC (A10,
155 A12, A10s, etc.)
156
147config MDIO_BUS_MUX 157config MDIO_BUS_MUX
148 tristate 158 tristate
149 depends on OF_MDIO 159 depends on OF_MDIO
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 9645e389a58d..23a2ab2e847e 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_AMD_PHY) += amd.o
30obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o 30obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
31obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o 31obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
32obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o 32obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
33obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 45cbc10de01c..1f7091b3c27c 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -27,15 +27,22 @@
27#define AT803X_MMD_ACCESS_CONTROL 0x0D 27#define AT803X_MMD_ACCESS_CONTROL 0x0D
28#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E 28#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
29#define AT803X_FUNC_DATA 0x4003 29#define AT803X_FUNC_DATA 0x4003
30#define AT803X_DEBUG_ADDR 0x1D
31#define AT803X_DEBUG_DATA 0x1E
32#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05
33#define AT803X_DEBUG_RGMII_TX_CLK_DLY BIT(8)
30 34
31MODULE_DESCRIPTION("Atheros 803x PHY driver"); 35MODULE_DESCRIPTION("Atheros 803x PHY driver");
32MODULE_AUTHOR("Matus Ujhelyi"); 36MODULE_AUTHOR("Matus Ujhelyi");
33MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
34 38
35static void at803x_set_wol_mac_addr(struct phy_device *phydev) 39static int at803x_set_wol(struct phy_device *phydev,
40 struct ethtool_wolinfo *wol)
36{ 41{
37 struct net_device *ndev = phydev->attached_dev; 42 struct net_device *ndev = phydev->attached_dev;
38 const u8 *mac; 43 const u8 *mac;
44 int ret;
45 u32 value;
39 unsigned int i, offsets[] = { 46 unsigned int i, offsets[] = {
40 AT803X_LOC_MAC_ADDR_32_47_OFFSET, 47 AT803X_LOC_MAC_ADDR_32_47_OFFSET,
41 AT803X_LOC_MAC_ADDR_16_31_OFFSET, 48 AT803X_LOC_MAC_ADDR_16_31_OFFSET,
@@ -43,30 +50,61 @@ static void at803x_set_wol_mac_addr(struct phy_device *phydev)
43 }; 50 };
44 51
45 if (!ndev) 52 if (!ndev)
46 return; 53 return -ENODEV;
47 54
48 mac = (const u8 *) ndev->dev_addr; 55 if (wol->wolopts & WAKE_MAGIC) {
56 mac = (const u8 *) ndev->dev_addr;
49 57
50 if (!is_valid_ether_addr(mac)) 58 if (!is_valid_ether_addr(mac))
51 return; 59 return -EFAULT;
52 60
53 for (i = 0; i < 3; i++) { 61 for (i = 0; i < 3; i++) {
54 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, 62 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
55 AT803X_DEVICE_ADDR); 63 AT803X_DEVICE_ADDR);
56 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA, 64 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
57 offsets[i]); 65 offsets[i]);
58 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, 66 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
59 AT803X_FUNC_DATA); 67 AT803X_FUNC_DATA);
60 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA, 68 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
61 mac[(i * 2) + 1] | (mac[(i * 2)] << 8)); 69 mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
70 }
71
72 value = phy_read(phydev, AT803X_INTR_ENABLE);
73 value |= AT803X_WOL_ENABLE;
74 ret = phy_write(phydev, AT803X_INTR_ENABLE, value);
75 if (ret)
76 return ret;
77 value = phy_read(phydev, AT803X_INTR_STATUS);
78 } else {
79 value = phy_read(phydev, AT803X_INTR_ENABLE);
80 value &= (~AT803X_WOL_ENABLE);
81 ret = phy_write(phydev, AT803X_INTR_ENABLE, value);
82 if (ret)
83 return ret;
84 value = phy_read(phydev, AT803X_INTR_STATUS);
62 } 85 }
86
87 return ret;
88}
89
90static void at803x_get_wol(struct phy_device *phydev,
91 struct ethtool_wolinfo *wol)
92{
93 u32 value;
94
95 wol->supported = WAKE_MAGIC;
96 wol->wolopts = 0;
97
98 value = phy_read(phydev, AT803X_INTR_ENABLE);
99 if (value & AT803X_WOL_ENABLE)
100 wol->wolopts |= WAKE_MAGIC;
63} 101}
64 102
65static int at803x_config_init(struct phy_device *phydev) 103static int at803x_config_init(struct phy_device *phydev)
66{ 104{
67 int val; 105 int val;
106 int ret;
68 u32 features; 107 u32 features;
69 int status;
70 108
71 features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI | 109 features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
72 SUPPORTED_FIBRE | SUPPORTED_BNC; 110 SUPPORTED_FIBRE | SUPPORTED_BNC;
@@ -100,20 +138,29 @@ static int at803x_config_init(struct phy_device *phydev)
100 phydev->supported = features; 138 phydev->supported = features;
101 phydev->advertising = features; 139 phydev->advertising = features;
102 140
103 /* enable WOL */ 141 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
104 at803x_set_wol_mac_addr(phydev); 142 ret = phy_write(phydev, AT803X_DEBUG_ADDR,
105 status = phy_write(phydev, AT803X_INTR_ENABLE, AT803X_WOL_ENABLE); 143 AT803X_DEBUG_SYSTEM_MODE_CTRL);
106 status = phy_read(phydev, AT803X_INTR_STATUS); 144 if (ret)
145 return ret;
146 ret = phy_write(phydev, AT803X_DEBUG_DATA,
147 AT803X_DEBUG_RGMII_TX_CLK_DLY);
148 if (ret)
149 return ret;
150 }
107 151
108 return 0; 152 return 0;
109} 153}
110 154
111/* ATHEROS 8035 */ 155static struct phy_driver at803x_driver[] = {
112static struct phy_driver at8035_driver = { 156{
157 /* ATHEROS 8035 */
113 .phy_id = 0x004dd072, 158 .phy_id = 0x004dd072,
114 .name = "Atheros 8035 ethernet", 159 .name = "Atheros 8035 ethernet",
115 .phy_id_mask = 0xffffffef, 160 .phy_id_mask = 0xffffffef,
116 .config_init = at803x_config_init, 161 .config_init = at803x_config_init,
162 .set_wol = at803x_set_wol,
163 .get_wol = at803x_get_wol,
117 .features = PHY_GBIT_FEATURES, 164 .features = PHY_GBIT_FEATURES,
118 .flags = PHY_HAS_INTERRUPT, 165 .flags = PHY_HAS_INTERRUPT,
119 .config_aneg = &genphy_config_aneg, 166 .config_aneg = &genphy_config_aneg,
@@ -121,14 +168,14 @@ static struct phy_driver at8035_driver = {
121 .driver = { 168 .driver = {
122 .owner = THIS_MODULE, 169 .owner = THIS_MODULE,
123 }, 170 },
124}; 171}, {
125 172 /* ATHEROS 8030 */
126/* ATHEROS 8030 */
127static struct phy_driver at8030_driver = {
128 .phy_id = 0x004dd076, 173 .phy_id = 0x004dd076,
129 .name = "Atheros 8030 ethernet", 174 .name = "Atheros 8030 ethernet",
130 .phy_id_mask = 0xffffffef, 175 .phy_id_mask = 0xffffffef,
131 .config_init = at803x_config_init, 176 .config_init = at803x_config_init,
177 .set_wol = at803x_set_wol,
178 .get_wol = at803x_get_wol,
132 .features = PHY_GBIT_FEATURES, 179 .features = PHY_GBIT_FEATURES,
133 .flags = PHY_HAS_INTERRUPT, 180 .flags = PHY_HAS_INTERRUPT,
134 .config_aneg = &genphy_config_aneg, 181 .config_aneg = &genphy_config_aneg,
@@ -136,32 +183,33 @@ static struct phy_driver at8030_driver = {
136 .driver = { 183 .driver = {
137 .owner = THIS_MODULE, 184 .owner = THIS_MODULE,
138 }, 185 },
139}; 186}, {
187 /* ATHEROS 8031 */
188 .phy_id = 0x004dd074,
189 .name = "Atheros 8031 ethernet",
190 .phy_id_mask = 0xffffffef,
191 .config_init = at803x_config_init,
192 .set_wol = at803x_set_wol,
193 .get_wol = at803x_get_wol,
194 .features = PHY_GBIT_FEATURES,
195 .flags = PHY_HAS_INTERRUPT,
196 .config_aneg = &genphy_config_aneg,
197 .read_status = &genphy_read_status,
198 .driver = {
199 .owner = THIS_MODULE,
200 },
201} };
140 202
141static int __init atheros_init(void) 203static int __init atheros_init(void)
142{ 204{
143 int ret; 205 return phy_drivers_register(at803x_driver,
144 206 ARRAY_SIZE(at803x_driver));
145 ret = phy_driver_register(&at8035_driver);
146 if (ret)
147 goto err1;
148
149 ret = phy_driver_register(&at8030_driver);
150 if (ret)
151 goto err2;
152
153 return 0;
154
155err2:
156 phy_driver_unregister(&at8035_driver);
157err1:
158 return ret;
159} 207}
160 208
161static void __exit atheros_exit(void) 209static void __exit atheros_exit(void)
162{ 210{
163 phy_driver_unregister(&at8035_driver); 211 return phy_drivers_unregister(at803x_driver,
164 phy_driver_unregister(&at8030_driver); 212 ARRAY_SIZE(at803x_driver));
165} 213}
166 214
167module_init(atheros_init); 215module_init(atheros_init);
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 84c7a39b1c65..ac55b0807853 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -78,7 +78,7 @@ static struct phy_driver bcm63xx_driver[] = {
78 .name = "Broadcom BCM63XX (1)", 78 .name = "Broadcom BCM63XX (1)",
79 /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */ 79 /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */
80 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), 80 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
81 .flags = PHY_HAS_INTERRUPT, 81 .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
82 .config_init = bcm63xx_config_init, 82 .config_init = bcm63xx_config_init,
83 .config_aneg = genphy_config_aneg, 83 .config_aneg = genphy_config_aneg,
84 .read_status = genphy_read_status, 84 .read_status = genphy_read_status,
@@ -91,7 +91,7 @@ static struct phy_driver bcm63xx_driver[] = {
91 .phy_id_mask = 0xfffffc00, 91 .phy_id_mask = 0xfffffc00,
92 .name = "Broadcom BCM63XX (2)", 92 .name = "Broadcom BCM63XX (2)",
93 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), 93 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
94 .flags = PHY_HAS_INTERRUPT, 94 .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
95 .config_init = bcm63xx_config_init, 95 .config_init = bcm63xx_config_init,
96 .config_aneg = genphy_config_aneg, 96 .config_aneg = genphy_config_aneg,
97 .read_status = genphy_read_status, 97 .read_status = genphy_read_status,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 202fe1ff1987..2e91477362d4 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -116,6 +116,8 @@
116#define MII_M1011_PHY_STATUS_RESOLVED 0x0800 116#define MII_M1011_PHY_STATUS_RESOLVED 0x0800
117#define MII_M1011_PHY_STATUS_LINK 0x0400 117#define MII_M1011_PHY_STATUS_LINK 0x0400
118 118
119#define MII_M1116R_CONTROL_REG_MAC 21
120
119 121
120MODULE_DESCRIPTION("Marvell PHY driver"); 122MODULE_DESCRIPTION("Marvell PHY driver");
121MODULE_AUTHOR("Andy Fleming"); 123MODULE_AUTHOR("Andy Fleming");
@@ -372,6 +374,66 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
372 return m88e1121_config_aneg(phydev); 374 return m88e1121_config_aneg(phydev);
373} 375}
374 376
377static int m88e1510_config_aneg(struct phy_device *phydev)
378{
379 int err;
380
381 err = m88e1318_config_aneg(phydev);
382 if (err < 0)
383 return err;
384
385 return marvell_of_reg_init(phydev);
386}
387
388static int m88e1116r_config_init(struct phy_device *phydev)
389{
390 int temp;
391 int err;
392
393 temp = phy_read(phydev, MII_BMCR);
394 temp |= BMCR_RESET;
395 err = phy_write(phydev, MII_BMCR, temp);
396 if (err < 0)
397 return err;
398
399 mdelay(500);
400
401 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);
402 if (err < 0)
403 return err;
404
405 temp = phy_read(phydev, MII_M1011_PHY_SCR);
406 temp |= (7 << 12); /* max number of gigabit attempts */
407 temp |= (1 << 11); /* enable downshift */
408 temp |= MII_M1011_PHY_SCR_AUTO_CROSS;
409 err = phy_write(phydev, MII_M1011_PHY_SCR, temp);
410 if (err < 0)
411 return err;
412
413 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 2);
414 if (err < 0)
415 return err;
416 temp = phy_read(phydev, MII_M1116R_CONTROL_REG_MAC);
417 temp |= (1 << 5);
418 temp |= (1 << 4);
419 err = phy_write(phydev, MII_M1116R_CONTROL_REG_MAC, temp);
420 if (err < 0)
421 return err;
422 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);
423 if (err < 0)
424 return err;
425
426 temp = phy_read(phydev, MII_BMCR);
427 temp |= BMCR_RESET;
428 err = phy_write(phydev, MII_BMCR, temp);
429 if (err < 0)
430 return err;
431
432 mdelay(500);
433
434 return 0;
435}
436
375static int m88e1111_config_init(struct phy_device *phydev) 437static int m88e1111_config_init(struct phy_device *phydev)
376{ 438{
377 int err; 439 int err;
@@ -940,6 +1002,32 @@ static struct phy_driver marvell_drivers[] = {
940 .config_intr = &marvell_config_intr, 1002 .config_intr = &marvell_config_intr,
941 .driver = { .owner = THIS_MODULE }, 1003 .driver = { .owner = THIS_MODULE },
942 }, 1004 },
1005 {
1006 .phy_id = MARVELL_PHY_ID_88E1116R,
1007 .phy_id_mask = MARVELL_PHY_ID_MASK,
1008 .name = "Marvell 88E1116R",
1009 .features = PHY_GBIT_FEATURES,
1010 .flags = PHY_HAS_INTERRUPT,
1011 .config_init = &m88e1116r_config_init,
1012 .config_aneg = &genphy_config_aneg,
1013 .read_status = &genphy_read_status,
1014 .ack_interrupt = &marvell_ack_interrupt,
1015 .config_intr = &marvell_config_intr,
1016 .driver = { .owner = THIS_MODULE },
1017 },
1018 {
1019 .phy_id = MARVELL_PHY_ID_88E1510,
1020 .phy_id_mask = MARVELL_PHY_ID_MASK,
1021 .name = "Marvell 88E1510",
1022 .features = PHY_GBIT_FEATURES,
1023 .flags = PHY_HAS_INTERRUPT,
1024 .config_aneg = &m88e1510_config_aneg,
1025 .read_status = &marvell_read_status,
1026 .ack_interrupt = &marvell_ack_interrupt,
1027 .config_intr = &marvell_config_intr,
1028 .did_interrupt = &m88e1121_did_interrupt,
1029 .driver = { .owner = THIS_MODULE },
1030 },
943}; 1031};
944 1032
945static int __init marvell_init(void) 1033static int __init marvell_init(void)
@@ -958,15 +1046,17 @@ module_init(marvell_init);
958module_exit(marvell_exit); 1046module_exit(marvell_exit);
959 1047
960static struct mdio_device_id __maybe_unused marvell_tbl[] = { 1048static struct mdio_device_id __maybe_unused marvell_tbl[] = {
961 { 0x01410c60, 0xfffffff0 }, 1049 { MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
962 { 0x01410c90, 0xfffffff0 }, 1050 { MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK },
963 { 0x01410cc0, 0xfffffff0 }, 1051 { MARVELL_PHY_ID_88E1111, MARVELL_PHY_ID_MASK },
964 { 0x01410e10, 0xfffffff0 }, 1052 { MARVELL_PHY_ID_88E1118, MARVELL_PHY_ID_MASK },
965 { 0x01410cb0, 0xfffffff0 }, 1053 { MARVELL_PHY_ID_88E1121R, MARVELL_PHY_ID_MASK },
966 { 0x01410cd0, 0xfffffff0 }, 1054 { MARVELL_PHY_ID_88E1145, MARVELL_PHY_ID_MASK },
967 { 0x01410e50, 0xfffffff0 }, 1055 { MARVELL_PHY_ID_88E1149R, MARVELL_PHY_ID_MASK },
968 { 0x01410e30, 0xfffffff0 }, 1056 { MARVELL_PHY_ID_88E1240, MARVELL_PHY_ID_MASK },
969 { 0x01410e90, 0xfffffff0 }, 1057 { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
1058 { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
1059 { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
970 { } 1060 { }
971}; 1061};
972 1062
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
new file mode 100644
index 000000000000..61d3f4ebf52e
--- /dev/null
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -0,0 +1,194 @@
1/*
2 * Allwinner EMAC MDIO interface driver
3 *
4 * Copyright 2012-2013 Stefan Roese <sr@denx.de>
5 * Copyright 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * Based on the Linux driver provided by Allwinner:
8 * Copyright (C) 1997 Sten Wang
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
14
15#include <linux/delay.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/mutex.h>
20#include <linux/of_address.h>
21#include <linux/of_mdio.h>
22#include <linux/phy.h>
23#include <linux/platform_device.h>
24#include <linux/regulator/consumer.h>
25
26#define EMAC_MAC_MCMD_REG (0x00)
27#define EMAC_MAC_MADR_REG (0x04)
28#define EMAC_MAC_MWTD_REG (0x08)
29#define EMAC_MAC_MRDD_REG (0x0c)
30#define EMAC_MAC_MIND_REG (0x10)
31#define EMAC_MAC_SSRR_REG (0x14)
32
33#define MDIO_TIMEOUT (msecs_to_jiffies(100))
34
35struct sun4i_mdio_data {
36 void __iomem *membase;
37 struct regulator *regulator;
38};
39
40static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
41{
42 struct sun4i_mdio_data *data = bus->priv;
43 unsigned long start_jiffies;
44 int value;
45
46 /* issue the phy address and reg */
47 writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG);
48 /* pull up the phy io line */
49 writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
50
51 /* Wait read complete */
52 start_jiffies = jiffies;
53 while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
54 if (time_after(start_jiffies,
55 start_jiffies + MDIO_TIMEOUT))
56 return -ETIMEDOUT;
57 msleep(1);
58 }
59
60 /* push down the phy io line */
61 writel(0x0, data->membase + EMAC_MAC_MCMD_REG);
62 /* and read data */
63 value = readl(data->membase + EMAC_MAC_MRDD_REG);
64
65 return value;
66}
67
68static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
69 u16 value)
70{
71 struct sun4i_mdio_data *data = bus->priv;
72 unsigned long start_jiffies;
73
74 /* issue the phy address and reg */
75 writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG);
76 /* pull up the phy io line */
77 writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
78
79 /* Wait read complete */
80 start_jiffies = jiffies;
81 while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
82 if (time_after(start_jiffies,
83 start_jiffies + MDIO_TIMEOUT))
84 return -ETIMEDOUT;
85 msleep(1);
86 }
87
88 /* push down the phy io line */
89 writel(0x0, data->membase + EMAC_MAC_MCMD_REG);
90 /* and write data */
91 writel(value, data->membase + EMAC_MAC_MWTD_REG);
92
93 return 0;
94}
95
96static int sun4i_mdio_reset(struct mii_bus *bus)
97{
98 return 0;
99}
100
101static int sun4i_mdio_probe(struct platform_device *pdev)
102{
103 struct device_node *np = pdev->dev.of_node;
104 struct mii_bus *bus;
105 struct sun4i_mdio_data *data;
106 int ret, i;
107
108 bus = mdiobus_alloc_size(sizeof(*data));
109 if (!bus)
110 return -ENOMEM;
111
112 bus->name = "sun4i_mii_bus";
113 bus->read = &sun4i_mdio_read;
114 bus->write = &sun4i_mdio_write;
115 bus->reset = &sun4i_mdio_reset;
116 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
117 bus->parent = &pdev->dev;
118
119 bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
120 if (!bus->irq) {
121 ret = -ENOMEM;
122 goto err_out_free_mdiobus;
123 }
124
125 for (i = 0; i < PHY_MAX_ADDR; i++)
126 bus->irq[i] = PHY_POLL;
127
128 data = bus->priv;
129 data->membase = of_iomap(np, 0);
130 if (!data->membase) {
131 ret = -ENOMEM;
132 goto err_out_free_mdio_irq;
133 }
134
135 data->regulator = devm_regulator_get(&pdev->dev, "phy");
136 if (IS_ERR(data->regulator)) {
137 if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
138 return -EPROBE_DEFER;
139
140 dev_info(&pdev->dev, "no regulator found\n");
141 } else {
142 ret = regulator_enable(data->regulator);
143 if (ret)
144 goto err_out_free_mdio_irq;
145 }
146
147 ret = of_mdiobus_register(bus, np);
148 if (ret < 0)
149 goto err_out_disable_regulator;
150
151 platform_set_drvdata(pdev, bus);
152
153 return 0;
154
155err_out_disable_regulator:
156 regulator_disable(data->regulator);
157err_out_free_mdio_irq:
158 kfree(bus->irq);
159err_out_free_mdiobus:
160 mdiobus_free(bus);
161 return ret;
162}
163
164static int sun4i_mdio_remove(struct platform_device *pdev)
165{
166 struct mii_bus *bus = platform_get_drvdata(pdev);
167
168 mdiobus_unregister(bus);
169 kfree(bus->irq);
170 mdiobus_free(bus);
171
172 return 0;
173}
174
175static const struct of_device_id sun4i_mdio_dt_ids[] = {
176 { .compatible = "allwinner,sun4i-mdio" },
177 { }
178};
179MODULE_DEVICE_TABLE(of, sun4i_mdio_dt_ids);
180
181static struct platform_driver sun4i_mdio_driver = {
182 .probe = sun4i_mdio_probe,
183 .remove = sun4i_mdio_remove,
184 .driver = {
185 .name = "sun4i-mdio",
186 .of_match_table = sun4i_mdio_dt_ids,
187 },
188};
189
190module_platform_driver(sun4i_mdio_driver);
191
192MODULE_DESCRIPTION("Allwinner EMAC MDIO interface driver");
193MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
194MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 38f0b312ff85..36c6994436b7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -294,7 +294,8 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
294 cmd->duplex = phydev->duplex; 294 cmd->duplex = phydev->duplex;
295 cmd->port = PORT_MII; 295 cmd->port = PORT_MII;
296 cmd->phy_address = phydev->addr; 296 cmd->phy_address = phydev->addr;
297 cmd->transceiver = XCVR_EXTERNAL; 297 cmd->transceiver = phy_is_internal(phydev) ?
298 XCVR_INTERNAL : XCVR_EXTERNAL;
298 cmd->autoneg = phydev->autoneg; 299 cmd->autoneg = phydev->autoneg;
299 300
300 return 0; 301 return 0;
@@ -419,8 +420,6 @@ out_unlock:
419EXPORT_SYMBOL(phy_start_aneg); 420EXPORT_SYMBOL(phy_start_aneg);
420 421
421 422
422static void phy_change(struct work_struct *work);
423
424/** 423/**
425 * phy_start_machine - start PHY state machine tracking 424 * phy_start_machine - start PHY state machine tracking
426 * @phydev: the phy_device struct 425 * @phydev: the phy_device struct
@@ -439,7 +438,7 @@ void phy_start_machine(struct phy_device *phydev,
439{ 438{
440 phydev->adjust_state = handler; 439 phydev->adjust_state = handler;
441 440
442 schedule_delayed_work(&phydev->state_queue, HZ); 441 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
443} 442}
444 443
445/** 444/**
@@ -500,7 +499,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
500 disable_irq_nosync(irq); 499 disable_irq_nosync(irq);
501 atomic_inc(&phydev->irq_disable); 500 atomic_inc(&phydev->irq_disable);
502 501
503 schedule_work(&phydev->phy_queue); 502 queue_work(system_power_efficient_wq, &phydev->phy_queue);
504 503
505 return IRQ_HANDLED; 504 return IRQ_HANDLED;
506} 505}
@@ -565,8 +564,6 @@ int phy_start_interrupts(struct phy_device *phydev)
565{ 564{
566 int err = 0; 565 int err = 0;
567 566
568 INIT_WORK(&phydev->phy_queue, phy_change);
569
570 atomic_set(&phydev->irq_disable, 0); 567 atomic_set(&phydev->irq_disable, 0);
571 if (request_irq(phydev->irq, phy_interrupt, 568 if (request_irq(phydev->irq, phy_interrupt,
572 IRQF_SHARED, 569 IRQF_SHARED,
@@ -623,7 +620,7 @@ EXPORT_SYMBOL(phy_stop_interrupts);
623 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes 620 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
624 * @work: work_struct that describes the work to be done 621 * @work: work_struct that describes the work to be done
625 */ 622 */
626static void phy_change(struct work_struct *work) 623void phy_change(struct work_struct *work)
627{ 624{
628 int err; 625 int err;
629 struct phy_device *phydev = 626 struct phy_device *phydev =
@@ -655,7 +652,7 @@ static void phy_change(struct work_struct *work)
655 652
656 /* reschedule state queue work to run as soon as possible */ 653 /* reschedule state queue work to run as soon as possible */
657 cancel_delayed_work_sync(&phydev->state_queue); 654 cancel_delayed_work_sync(&phydev->state_queue);
658 schedule_delayed_work(&phydev->state_queue, 0); 655 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
659 656
660 return; 657 return;
661 658
@@ -682,7 +679,7 @@ void phy_stop(struct phy_device *phydev)
682 if (PHY_HALTED == phydev->state) 679 if (PHY_HALTED == phydev->state)
683 goto out_unlock; 680 goto out_unlock;
684 681
685 if (phydev->irq != PHY_POLL) { 682 if (phy_interrupt_is_valid(phydev)) {
686 /* Disable PHY Interrupts */ 683 /* Disable PHY Interrupts */
687 phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); 684 phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
688 685
@@ -828,8 +825,9 @@ void phy_state_machine(struct work_struct *work)
828 break; 825 break;
829 case PHY_RUNNING: 826 case PHY_RUNNING:
830 /* Only register a CHANGE if we are 827 /* Only register a CHANGE if we are
831 * polling */ 828 * polling or ignoring interrupts
832 if (PHY_POLL == phydev->irq) 829 */
830 if (!phy_interrupt_is_valid(phydev))
833 phydev->state = PHY_CHANGELINK; 831 phydev->state = PHY_CHANGELINK;
834 break; 832 break;
835 case PHY_CHANGELINK: 833 case PHY_CHANGELINK:
@@ -848,7 +846,7 @@ void phy_state_machine(struct work_struct *work)
848 846
849 phydev->adjust_link(phydev->attached_dev); 847 phydev->adjust_link(phydev->attached_dev);
850 848
851 if (PHY_POLL != phydev->irq) 849 if (phy_interrupt_is_valid(phydev))
852 err = phy_config_interrupt(phydev, 850 err = phy_config_interrupt(phydev,
853 PHY_INTERRUPT_ENABLED); 851 PHY_INTERRUPT_ENABLED);
854 break; 852 break;
@@ -918,8 +916,17 @@ void phy_state_machine(struct work_struct *work)
918 if (err < 0) 916 if (err < 0)
919 phy_error(phydev); 917 phy_error(phydev);
920 918
921 schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ); 919 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
920 PHY_STATE_TIME * HZ);
921}
922
923void phy_mac_interrupt(struct phy_device *phydev, int new_link)
924{
925 cancel_work_sync(&phydev->phy_queue);
926 phydev->link = new_link;
927 schedule_work(&phydev->phy_queue);
922} 928}
929EXPORT_SYMBOL(phy_mac_interrupt);
923 930
924static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad, 931static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
925 int addr) 932 int addr)
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3657b4a29124..74630e94fa3b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -189,6 +189,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
189 189
190 mutex_init(&dev->lock); 190 mutex_init(&dev->lock);
191 INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); 191 INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
192 INIT_WORK(&dev->phy_queue, phy_change);
192 193
193 /* Request the appropriate module unconditionally; don't 194 /* Request the appropriate module unconditionally; don't
194 bother trying to do so only if it isn't already loaded, 195 bother trying to do so only if it isn't already loaded,
@@ -1009,10 +1010,16 @@ static int phy_probe(struct device *dev)
1009 phydrv = to_phy_driver(drv); 1010 phydrv = to_phy_driver(drv);
1010 phydev->drv = phydrv; 1011 phydev->drv = phydrv;
1011 1012
1012 /* Disable the interrupt if the PHY doesn't support it */ 1013 /* Disable the interrupt if the PHY doesn't support it
1013 if (!(phydrv->flags & PHY_HAS_INTERRUPT)) 1014 * but the interrupt is still a valid one
1015 */
1016 if (!(phydrv->flags & PHY_HAS_INTERRUPT) &&
1017 phy_interrupt_is_valid(phydev))
1014 phydev->irq = PHY_POLL; 1018 phydev->irq = PHY_POLL;
1015 1019
1020 if (phydrv->flags & PHY_IS_INTERNAL)
1021 phydev->is_internal = true;
1022
1016 mutex_lock(&phydev->lock); 1023 mutex_lock(&phydev->lock);
1017 1024
1018 /* Start out supporting everything. Eventually, 1025 /* Start out supporting everything. Eventually,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index d11c93e69e03..f3bea1346021 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -354,19 +354,7 @@ static struct spi_driver ks8995_driver = {
354 .remove = ks8995_remove, 354 .remove = ks8995_remove,
355}; 355};
356 356
357static int __init ks8995_init(void) 357module_spi_driver(ks8995_driver);
358{
359 pr_info(DRV_DESC " version " DRV_VERSION "\n");
360
361 return spi_register_driver(&ks8995_driver);
362}
363module_init(ks8995_init);
364
365static void __exit ks8995_exit(void)
366{
367 spi_unregister_driver(&ks8995_driver);
368}
369module_exit(ks8995_exit);
370 358
371MODULE_DESCRIPTION(DRV_DESC); 359MODULE_DESCRIPTION(DRV_DESC);
372MODULE_VERSION(DRV_VERSION); 360MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 3492b5391273..69b482bce7d2 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -44,18 +44,19 @@
44#define MII_VSC8244_ISTAT_DUPLEX 0x1000 44#define MII_VSC8244_ISTAT_DUPLEX 0x1000
45 45
46/* Vitesse Auxiliary Control/Status Register */ 46/* Vitesse Auxiliary Control/Status Register */
47#define MII_VSC8244_AUX_CONSTAT 0x1c 47#define MII_VSC8244_AUX_CONSTAT 0x1c
48#define MII_VSC8244_AUXCONSTAT_INIT 0x0000 48#define MII_VSC8244_AUXCONSTAT_INIT 0x0000
49#define MII_VSC8244_AUXCONSTAT_DUPLEX 0x0020 49#define MII_VSC8244_AUXCONSTAT_DUPLEX 0x0020
50#define MII_VSC8244_AUXCONSTAT_SPEED 0x0018 50#define MII_VSC8244_AUXCONSTAT_SPEED 0x0018
51#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010 51#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010
52#define MII_VSC8244_AUXCONSTAT_100 0x0008 52#define MII_VSC8244_AUXCONSTAT_100 0x0008
53 53
54#define MII_VSC8221_AUXCONSTAT_INIT 0x0004 /* need to set this bit? */ 54#define MII_VSC8221_AUXCONSTAT_INIT 0x0004 /* need to set this bit? */
55#define MII_VSC8221_AUXCONSTAT_RESERVED 0x0004 55#define MII_VSC8221_AUXCONSTAT_RESERVED 0x0004
56 56
57#define PHY_ID_VSC8244 0x000fc6c0 57#define PHY_ID_VSC8244 0x000fc6c0
58#define PHY_ID_VSC8221 0x000fc550 58#define PHY_ID_VSC8221 0x000fc550
59#define PHY_ID_VSC8211 0x000fc4b0
59 60
60MODULE_DESCRIPTION("Vitesse PHY driver"); 61MODULE_DESCRIPTION("Vitesse PHY driver");
61MODULE_AUTHOR("Kriston Carson"); 62MODULE_AUTHOR("Kriston Carson");
@@ -100,9 +101,8 @@ static int vsc824x_config_init(struct phy_device *phydev)
100static int vsc824x_ack_interrupt(struct phy_device *phydev) 101static int vsc824x_ack_interrupt(struct phy_device *phydev)
101{ 102{
102 int err = 0; 103 int err = 0;
103 104
104 /* 105 /* Don't bother to ACK the interrupts if interrupts
105 * Don't bother to ACK the interrupts if interrupts
106 * are disabled. The 824x cannot clear the interrupts 106 * are disabled. The 824x cannot clear the interrupts
107 * if they are disabled. 107 * if they are disabled.
108 */ 108 */
@@ -122,8 +122,7 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
122 MII_VSC8244_IMASK_MASK : 122 MII_VSC8244_IMASK_MASK :
123 MII_VSC8221_IMASK_MASK); 123 MII_VSC8221_IMASK_MASK);
124 else { 124 else {
125 /* 125 /* The Vitesse PHY cannot clear the interrupt
126 * The Vitesse PHY cannot clear the interrupt
127 * once it has disabled them, so we clear them first 126 * once it has disabled them, so we clear them first
128 */ 127 */
129 err = phy_read(phydev, MII_VSC8244_ISTAT); 128 err = phy_read(phydev, MII_VSC8244_ISTAT);
@@ -146,7 +145,8 @@ static int vsc8221_config_init(struct phy_device *phydev)
146 return err; 145 return err;
147 146
148 /* Perhaps we should set EXT_CON1 based on the interface? 147 /* Perhaps we should set EXT_CON1 based on the interface?
149 Options are 802.3Z SerDes or SGMII */ 148 * Options are 802.3Z SerDes or SGMII
149 */
150} 150}
151 151
152/* Vitesse 824x */ 152/* Vitesse 824x */
@@ -176,6 +176,19 @@ static struct phy_driver vsc82xx_driver[] = {
176 .ack_interrupt = &vsc824x_ack_interrupt, 176 .ack_interrupt = &vsc824x_ack_interrupt,
177 .config_intr = &vsc82xx_config_intr, 177 .config_intr = &vsc82xx_config_intr,
178 .driver = { .owner = THIS_MODULE,}, 178 .driver = { .owner = THIS_MODULE,},
179}, {
180 /* Vitesse 8211 */
181 .phy_id = PHY_ID_VSC8211,
182 .phy_id_mask = 0x000ffff0,
183 .name = "Vitesse VSC8211",
184 .features = PHY_GBIT_FEATURES,
185 .flags = PHY_HAS_INTERRUPT,
186 .config_init = &vsc8221_config_init,
187 .config_aneg = &genphy_config_aneg,
188 .read_status = &genphy_read_status,
189 .ack_interrupt = &vsc824x_ack_interrupt,
190 .config_intr = &vsc82xx_config_intr,
191 .driver = { .owner = THIS_MODULE,},
179} }; 192} };
180 193
181static int __init vsc82xx_init(void) 194static int __init vsc82xx_init(void)
@@ -196,6 +209,7 @@ module_exit(vsc82xx_exit);
196static struct mdio_device_id __maybe_unused vitesse_tbl[] = { 209static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
197 { PHY_ID_VSC8244, 0x000fffc0 }, 210 { PHY_ID_VSC8244, 0x000fffc0 },
198 { PHY_ID_VSC8221, 0x000ffff0 }, 211 { PHY_ID_VSC8221, 0x000ffff0 },
212 { PHY_ID_VSC8211, 0x000ffff0 },
199 { } 213 { }
200}; 214};
201 215
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index bb07ba94c3aa..5f66e30d9823 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -338,7 +338,7 @@ static void pppoe_flush_dev(struct net_device *dev)
338static int pppoe_device_event(struct notifier_block *this, 338static int pppoe_device_event(struct notifier_block *this,
339 unsigned long event, void *ptr) 339 unsigned long event, void *ptr)
340{ 340{
341 struct net_device *dev = (struct net_device *)ptr; 341 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
342 342
343 /* Only look at sockets that are using this specific device. */ 343 /* Only look at sockets that are using this specific device. */
344 switch (event) { 344 switch (event) {
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index f433b594388e..6d1f6ed3113f 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -208,6 +208,17 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
208 if (nets[rnet->mport->id].active[destid]) 208 if (nets[rnet->mport->id].active[destid])
209 rionet_queue_tx_msg(skb, ndev, 209 rionet_queue_tx_msg(skb, ndev,
210 nets[rnet->mport->id].active[destid]); 210 nets[rnet->mport->id].active[destid]);
211 else {
212 /*
213 * If the target device was removed from the list of
214 * active peers but we still have TX packets targeting
215 * it just report sending a packet to the target
216 * (without actual packet transfer).
217 */
218 dev_kfree_skb_any(skb);
219 ndev->stats.tx_packets++;
220 ndev->stats.tx_bytes += skb->len;
221 }
211 } 222 }
212 223
213 spin_unlock_irqrestore(&rnet->tx_lock, flags); 224 spin_unlock_irqrestore(&rnet->tx_lock, flags);
@@ -385,24 +396,28 @@ static int rionet_close(struct net_device *ndev)
385 return 0; 396 return 0;
386} 397}
387 398
388static void rionet_remove(struct rio_dev *rdev) 399static int rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
389{ 400{
390 struct net_device *ndev = rio_get_drvdata(rdev); 401 struct rio_dev *rdev = to_rio_dev(dev);
391 unsigned char netid = rdev->net->hport->id; 402 unsigned char netid = rdev->net->hport->id;
392 struct rionet_peer *peer, *tmp; 403 struct rionet_peer *peer, *tmp;
393 404
394 unregister_netdev(ndev); 405 if (dev_rionet_capable(rdev)) {
395 406 list_for_each_entry_safe(peer, tmp, &nets[netid].peers, node) {
396 free_pages((unsigned long)nets[netid].active, get_order(sizeof(void *) * 407 if (peer->rdev == rdev) {
397 RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size))); 408 if (nets[netid].active[rdev->destid]) {
398 nets[netid].active = NULL; 409 nets[netid].active[rdev->destid] = NULL;
410 nets[netid].nact--;
411 }
399 412
400 list_for_each_entry_safe(peer, tmp, &nets[netid].peers, node) { 413 list_del(&peer->node);
401 list_del(&peer->node); 414 kfree(peer);
402 kfree(peer); 415 break;
416 }
417 }
403 } 418 }
404 419
405 free_netdev(ndev); 420 return 0;
406} 421}
407 422
408static void rionet_get_drvinfo(struct net_device *ndev, 423static void rionet_get_drvinfo(struct net_device *ndev,
@@ -503,12 +518,13 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
503 518
504static unsigned long net_table[RIONET_MAX_NETS/sizeof(unsigned long) + 1]; 519static unsigned long net_table[RIONET_MAX_NETS/sizeof(unsigned long) + 1];
505 520
506static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) 521static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
507{ 522{
508 int rc = -ENODEV; 523 int rc = -ENODEV;
509 u32 lsrc_ops, ldst_ops; 524 u32 lsrc_ops, ldst_ops;
510 struct rionet_peer *peer; 525 struct rionet_peer *peer;
511 struct net_device *ndev = NULL; 526 struct net_device *ndev = NULL;
527 struct rio_dev *rdev = to_rio_dev(dev);
512 unsigned char netid = rdev->net->hport->id; 528 unsigned char netid = rdev->net->hport->id;
513 int oldnet; 529 int oldnet;
514 530
@@ -518,8 +534,9 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
518 oldnet = test_and_set_bit(netid, net_table); 534 oldnet = test_and_set_bit(netid, net_table);
519 535
520 /* 536 /*
521 * First time through, make sure local device is rionet 537 * If first time through this net, make sure local device is rionet
522 * capable, setup netdev (will be skipped on later probes) 538 * capable and setup netdev (this step will be skipped in later probes
539 * on the same net).
523 */ 540 */
524 if (!oldnet) { 541 if (!oldnet) {
525 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, 542 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
@@ -541,6 +558,12 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
541 } 558 }
542 nets[netid].ndev = ndev; 559 nets[netid].ndev = ndev;
543 rc = rionet_setup_netdev(rdev->net->hport, ndev); 560 rc = rionet_setup_netdev(rdev->net->hport, ndev);
561 if (rc) {
562 printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
563 DRV_NAME, rc);
564 goto out;
565 }
566
544 INIT_LIST_HEAD(&nets[netid].peers); 567 INIT_LIST_HEAD(&nets[netid].peers);
545 nets[netid].nact = 0; 568 nets[netid].nact = 0;
546 } else if (nets[netid].ndev == NULL) 569 } else if (nets[netid].ndev == NULL)
@@ -559,31 +582,61 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
559 list_add_tail(&peer->node, &nets[netid].peers); 582 list_add_tail(&peer->node, &nets[netid].peers);
560 } 583 }
561 584
562 rio_set_drvdata(rdev, nets[netid].ndev); 585 return 0;
563 586out:
564 out:
565 return rc; 587 return rc;
566} 588}
567 589
590#ifdef MODULE
568static struct rio_device_id rionet_id_table[] = { 591static struct rio_device_id rionet_id_table[] = {
569 {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)} 592 {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
593 { 0, } /* terminate list */
570}; 594};
571 595
572static struct rio_driver rionet_driver = { 596MODULE_DEVICE_TABLE(rapidio, rionet_id_table);
573 .name = "rionet", 597#endif
574 .id_table = rionet_id_table, 598
575 .probe = rionet_probe, 599static struct subsys_interface rionet_interface = {
576 .remove = rionet_remove, 600 .name = "rionet",
601 .subsys = &rio_bus_type,
602 .add_dev = rionet_add_dev,
603 .remove_dev = rionet_remove_dev,
577}; 604};
578 605
579static int __init rionet_init(void) 606static int __init rionet_init(void)
580{ 607{
581 return rio_register_driver(&rionet_driver); 608 return subsys_interface_register(&rionet_interface);
582} 609}
583 610
584static void __exit rionet_exit(void) 611static void __exit rionet_exit(void)
585{ 612{
586 rio_unregister_driver(&rionet_driver); 613 struct rionet_private *rnet;
614 struct net_device *ndev;
615 struct rionet_peer *peer, *tmp;
616 int i;
617
618 for (i = 0; i < RIONET_MAX_NETS; i++) {
619 if (nets[i].ndev != NULL) {
620 ndev = nets[i].ndev;
621 rnet = netdev_priv(ndev);
622 unregister_netdev(ndev);
623
624 list_for_each_entry_safe(peer,
625 tmp, &nets[i].peers, node) {
626 list_del(&peer->node);
627 kfree(peer);
628 }
629
630 free_pages((unsigned long)nets[i].active,
631 get_order(sizeof(void *) *
632 RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size)));
633 nets[i].active = NULL;
634
635 free_netdev(ndev);
636 }
637 }
638
639 subsys_interface_unregister(&rionet_interface);
587} 640}
588 641
589late_initcall(rionet_init); 642late_initcall(rionet_init);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b3051052f3ad..bff7e0b0b4e7 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -525,31 +525,26 @@ static void team_set_no_mode(struct team *team)
525 team->mode = &__team_no_mode; 525 team->mode = &__team_no_mode;
526} 526}
527 527
528static void __team_adjust_ops(struct team *team, int en_port_count) 528static void team_adjust_ops(struct team *team)
529{ 529{
530 /* 530 /*
531 * To avoid checks in rx/tx skb paths, ensure here that non-null and 531 * To avoid checks in rx/tx skb paths, ensure here that non-null and
532 * correct ops are always set. 532 * correct ops are always set.
533 */ 533 */
534 534
535 if (!en_port_count || !team_is_mode_set(team) || 535 if (!team->en_port_count || !team_is_mode_set(team) ||
536 !team->mode->ops->transmit) 536 !team->mode->ops->transmit)
537 team->ops.transmit = team_dummy_transmit; 537 team->ops.transmit = team_dummy_transmit;
538 else 538 else
539 team->ops.transmit = team->mode->ops->transmit; 539 team->ops.transmit = team->mode->ops->transmit;
540 540
541 if (!en_port_count || !team_is_mode_set(team) || 541 if (!team->en_port_count || !team_is_mode_set(team) ||
542 !team->mode->ops->receive) 542 !team->mode->ops->receive)
543 team->ops.receive = team_dummy_receive; 543 team->ops.receive = team_dummy_receive;
544 else 544 else
545 team->ops.receive = team->mode->ops->receive; 545 team->ops.receive = team->mode->ops->receive;
546} 546}
547 547
548static void team_adjust_ops(struct team *team)
549{
550 __team_adjust_ops(team, team->en_port_count);
551}
552
553/* 548/*
554 * We can benefit from the fact that it's ensured no port is present 549 * We can benefit from the fact that it's ensured no port is present
555 * at the time of mode change. Therefore no packets are in fly so there's no 550 * at the time of mode change. Therefore no packets are in fly so there's no
@@ -725,9 +720,9 @@ static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
725static void __team_queue_override_port_del(struct team *team, 720static void __team_queue_override_port_del(struct team *team,
726 struct team_port *port) 721 struct team_port *port)
727{ 722{
723 if (!port->queue_id)
724 return;
728 list_del_rcu(&port->qom_list); 725 list_del_rcu(&port->qom_list);
729 synchronize_rcu();
730 INIT_LIST_HEAD(&port->qom_list);
731} 726}
732 727
733static bool team_queue_override_port_has_gt_prio_than(struct team_port *port, 728static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
@@ -749,9 +744,8 @@ static void __team_queue_override_port_add(struct team *team,
749 struct list_head *qom_list; 744 struct list_head *qom_list;
750 struct list_head *node; 745 struct list_head *node;
751 746
752 if (!port->queue_id || !team_port_enabled(port)) 747 if (!port->queue_id)
753 return; 748 return;
754
755 qom_list = __team_get_qom_list(team, port->queue_id); 749 qom_list = __team_get_qom_list(team, port->queue_id);
756 node = qom_list; 750 node = qom_list;
757 list_for_each_entry(cur, qom_list, qom_list) { 751 list_for_each_entry(cur, qom_list, qom_list) {
@@ -768,7 +762,7 @@ static void __team_queue_override_enabled_check(struct team *team)
768 bool enabled = false; 762 bool enabled = false;
769 763
770 list_for_each_entry(port, &team->port_list, list) { 764 list_for_each_entry(port, &team->port_list, list) {
771 if (!list_empty(&port->qom_list)) { 765 if (port->queue_id) {
772 enabled = true; 766 enabled = true;
773 break; 767 break;
774 } 768 }
@@ -780,14 +774,44 @@ static void __team_queue_override_enabled_check(struct team *team)
780 team->queue_override_enabled = enabled; 774 team->queue_override_enabled = enabled;
781} 775}
782 776
783static void team_queue_override_port_refresh(struct team *team, 777static void team_queue_override_port_prio_changed(struct team *team,
784 struct team_port *port) 778 struct team_port *port)
785{ 779{
780 if (!port->queue_id || team_port_enabled(port))
781 return;
786 __team_queue_override_port_del(team, port); 782 __team_queue_override_port_del(team, port);
787 __team_queue_override_port_add(team, port); 783 __team_queue_override_port_add(team, port);
788 __team_queue_override_enabled_check(team); 784 __team_queue_override_enabled_check(team);
789} 785}
790 786
787static void team_queue_override_port_change_queue_id(struct team *team,
788 struct team_port *port,
789 u16 new_queue_id)
790{
791 if (team_port_enabled(port)) {
792 __team_queue_override_port_del(team, port);
793 port->queue_id = new_queue_id;
794 __team_queue_override_port_add(team, port);
795 __team_queue_override_enabled_check(team);
796 } else {
797 port->queue_id = new_queue_id;
798 }
799}
800
801static void team_queue_override_port_add(struct team *team,
802 struct team_port *port)
803{
804 __team_queue_override_port_add(team, port);
805 __team_queue_override_enabled_check(team);
806}
807
808static void team_queue_override_port_del(struct team *team,
809 struct team_port *port)
810{
811 __team_queue_override_port_del(team, port);
812 __team_queue_override_enabled_check(team);
813}
814
791 815
792/**************** 816/****************
793 * Port handling 817 * Port handling
@@ -819,7 +843,7 @@ static void team_port_enable(struct team *team,
819 hlist_add_head_rcu(&port->hlist, 843 hlist_add_head_rcu(&port->hlist,
820 team_port_index_hash(team, port->index)); 844 team_port_index_hash(team, port->index));
821 team_adjust_ops(team); 845 team_adjust_ops(team);
822 team_queue_override_port_refresh(team, port); 846 team_queue_override_port_add(team, port);
823 if (team->ops.port_enabled) 847 if (team->ops.port_enabled)
824 team->ops.port_enabled(team, port); 848 team->ops.port_enabled(team, port);
825} 849}
@@ -848,14 +872,9 @@ static void team_port_disable(struct team *team,
848 hlist_del_rcu(&port->hlist); 872 hlist_del_rcu(&port->hlist);
849 __reconstruct_port_hlist(team, port->index); 873 __reconstruct_port_hlist(team, port->index);
850 port->index = -1; 874 port->index = -1;
851 team_queue_override_port_refresh(team, port);
852 __team_adjust_ops(team, team->en_port_count - 1);
853 /*
854 * Wait until readers see adjusted ops. This ensures that
855 * readers never see team->en_port_count == 0
856 */
857 synchronize_rcu();
858 team->en_port_count--; 875 team->en_port_count--;
876 team_queue_override_port_del(team, port);
877 team_adjust_ops(team);
859} 878}
860 879
861#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ 880#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -1163,8 +1182,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
1163 1182
1164 team_port_set_orig_dev_addr(port); 1183 team_port_set_orig_dev_addr(port);
1165 dev_set_mtu(port_dev, port->orig.mtu); 1184 dev_set_mtu(port_dev, port->orig.mtu);
1166 synchronize_rcu(); 1185 kfree_rcu(port, rcu);
1167 kfree(port);
1168 netdev_info(dev, "Port device %s removed\n", portname); 1186 netdev_info(dev, "Port device %s removed\n", portname);
1169 __team_compute_features(team); 1187 __team_compute_features(team);
1170 1188
@@ -1259,9 +1277,12 @@ static int team_priority_option_set(struct team *team,
1259 struct team_gsetter_ctx *ctx) 1277 struct team_gsetter_ctx *ctx)
1260{ 1278{
1261 struct team_port *port = ctx->info->port; 1279 struct team_port *port = ctx->info->port;
1280 s32 priority = ctx->data.s32_val;
1262 1281
1263 port->priority = ctx->data.s32_val; 1282 if (port->priority == priority)
1264 team_queue_override_port_refresh(team, port); 1283 return 0;
1284 port->priority = priority;
1285 team_queue_override_port_prio_changed(team, port);
1265 return 0; 1286 return 0;
1266} 1287}
1267 1288
@@ -1278,17 +1299,16 @@ static int team_queue_id_option_set(struct team *team,
1278 struct team_gsetter_ctx *ctx) 1299 struct team_gsetter_ctx *ctx)
1279{ 1300{
1280 struct team_port *port = ctx->info->port; 1301 struct team_port *port = ctx->info->port;
1302 u16 new_queue_id = ctx->data.u32_val;
1281 1303
1282 if (port->queue_id == ctx->data.u32_val) 1304 if (port->queue_id == new_queue_id)
1283 return 0; 1305 return 0;
1284 if (ctx->data.u32_val >= team->dev->real_num_tx_queues) 1306 if (new_queue_id >= team->dev->real_num_tx_queues)
1285 return -EINVAL; 1307 return -EINVAL;
1286 port->queue_id = ctx->data.u32_val; 1308 team_queue_override_port_change_queue_id(team, port, new_queue_id);
1287 team_queue_override_port_refresh(team, port);
1288 return 0; 1309 return 0;
1289} 1310}
1290 1311
1291
1292static const struct team_option team_options[] = { 1312static const struct team_option team_options[] = {
1293 { 1313 {
1294 .name = "mode", 1314 .name = "mode",
@@ -2648,7 +2668,7 @@ static void team_port_change_check(struct team_port *port, bool linkup)
2648static int team_device_event(struct notifier_block *unused, 2668static int team_device_event(struct notifier_block *unused,
2649 unsigned long event, void *ptr) 2669 unsigned long event, void *ptr)
2650{ 2670{
2651 struct net_device *dev = (struct net_device *) ptr; 2671 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2652 struct team_port *port; 2672 struct team_port *port;
2653 2673
2654 port = team_port_get_rtnl(dev); 2674 port = team_port_get_rtnl(dev);
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index cdc31b5ea15e..829a9cd2b4da 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -112,9 +112,8 @@ static struct team_port *lb_hash_select_tx_port(struct team *team,
112 struct sk_buff *skb, 112 struct sk_buff *skb,
113 unsigned char hash) 113 unsigned char hash)
114{ 114{
115 int port_index; 115 int port_index = team_num_to_port_index(team, hash);
116 116
117 port_index = hash % team->en_port_count;
118 return team_get_port_by_index_rcu(team, port_index); 117 return team_get_port_by_index_rcu(team, port_index);
119} 118}
120 119
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 472623f8ce3d..53665850b59e 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -30,7 +30,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
30 struct team_port *port; 30 struct team_port *port;
31 int port_index; 31 int port_index;
32 32
33 port_index = rr_priv(team)->sent_packets++ % team->en_port_count; 33 port_index = team_num_to_port_index(team,
34 rr_priv(team)->sent_packets++);
34 port = team_get_port_by_index_rcu(team, port_index); 35 port = team_get_port_by_index_rcu(team, port_index);
35 if (unlikely(!port)) 36 if (unlikely(!port))
36 goto drop; 37 goto drop;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9c61f8734a40..7eab5fcd064f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -841,7 +841,7 @@ static const struct net_device_ops tap_netdev_ops = {
841#endif 841#endif
842}; 842};
843 843
844static int tun_flow_init(struct tun_struct *tun) 844static void tun_flow_init(struct tun_struct *tun)
845{ 845{
846 int i; 846 int i;
847 847
@@ -852,8 +852,6 @@ static int tun_flow_init(struct tun_struct *tun)
852 setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun); 852 setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
853 mod_timer(&tun->flow_gc_timer, 853 mod_timer(&tun->flow_gc_timer,
854 round_jiffies_up(jiffies + tun->ageing_time)); 854 round_jiffies_up(jiffies + tun->ageing_time));
855
856 return 0;
857} 855}
858 856
859static void tun_flow_uninit(struct tun_struct *tun) 857static void tun_flow_uninit(struct tun_struct *tun)
@@ -1532,6 +1530,9 @@ static int tun_flags(struct tun_struct *tun)
1532 if (tun->flags & TUN_TAP_MQ) 1530 if (tun->flags & TUN_TAP_MQ)
1533 flags |= IFF_MULTI_QUEUE; 1531 flags |= IFF_MULTI_QUEUE;
1534 1532
1533 if (tun->flags & TUN_PERSIST)
1534 flags |= IFF_PERSIST;
1535
1535 return flags; 1536 return flags;
1536} 1537}
1537 1538
@@ -1661,10 +1662,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1661 goto err_free_dev; 1662 goto err_free_dev;
1662 1663
1663 tun_net_init(dev); 1664 tun_net_init(dev);
1664 1665 tun_flow_init(tun);
1665 err = tun_flow_init(tun);
1666 if (err < 0)
1667 goto err_free_dev;
1668 1666
1669 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 1667 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1670 TUN_USER_FEATURES; 1668 TUN_USER_FEATURES;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 287cc624b90b..d84bfd4109a4 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -67,7 +67,6 @@ config USB_KAWETH
67 67
68config USB_PEGASUS 68config USB_PEGASUS
69 tristate "USB Pegasus/Pegasus-II based ethernet device support" 69 tristate "USB Pegasus/Pegasus-II based ethernet device support"
70 select NET_CORE
71 select MII 70 select MII
72 ---help--- 71 ---help---
73 Say Y here if you know you have Pegasus or Pegasus-II based adapter. 72 Say Y here if you know you have Pegasus or Pegasus-II based adapter.
@@ -83,7 +82,6 @@ config USB_PEGASUS
83 82
84config USB_RTL8150 83config USB_RTL8150
85 tristate "USB RTL8150 based ethernet device support" 84 tristate "USB RTL8150 based ethernet device support"
86 select NET_CORE
87 select MII 85 select MII
88 help 86 help
89 Say Y here if you have RTL8150 based usb-ethernet adapter. 87 Say Y here if you have RTL8150 based usb-ethernet adapter.
@@ -95,7 +93,6 @@ config USB_RTL8150
95 93
96config USB_RTL8152 94config USB_RTL8152
97 tristate "Realtek RTL8152 Based USB 2.0 Ethernet Adapters" 95 tristate "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
98 select NET_CORE
99 select MII 96 select MII
100 help 97 help
101 This option adds support for Realtek RTL8152 based USB 2.0 98 This option adds support for Realtek RTL8152 based USB 2.0
@@ -106,7 +103,6 @@ config USB_RTL8152
106 103
107config USB_USBNET 104config USB_USBNET
108 tristate "Multi-purpose USB Networking Framework" 105 tristate "Multi-purpose USB Networking Framework"
109 select NET_CORE
110 select MII 106 select MII
111 ---help--- 107 ---help---
112 This driver supports several kinds of network links over USB, 108 This driver supports several kinds of network links over USB,
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index bd8758fa38c1..1e3c302d94fe 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1371,7 +1371,7 @@ static int ax88179_stop(struct usbnet *dev)
1371} 1371}
1372 1372
1373static const struct driver_info ax88179_info = { 1373static const struct driver_info ax88179_info = {
1374 .description = "ASIX AX88179 USB 3.0 Gigibit Ethernet", 1374 .description = "ASIX AX88179 USB 3.0 Gigabit Ethernet",
1375 .bind = ax88179_bind, 1375 .bind = ax88179_bind,
1376 .unbind = ax88179_unbind, 1376 .unbind = ax88179_unbind,
1377 .status = ax88179_status, 1377 .status = ax88179_status,
@@ -1384,7 +1384,7 @@ static const struct driver_info ax88179_info = {
1384}; 1384};
1385 1385
1386static const struct driver_info ax88178a_info = { 1386static const struct driver_info ax88178a_info = {
1387 .description = "ASIX AX88178A USB 2.0 Gigibit Ethernet", 1387 .description = "ASIX AX88178A USB 2.0 Gigabit Ethernet",
1388 .bind = ax88179_bind, 1388 .bind = ax88179_bind,
1389 .unbind = ax88179_unbind, 1389 .unbind = ax88179_unbind,
1390 .status = ax88179_status, 1390 .status = ax88179_status,
@@ -1433,6 +1433,7 @@ static struct usb_driver ax88179_178a_driver = {
1433 .probe = usbnet_probe, 1433 .probe = usbnet_probe,
1434 .suspend = ax88179_suspend, 1434 .suspend = ax88179_suspend,
1435 .resume = ax88179_resume, 1435 .resume = ax88179_resume,
1436 .reset_resume = ax88179_resume,
1436 .disconnect = usbnet_disconnect, 1437 .disconnect = usbnet_disconnect,
1437 .supports_autosuspend = 1, 1438 .supports_autosuspend = 1,
1438 .disable_hub_initiated_lpm = 1, 1439 .disable_hub_initiated_lpm = 1,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 04ee044dde51..4393f1483126 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -215,6 +215,10 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
215 goto bad_desc; 215 goto bad_desc;
216 } 216 }
217 217
218 /* some devices merge these - skip class check */
219 if (info->control == info->data)
220 goto next_desc;
221
218 /* a data interface altsetting does the real i/o */ 222 /* a data interface altsetting does the real i/o */
219 d = &info->data->cur_altsetting->desc; 223 d = &info->data->cur_altsetting->desc;
220 if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { 224 if (d->bInterfaceClass != USB_CLASS_CDC_DATA) {
@@ -304,19 +308,23 @@ next_desc:
304 /* claim data interface and set it up ... with side effects. 308 /* claim data interface and set it up ... with side effects.
305 * network traffic can't flow until an altsetting is enabled. 309 * network traffic can't flow until an altsetting is enabled.
306 */ 310 */
307 status = usb_driver_claim_interface(driver, info->data, dev); 311 if (info->data != info->control) {
308 if (status < 0) 312 status = usb_driver_claim_interface(driver, info->data, dev);
309 return status; 313 if (status < 0)
314 return status;
315 }
310 status = usbnet_get_endpoints(dev, info->data); 316 status = usbnet_get_endpoints(dev, info->data);
311 if (status < 0) { 317 if (status < 0) {
312 /* ensure immediate exit from usbnet_disconnect */ 318 /* ensure immediate exit from usbnet_disconnect */
313 usb_set_intfdata(info->data, NULL); 319 usb_set_intfdata(info->data, NULL);
314 usb_driver_release_interface(driver, info->data); 320 if (info->data != info->control)
321 usb_driver_release_interface(driver, info->data);
315 return status; 322 return status;
316 } 323 }
317 324
318 /* status endpoint: optional for CDC Ethernet, not RNDIS (or ACM) */ 325 /* status endpoint: optional for CDC Ethernet, not RNDIS (or ACM) */
319 dev->status = NULL; 326 if (info->data != info->control)
327 dev->status = NULL;
320 if (info->control->cur_altsetting->desc.bNumEndpoints == 1) { 328 if (info->control->cur_altsetting->desc.bNumEndpoints == 1) {
321 struct usb_endpoint_descriptor *desc; 329 struct usb_endpoint_descriptor *desc;
322 330
@@ -349,6 +357,10 @@ void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
349 struct cdc_state *info = (void *) &dev->data; 357 struct cdc_state *info = (void *) &dev->data;
350 struct usb_driver *driver = driver_of(intf); 358 struct usb_driver *driver = driver_of(intf);
351 359
360 /* combined interface - nothing to do */
361 if (info->data == info->control)
362 return;
363
352 /* disconnect master --> disconnect slave */ 364 /* disconnect master --> disconnect slave */
353 if (intf == info->control && info->data) { 365 if (intf == info->control && info->data) {
354 /* ensure immediate exit from usbnet_disconnect */ 366 /* ensure immediate exit from usbnet_disconnect */
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 534d8becbbdc..ff8594d8dd2d 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -60,6 +60,7 @@
60#define USB_PRODUCT_IPHONE_3GS 0x1294 60#define USB_PRODUCT_IPHONE_3GS 0x1294
61#define USB_PRODUCT_IPHONE_4 0x1297 61#define USB_PRODUCT_IPHONE_4 0x1297
62#define USB_PRODUCT_IPAD 0x129a 62#define USB_PRODUCT_IPAD 0x129a
63#define USB_PRODUCT_IPAD_MINI 0x12ab
63#define USB_PRODUCT_IPHONE_4_VZW 0x129c 64#define USB_PRODUCT_IPHONE_4_VZW 0x129c
64#define USB_PRODUCT_IPHONE_4S 0x12a0 65#define USB_PRODUCT_IPHONE_4S 0x12a0
65#define USB_PRODUCT_IPHONE_5 0x12a8 66#define USB_PRODUCT_IPHONE_5 0x12a8
@@ -107,6 +108,10 @@ static struct usb_device_id ipheth_table[] = {
107 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 108 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
108 IPHETH_USBINTF_PROTO) }, 109 IPHETH_USBINTF_PROTO) },
109 { USB_DEVICE_AND_INTERFACE_INFO( 110 { USB_DEVICE_AND_INTERFACE_INFO(
111 USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI,
112 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
113 IPHETH_USBINTF_PROTO) },
114 { USB_DEVICE_AND_INTERFACE_INFO(
110 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, 115 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
111 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 116 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
112 IPHETH_USBINTF_PROTO) }, 117 IPHETH_USBINTF_PROTO) },
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 0192073e53a3..6866eae3e388 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -221,12 +221,9 @@ done:
221 memset(skb_put(skb, padlen), 0, padlen); 221 memset(skb_put(skb, padlen), 0, padlen);
222 } 222 }
223 223
224 netdev_dbg( 224 netdev_dbg(dev->net,
225 dev->net, 225 "Sending package with length %i and padding %i. Header: %6phC.",
226 "Sending package with length %i and padding %i. Header: %02x:%02x:%02x:%02x:%02x:%02x.", 226 content_len, padlen, header_start);
227 content_len, padlen, header_start[0], header_start[1],
228 header_start[2], header_start[3], header_start[4],
229 header_start[5]);
230 227
231 return skb; 228 return skb;
232} 229}
@@ -263,32 +260,23 @@ kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
263 sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp( 260 sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp(
264 header_start, EXPECTED_UNKNOWN_HEADER_2, 261 header_start, EXPECTED_UNKNOWN_HEADER_2,
265 sizeof(EXPECTED_UNKNOWN_HEADER_2))) { 262 sizeof(EXPECTED_UNKNOWN_HEADER_2))) {
266 netdev_dbg( 263 netdev_dbg(dev->net,
267 dev->net, 264 "Received expected unknown frame header: %6phC. Package length: %i\n",
268 "Received expected unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n", 265 header_start,
269 header_start[0], header_start[1],
270 header_start[2], header_start[3],
271 header_start[4], header_start[5],
272 skb->len - KALMIA_HEADER_LENGTH); 266 skb->len - KALMIA_HEADER_LENGTH);
273 } 267 }
274 else { 268 else {
275 netdev_err( 269 netdev_err(dev->net,
276 dev->net, 270 "Received unknown frame header: %6phC. Package length: %i\n",
277 "Received unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n", 271 header_start,
278 header_start[0], header_start[1],
279 header_start[2], header_start[3],
280 header_start[4], header_start[5],
281 skb->len - KALMIA_HEADER_LENGTH); 272 skb->len - KALMIA_HEADER_LENGTH);
282 return 0; 273 return 0;
283 } 274 }
284 } 275 }
285 else 276 else
286 netdev_dbg( 277 netdev_dbg(dev->net,
287 dev->net, 278 "Received header: %6phC. Package length: %i\n",
288 "Received header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n", 279 header_start, skb->len - KALMIA_HEADER_LENGTH);
289 header_start[0], header_start[1], header_start[2],
290 header_start[3], header_start[4], header_start[5],
291 skb->len - KALMIA_HEADER_LENGTH);
292 280
293 /* subtract start header and end header */ 281 /* subtract start header and end header */
294 usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH); 282 usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
@@ -310,12 +298,9 @@ kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
310 sizeof(HEADER_END_OF_USB_PACKET)) == 0); 298 sizeof(HEADER_END_OF_USB_PACKET)) == 0);
311 if (!is_last) { 299 if (!is_last) {
312 header_start = skb->data + ether_packet_length; 300 header_start = skb->data + ether_packet_length;
313 netdev_dbg( 301 netdev_dbg(dev->net,
314 dev->net, 302 "End header: %6phC. Package length: %i\n",
315 "End header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n", 303 header_start,
316 header_start[0], header_start[1],
317 header_start[2], header_start[3],
318 header_start[4], header_start[5],
319 skb->len - KALMIA_HEADER_LENGTH); 304 skb->len - KALMIA_HEADER_LENGTH);
320 } 305 }
321 } 306 }
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 56459215a22b..606eba2872bd 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -523,6 +523,7 @@ static const struct usb_device_id products[] = {
523 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, 523 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
524 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, 524 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
525 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, 525 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
526 {QMI_FIXED_INTF(0x19d2, 0x0019, 3)}, /* ONDA MT689DC */
526 {QMI_FIXED_INTF(0x19d2, 0x0021, 4)}, 527 {QMI_FIXED_INTF(0x19d2, 0x0021, 4)},
527 {QMI_FIXED_INTF(0x19d2, 0x0025, 1)}, 528 {QMI_FIXED_INTF(0x19d2, 0x0025, 1)},
528 {QMI_FIXED_INTF(0x19d2, 0x0031, 4)}, 529 {QMI_FIXED_INTF(0x19d2, 0x0031, 4)},
@@ -582,6 +583,7 @@ static const struct usb_device_id products[] = {
582 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 583 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
583 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 584 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
584 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 585 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
586 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
585 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 587 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
586 {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */ 588 {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */
587 589
@@ -618,6 +620,7 @@ static const struct usb_device_id products[] = {
618 {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ 620 {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
619 {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ 621 {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
620 {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ 622 {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
623 {QMI_GOBI_DEVICE(0x0af0, 0x8120)}, /* Option GTM681W */
621 {QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ 624 {QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
622 {QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */ 625 {QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */
623 {QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 626 {QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
@@ -631,7 +634,6 @@ static const struct usb_device_id products[] = {
631 {QMI_GOBI_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 634 {QMI_GOBI_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
632 {QMI_GOBI_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 635 {QMI_GOBI_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
633 {QMI_GOBI_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */ 636 {QMI_GOBI_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
634 {QMI_FIXED_INTF(0x1199, 0x9011, 5)}, /* alternate interface number!? */
635 {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ 637 {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
636 {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ 638 {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
637 {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ 639 {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 14e519888631..d02bac82fc57 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -11,7 +11,6 @@
11#include <linux/signal.h> 11#include <linux/signal.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/version.h>
15#include <linux/netdevice.h> 14#include <linux/netdevice.h>
16#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
17#include <linux/mii.h> 16#include <linux/mii.h>
@@ -1749,18 +1748,7 @@ static struct usb_driver rtl8152_driver = {
1749 .resume = rtl8152_resume 1748 .resume = rtl8152_resume
1750}; 1749};
1751 1750
1752static int __init usb_rtl8152_init(void) 1751module_usb_driver(rtl8152_driver);
1753{
1754 return usb_register(&rtl8152_driver);
1755}
1756
1757static void __exit usb_rtl8152_exit(void)
1758{
1759 usb_deregister(&rtl8152_driver);
1760}
1761
1762module_init(usb_rtl8152_init);
1763module_exit(usb_rtl8152_exit);
1764 1752
1765MODULE_AUTHOR(DRIVER_AUTHOR); 1753MODULE_AUTHOR(DRIVER_AUTHOR);
1766MODULE_DESCRIPTION(DRIVER_DESC); 1754MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 177f911f5946..da866523cf20 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -379,12 +379,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
379 else 379 else
380 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); 380 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
381 381
382 if (strchr(dev->name, '%')) {
383 err = dev_alloc_name(dev, dev->name);
384 if (err < 0)
385 goto err_alloc_name;
386 }
387
388 err = register_netdevice(dev); 382 err = register_netdevice(dev);
389 if (err < 0) 383 if (err < 0)
390 goto err_register_dev; 384 goto err_register_dev;
@@ -404,7 +398,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
404 398
405err_register_dev: 399err_register_dev:
406 /* nothing to do */ 400 /* nothing to do */
407err_alloc_name:
408err_configure_peer: 401err_configure_peer:
409 unregister_netdevice(peer); 402 unregister_netdevice(peer);
410 return err; 403 return err;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c9e00387d999..3d2a90a62649 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -602,7 +602,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
602 container_of(napi, struct receive_queue, napi); 602 container_of(napi, struct receive_queue, napi);
603 struct virtnet_info *vi = rq->vq->vdev->priv; 603 struct virtnet_info *vi = rq->vq->vdev->priv;
604 void *buf; 604 void *buf;
605 unsigned int len, received = 0; 605 unsigned int r, len, received = 0;
606 606
607again: 607again:
608 while (received < budget && 608 while (received < budget &&
@@ -619,8 +619,9 @@ again:
619 619
620 /* Out of packets? */ 620 /* Out of packets? */
621 if (received < budget) { 621 if (received < budget) {
622 r = virtqueue_enable_cb_prepare(rq->vq);
622 napi_complete(napi); 623 napi_complete(napi);
623 if (unlikely(!virtqueue_enable_cb(rq->vq)) && 624 if (unlikely(virtqueue_poll(rq->vq, r)) &&
624 napi_schedule_prep(napi)) { 625 napi_schedule_prep(napi)) {
625 virtqueue_disable_cb(rq->vq); 626 virtqueue_disable_cb(rq->vq);
626 __napi_schedule(napi); 627 __napi_schedule(napi);
@@ -901,7 +902,6 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
901 struct scatterlist sg; 902 struct scatterlist sg;
902 struct virtio_net_ctrl_mq s; 903 struct virtio_net_ctrl_mq s;
903 struct net_device *dev = vi->dev; 904 struct net_device *dev = vi->dev;
904 int i;
905 905
906 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 906 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
907 return 0; 907 return 0;
@@ -915,10 +915,8 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
915 queue_pairs); 915 queue_pairs);
916 return -EINVAL; 916 return -EINVAL;
917 } else { 917 } else {
918 for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
919 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
920 schedule_delayed_work(&vi->refill, 0);
921 vi->curr_queue_pairs = queue_pairs; 918 vi->curr_queue_pairs = queue_pairs;
919 schedule_delayed_work(&vi->refill, 0);
922 } 920 }
923 921
924 return 0; 922 return 0;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 57325f356d4f..227b54a1f88a 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -44,6 +44,8 @@
44 44
45#define VXLAN_VERSION "0.1" 45#define VXLAN_VERSION "0.1"
46 46
47#define PORT_HASH_BITS 8
48#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
47#define VNI_HASH_BITS 10 49#define VNI_HASH_BITS 10
48#define VNI_HASH_SIZE (1<<VNI_HASH_BITS) 50#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
49#define FDB_HASH_BITS 8 51#define FDB_HASH_BITS 8
@@ -66,30 +68,44 @@ struct vxlanhdr {
66 68
67/* UDP port for VXLAN traffic. 69/* UDP port for VXLAN traffic.
68 * The IANA assigned port is 4789, but the Linux default is 8472 70 * The IANA assigned port is 4789, but the Linux default is 8472
69 * for compatability with early adopters. 71 * for compatibility with early adopters.
70 */ 72 */
71static unsigned int vxlan_port __read_mostly = 8472; 73static unsigned short vxlan_port __read_mostly = 8472;
72module_param_named(udp_port, vxlan_port, uint, 0444); 74module_param_named(udp_port, vxlan_port, ushort, 0444);
73MODULE_PARM_DESC(udp_port, "Destination UDP port"); 75MODULE_PARM_DESC(udp_port, "Destination UDP port");
74 76
75static bool log_ecn_error = true; 77static bool log_ecn_error = true;
76module_param(log_ecn_error, bool, 0644); 78module_param(log_ecn_error, bool, 0644);
77MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); 79MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
78 80
79/* per-net private data for this module */ 81static int vxlan_net_id;
80static unsigned int vxlan_net_id; 82
81struct vxlan_net { 83static const u8 all_zeros_mac[ETH_ALEN];
82 struct socket *sock; /* UDP encap socket */ 84
85/* per UDP socket information */
86struct vxlan_sock {
87 struct hlist_node hlist;
88 struct rcu_head rcu;
89 struct work_struct del_work;
90 atomic_t refcnt;
91 struct socket *sock;
83 struct hlist_head vni_list[VNI_HASH_SIZE]; 92 struct hlist_head vni_list[VNI_HASH_SIZE];
84}; 93};
85 94
95/* per-network namespace private data for this module */
96struct vxlan_net {
97 struct list_head vxlan_list;
98 struct hlist_head sock_list[PORT_HASH_SIZE];
99 spinlock_t sock_lock;
100};
101
86struct vxlan_rdst { 102struct vxlan_rdst {
87 struct rcu_head rcu;
88 __be32 remote_ip; 103 __be32 remote_ip;
89 __be16 remote_port; 104 __be16 remote_port;
90 u32 remote_vni; 105 u32 remote_vni;
91 u32 remote_ifindex; 106 u32 remote_ifindex;
92 struct vxlan_rdst *remote_next; 107 struct list_head list;
108 struct rcu_head rcu;
93}; 109};
94 110
95/* Forwarding table entry */ 111/* Forwarding table entry */
@@ -98,7 +114,7 @@ struct vxlan_fdb {
98 struct rcu_head rcu; 114 struct rcu_head rcu;
99 unsigned long updated; /* jiffies */ 115 unsigned long updated; /* jiffies */
100 unsigned long used; 116 unsigned long used;
101 struct vxlan_rdst remote; 117 struct list_head remotes;
102 u16 state; /* see ndm_state */ 118 u16 state; /* see ndm_state */
103 u8 flags; /* see ndm_flags */ 119 u8 flags; /* see ndm_flags */
104 u8 eth_addr[ETH_ALEN]; 120 u8 eth_addr[ETH_ALEN];
@@ -106,7 +122,9 @@ struct vxlan_fdb {
106 122
107/* Pseudo network device */ 123/* Pseudo network device */
108struct vxlan_dev { 124struct vxlan_dev {
109 struct hlist_node hlist; 125 struct hlist_node hlist; /* vni hash table */
126 struct list_head next; /* vxlan's per namespace list */
127 struct vxlan_sock *vn_sock; /* listening socket */
110 struct net_device *dev; 128 struct net_device *dev;
111 struct vxlan_rdst default_dst; /* default destination */ 129 struct vxlan_rdst default_dst; /* default destination */
112 __be32 saddr; /* source address */ 130 __be32 saddr; /* source address */
@@ -117,6 +135,9 @@ struct vxlan_dev {
117 __u8 ttl; 135 __u8 ttl;
118 u32 flags; /* VXLAN_F_* below */ 136 u32 flags; /* VXLAN_F_* below */
119 137
138 struct work_struct sock_work;
139 struct work_struct igmp_work;
140
120 unsigned long age_interval; 141 unsigned long age_interval;
121 struct timer_list age_timer; 142 struct timer_list age_timer;
122 spinlock_t hash_lock; 143 spinlock_t hash_lock;
@@ -134,20 +155,55 @@ struct vxlan_dev {
134 155
135/* salt for hash table */ 156/* salt for hash table */
136static u32 vxlan_salt __read_mostly; 157static u32 vxlan_salt __read_mostly;
158static struct workqueue_struct *vxlan_wq;
159
160static void vxlan_sock_work(struct work_struct *work);
161
162/* Virtual Network hash table head */
163static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
164{
165 return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
166}
137 167
138static inline struct hlist_head *vni_head(struct net *net, u32 id) 168/* Socket hash table head */
169static inline struct hlist_head *vs_head(struct net *net, __be16 port)
139{ 170{
140 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 171 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
141 172
142 return &vn->vni_list[hash_32(id, VNI_HASH_BITS)]; 173 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
174}
175
176/* First remote destination for a forwarding entry.
177 * Guaranteed to be non-NULL because remotes are never deleted.
178 */
179static inline struct vxlan_rdst *first_remote(struct vxlan_fdb *fdb)
180{
181 return list_first_or_null_rcu(&fdb->remotes, struct vxlan_rdst, list);
182}
183
184/* Find VXLAN socket based on network namespace and UDP port */
185static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
186{
187 struct vxlan_sock *vs;
188
189 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
190 if (inet_sk(vs->sock->sk)->inet_sport == port)
191 return vs;
192 }
193 return NULL;
143} 194}
144 195
145/* Look up VNI in a per net namespace table */ 196/* Look up VNI in a per net namespace table */
146static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id) 197static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
147{ 198{
199 struct vxlan_sock *vs;
148 struct vxlan_dev *vxlan; 200 struct vxlan_dev *vxlan;
149 201
150 hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) { 202 vs = vxlan_find_port(net, port);
203 if (!vs)
204 return NULL;
205
206 hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
151 if (vxlan->default_dst.remote_vni == id) 207 if (vxlan->default_dst.remote_vni == id)
152 return vxlan; 208 return vxlan;
153 } 209 }
@@ -157,9 +213,9 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
157 213
158/* Fill in neighbour message in skbuff. */ 214/* Fill in neighbour message in skbuff. */
159static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, 215static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
160 const struct vxlan_fdb *fdb, 216 const struct vxlan_fdb *fdb,
161 u32 portid, u32 seq, int type, unsigned int flags, 217 u32 portid, u32 seq, int type, unsigned int flags,
162 const struct vxlan_rdst *rdst) 218 const struct vxlan_rdst *rdst)
163{ 219{
164 unsigned long now = jiffies; 220 unsigned long now = jiffies;
165 struct nda_cacheinfo ci; 221 struct nda_cacheinfo ci;
@@ -197,7 +253,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
197 nla_put_be16(skb, NDA_PORT, rdst->remote_port)) 253 nla_put_be16(skb, NDA_PORT, rdst->remote_port))
198 goto nla_put_failure; 254 goto nla_put_failure;
199 if (rdst->remote_vni != vxlan->default_dst.remote_vni && 255 if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
200 nla_put_be32(skb, NDA_VNI, rdst->remote_vni)) 256 nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
201 goto nla_put_failure; 257 goto nla_put_failure;
202 if (rdst->remote_ifindex && 258 if (rdst->remote_ifindex &&
203 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex)) 259 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
@@ -230,7 +286,7 @@ static inline size_t vxlan_nlmsg_size(void)
230} 286}
231 287
232static void vxlan_fdb_notify(struct vxlan_dev *vxlan, 288static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
233 const struct vxlan_fdb *fdb, int type) 289 struct vxlan_fdb *fdb, int type)
234{ 290{
235 struct net *net = dev_net(vxlan->dev); 291 struct net *net = dev_net(vxlan->dev);
236 struct sk_buff *skb; 292 struct sk_buff *skb;
@@ -240,7 +296,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
240 if (skb == NULL) 296 if (skb == NULL)
241 goto errout; 297 goto errout;
242 298
243 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, &fdb->remote); 299 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, first_remote(fdb));
244 if (err < 0) { 300 if (err < 0) {
245 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ 301 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
246 WARN_ON(err == -EMSGSIZE); 302 WARN_ON(err == -EMSGSIZE);
@@ -258,22 +314,27 @@ errout:
258static void vxlan_ip_miss(struct net_device *dev, __be32 ipa) 314static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
259{ 315{
260 struct vxlan_dev *vxlan = netdev_priv(dev); 316 struct vxlan_dev *vxlan = netdev_priv(dev);
261 struct vxlan_fdb f; 317 struct vxlan_fdb f = {
318 .state = NUD_STALE,
319 };
320 struct vxlan_rdst remote = {
321 .remote_ip = ipa, /* goes to NDA_DST */
322 .remote_vni = VXLAN_N_VID,
323 };
262 324
263 memset(&f, 0, sizeof f); 325 INIT_LIST_HEAD(&f.remotes);
264 f.state = NUD_STALE; 326 list_add_rcu(&remote.list, &f.remotes);
265 f.remote.remote_ip = ipa; /* goes to NDA_DST */
266 f.remote.remote_vni = VXLAN_N_VID;
267 327
268 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH); 328 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
269} 329}
270 330
271static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) 331static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
272{ 332{
273 struct vxlan_fdb f; 333 struct vxlan_fdb f = {
334 .state = NUD_STALE,
335 };
274 336
275 memset(&f, 0, sizeof f); 337 INIT_LIST_HEAD(&f.remotes);
276 f.state = NUD_STALE;
277 memcpy(f.eth_addr, eth_addr, ETH_ALEN); 338 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
278 339
279 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH); 340 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
@@ -328,21 +389,34 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
328 return f; 389 return f;
329} 390}
330 391
331/* Add/update destinations for multicast */ 392/* caller should hold vxlan->hash_lock */
332static int vxlan_fdb_append(struct vxlan_fdb *f, 393static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
333 __be32 ip, __be16 port, __u32 vni, __u32 ifindex) 394 __be32 ip, __be16 port,
395 __u32 vni, __u32 ifindex)
334{ 396{
335 struct vxlan_rdst *rd_prev, *rd; 397 struct vxlan_rdst *rd;
336 398
337 rd_prev = NULL; 399 list_for_each_entry(rd, &f->remotes, list) {
338 for (rd = &f->remote; rd; rd = rd->remote_next) {
339 if (rd->remote_ip == ip && 400 if (rd->remote_ip == ip &&
340 rd->remote_port == port && 401 rd->remote_port == port &&
341 rd->remote_vni == vni && 402 rd->remote_vni == vni &&
342 rd->remote_ifindex == ifindex) 403 rd->remote_ifindex == ifindex)
343 return 0; 404 return rd;
344 rd_prev = rd;
345 } 405 }
406
407 return NULL;
408}
409
410/* Add/update destinations for multicast */
411static int vxlan_fdb_append(struct vxlan_fdb *f,
412 __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
413{
414 struct vxlan_rdst *rd;
415
416 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
417 if (rd)
418 return 0;
419
346 rd = kmalloc(sizeof(*rd), GFP_ATOMIC); 420 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
347 if (rd == NULL) 421 if (rd == NULL)
348 return -ENOBUFS; 422 return -ENOBUFS;
@@ -350,8 +424,9 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
350 rd->remote_port = port; 424 rd->remote_port = port;
351 rd->remote_vni = vni; 425 rd->remote_vni = vni;
352 rd->remote_ifindex = ifindex; 426 rd->remote_ifindex = ifindex;
353 rd->remote_next = NULL; 427
354 rd_prev->remote_next = rd; 428 list_add_tail_rcu(&rd->list, &f->remotes);
429
355 return 1; 430 return 1;
356} 431}
357 432
@@ -383,7 +458,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
383 notify = 1; 458 notify = 1;
384 } 459 }
385 if ((flags & NLM_F_APPEND) && 460 if ((flags & NLM_F_APPEND) &&
386 is_multicast_ether_addr(f->eth_addr)) { 461 (is_multicast_ether_addr(f->eth_addr) ||
462 is_zero_ether_addr(f->eth_addr))) {
387 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex); 463 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
388 464
389 if (rc < 0) 465 if (rc < 0)
@@ -403,16 +479,14 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
403 return -ENOMEM; 479 return -ENOMEM;
404 480
405 notify = 1; 481 notify = 1;
406 f->remote.remote_ip = ip;
407 f->remote.remote_port = port;
408 f->remote.remote_vni = vni;
409 f->remote.remote_ifindex = ifindex;
410 f->remote.remote_next = NULL;
411 f->state = state; 482 f->state = state;
412 f->flags = ndm_flags; 483 f->flags = ndm_flags;
413 f->updated = f->used = jiffies; 484 f->updated = f->used = jiffies;
485 INIT_LIST_HEAD(&f->remotes);
414 memcpy(f->eth_addr, mac, ETH_ALEN); 486 memcpy(f->eth_addr, mac, ETH_ALEN);
415 487
488 vxlan_fdb_append(f, ip, port, vni, ifindex);
489
416 ++vxlan->addrcnt; 490 ++vxlan->addrcnt;
417 hlist_add_head_rcu(&f->hlist, 491 hlist_add_head_rcu(&f->hlist,
418 vxlan_fdb_head(vxlan, mac)); 492 vxlan_fdb_head(vxlan, mac));
@@ -424,16 +498,19 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
424 return 0; 498 return 0;
425} 499}
426 500
501static void vxlan_fdb_free_rdst(struct rcu_head *head)
502{
503 struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
504 kfree(rd);
505}
506
427static void vxlan_fdb_free(struct rcu_head *head) 507static void vxlan_fdb_free(struct rcu_head *head)
428{ 508{
429 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu); 509 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
510 struct vxlan_rdst *rd, *nd;
430 511
431 while (f->remote.remote_next) { 512 list_for_each_entry_safe(rd, nd, &f->remotes, list)
432 struct vxlan_rdst *rd = f->remote.remote_next;
433
434 f->remote.remote_next = rd->remote_next;
435 kfree(rd); 513 kfree(rd);
436 }
437 kfree(f); 514 kfree(f);
438} 515}
439 516
@@ -449,58 +526,77 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
449 call_rcu(&f->rcu, vxlan_fdb_free); 526 call_rcu(&f->rcu, vxlan_fdb_free);
450} 527}
451 528
452/* Add static entry (via netlink) */ 529static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
453static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 530 __be32 *ip, __be16 *port, u32 *vni, u32 *ifindex)
454 struct net_device *dev,
455 const unsigned char *addr, u16 flags)
456{ 531{
457 struct vxlan_dev *vxlan = netdev_priv(dev);
458 struct net *net = dev_net(vxlan->dev); 532 struct net *net = dev_net(vxlan->dev);
459 __be32 ip;
460 __be16 port;
461 u32 vni, ifindex;
462 int err;
463 533
464 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) { 534 if (tb[NDA_DST]) {
465 pr_info("RTM_NEWNEIGH with invalid state %#x\n", 535 if (nla_len(tb[NDA_DST]) != sizeof(__be32))
466 ndm->ndm_state); 536 return -EAFNOSUPPORT;
467 return -EINVAL;
468 }
469
470 if (tb[NDA_DST] == NULL)
471 return -EINVAL;
472
473 if (nla_len(tb[NDA_DST]) != sizeof(__be32))
474 return -EAFNOSUPPORT;
475 537
476 ip = nla_get_be32(tb[NDA_DST]); 538 *ip = nla_get_be32(tb[NDA_DST]);
539 } else {
540 *ip = htonl(INADDR_ANY);
541 }
477 542
478 if (tb[NDA_PORT]) { 543 if (tb[NDA_PORT]) {
479 if (nla_len(tb[NDA_PORT]) != sizeof(__be16)) 544 if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
480 return -EINVAL; 545 return -EINVAL;
481 port = nla_get_be16(tb[NDA_PORT]); 546 *port = nla_get_be16(tb[NDA_PORT]);
482 } else 547 } else {
483 port = vxlan->dst_port; 548 *port = vxlan->dst_port;
549 }
484 550
485 if (tb[NDA_VNI]) { 551 if (tb[NDA_VNI]) {
486 if (nla_len(tb[NDA_VNI]) != sizeof(u32)) 552 if (nla_len(tb[NDA_VNI]) != sizeof(u32))
487 return -EINVAL; 553 return -EINVAL;
488 vni = nla_get_u32(tb[NDA_VNI]); 554 *vni = nla_get_u32(tb[NDA_VNI]);
489 } else 555 } else {
490 vni = vxlan->default_dst.remote_vni; 556 *vni = vxlan->default_dst.remote_vni;
557 }
491 558
492 if (tb[NDA_IFINDEX]) { 559 if (tb[NDA_IFINDEX]) {
493 struct net_device *tdev; 560 struct net_device *tdev;
494 561
495 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) 562 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
496 return -EINVAL; 563 return -EINVAL;
497 ifindex = nla_get_u32(tb[NDA_IFINDEX]); 564 *ifindex = nla_get_u32(tb[NDA_IFINDEX]);
498 tdev = dev_get_by_index(net, ifindex); 565 tdev = dev_get_by_index(net, *ifindex);
499 if (!tdev) 566 if (!tdev)
500 return -EADDRNOTAVAIL; 567 return -EADDRNOTAVAIL;
501 dev_put(tdev); 568 dev_put(tdev);
502 } else 569 } else {
503 ifindex = 0; 570 *ifindex = 0;
571 }
572
573 return 0;
574}
575
576/* Add static entry (via netlink) */
577static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
578 struct net_device *dev,
579 const unsigned char *addr, u16 flags)
580{
581 struct vxlan_dev *vxlan = netdev_priv(dev);
582 /* struct net *net = dev_net(vxlan->dev); */
583 __be32 ip;
584 __be16 port;
585 u32 vni, ifindex;
586 int err;
587
588 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
589 pr_info("RTM_NEWNEIGH with invalid state %#x\n",
590 ndm->ndm_state);
591 return -EINVAL;
592 }
593
594 if (tb[NDA_DST] == NULL)
595 return -EINVAL;
596
597 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
598 if (err)
599 return err;
504 600
505 spin_lock_bh(&vxlan->hash_lock); 601 spin_lock_bh(&vxlan->hash_lock);
506 err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags, 602 err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags,
@@ -517,14 +613,43 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
517{ 613{
518 struct vxlan_dev *vxlan = netdev_priv(dev); 614 struct vxlan_dev *vxlan = netdev_priv(dev);
519 struct vxlan_fdb *f; 615 struct vxlan_fdb *f;
520 int err = -ENOENT; 616 struct vxlan_rdst *rd = NULL;
617 __be32 ip;
618 __be16 port;
619 u32 vni, ifindex;
620 int err;
621
622 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
623 if (err)
624 return err;
625
626 err = -ENOENT;
521 627
522 spin_lock_bh(&vxlan->hash_lock); 628 spin_lock_bh(&vxlan->hash_lock);
523 f = vxlan_find_mac(vxlan, addr); 629 f = vxlan_find_mac(vxlan, addr);
524 if (f) { 630 if (!f)
525 vxlan_fdb_destroy(vxlan, f); 631 goto out;
526 err = 0; 632
633 if (ip != htonl(INADDR_ANY)) {
634 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
635 if (!rd)
636 goto out;
637 }
638
639 err = 0;
640
641 /* remove a destination if it's not the only one on the list,
642 * otherwise destroy the fdb entry
643 */
644 if (rd && !list_is_singular(&f->remotes)) {
645 list_del_rcu(&rd->list);
646 call_rcu(&rd->rcu, vxlan_fdb_free_rdst);
647 goto out;
527 } 648 }
649
650 vxlan_fdb_destroy(vxlan, f);
651
652out:
528 spin_unlock_bh(&vxlan->hash_lock); 653 spin_unlock_bh(&vxlan->hash_lock);
529 654
530 return err; 655 return err;
@@ -543,23 +668,24 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
543 668
544 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { 669 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
545 struct vxlan_rdst *rd; 670 struct vxlan_rdst *rd;
546 for (rd = &f->remote; rd; rd = rd->remote_next) {
547 if (idx < cb->args[0])
548 goto skip;
549 671
672 if (idx < cb->args[0])
673 goto skip;
674
675 list_for_each_entry_rcu(rd, &f->remotes, list) {
550 err = vxlan_fdb_info(skb, vxlan, f, 676 err = vxlan_fdb_info(skb, vxlan, f,
551 NETLINK_CB(cb->skb).portid, 677 NETLINK_CB(cb->skb).portid,
552 cb->nlh->nlmsg_seq, 678 cb->nlh->nlmsg_seq,
553 RTM_NEWNEIGH, 679 RTM_NEWNEIGH,
554 NLM_F_MULTI, rd); 680 NLM_F_MULTI, rd);
555 if (err < 0) 681 if (err < 0)
556 break; 682 goto out;
557skip:
558 ++idx;
559 } 683 }
684skip:
685 ++idx;
560 } 686 }
561 } 687 }
562 688out:
563 return idx; 689 return idx;
564} 690}
565 691
@@ -575,7 +701,9 @@ static bool vxlan_snoop(struct net_device *dev,
575 701
576 f = vxlan_find_mac(vxlan, src_mac); 702 f = vxlan_find_mac(vxlan, src_mac);
577 if (likely(f)) { 703 if (likely(f)) {
578 if (likely(f->remote.remote_ip == src_ip)) 704 struct vxlan_rdst *rdst = first_remote(f);
705
706 if (likely(rdst->remote_ip == src_ip))
579 return false; 707 return false;
580 708
581 /* Don't migrate static entries, drop packets */ 709 /* Don't migrate static entries, drop packets */
@@ -585,10 +713,11 @@ static bool vxlan_snoop(struct net_device *dev,
585 if (net_ratelimit()) 713 if (net_ratelimit())
586 netdev_info(dev, 714 netdev_info(dev,
587 "%pM migrated from %pI4 to %pI4\n", 715 "%pM migrated from %pI4 to %pI4\n",
588 src_mac, &f->remote.remote_ip, &src_ip); 716 src_mac, &rdst->remote_ip, &src_ip);
589 717
590 f->remote.remote_ip = src_ip; 718 rdst->remote_ip = src_ip;
591 f->updated = jiffies; 719 f->updated = jiffies;
720 vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
592 } else { 721 } else {
593 /* learned new entry */ 722 /* learned new entry */
594 spin_lock(&vxlan->hash_lock); 723 spin_lock(&vxlan->hash_lock);
@@ -609,78 +738,61 @@ static bool vxlan_snoop(struct net_device *dev,
609 738
610 739
611/* See if multicast group is already in use by other ID */ 740/* See if multicast group is already in use by other ID */
612static bool vxlan_group_used(struct vxlan_net *vn, 741static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
613 const struct vxlan_dev *this)
614{ 742{
615 const struct vxlan_dev *vxlan; 743 struct vxlan_dev *vxlan;
616 unsigned h;
617
618 for (h = 0; h < VNI_HASH_SIZE; ++h)
619 hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) {
620 if (vxlan == this)
621 continue;
622 744
623 if (!netif_running(vxlan->dev)) 745 list_for_each_entry(vxlan, &vn->vxlan_list, next) {
624 continue; 746 if (!netif_running(vxlan->dev))
747 continue;
625 748
626 if (vxlan->default_dst.remote_ip == this->default_dst.remote_ip) 749 if (vxlan->default_dst.remote_ip == remote_ip)
627 return true; 750 return true;
628 } 751 }
629 752
630 return false; 753 return false;
631} 754}
632 755
633/* kernel equivalent to IP_ADD_MEMBERSHIP */ 756static void vxlan_sock_hold(struct vxlan_sock *vs)
634static int vxlan_join_group(struct net_device *dev)
635{ 757{
636 struct vxlan_dev *vxlan = netdev_priv(dev); 758 atomic_inc(&vs->refcnt);
637 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 759}
638 struct sock *sk = vn->sock->sk;
639 struct ip_mreqn mreq = {
640 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
641 .imr_ifindex = vxlan->default_dst.remote_ifindex,
642 };
643 int err;
644 760
645 /* Already a member of group */ 761static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
646 if (vxlan_group_used(vn, vxlan)) 762{
647 return 0; 763 if (!atomic_dec_and_test(&vs->refcnt))
764 return;
648 765
649 /* Need to drop RTNL to call multicast join */ 766 spin_lock(&vn->sock_lock);
650 rtnl_unlock(); 767 hlist_del_rcu(&vs->hlist);
651 lock_sock(sk); 768 spin_unlock(&vn->sock_lock);
652 err = ip_mc_join_group(sk, &mreq);
653 release_sock(sk);
654 rtnl_lock();
655 769
656 return err; 770 queue_work(vxlan_wq, &vs->del_work);
657} 771}
658 772
659 773/* Callback to update multicast group membership.
660/* kernel equivalent to IP_DROP_MEMBERSHIP */ 774 * Scheduled when vxlan goes up/down.
661static int vxlan_leave_group(struct net_device *dev) 775 */
776static void vxlan_igmp_work(struct work_struct *work)
662{ 777{
663 struct vxlan_dev *vxlan = netdev_priv(dev); 778 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_work);
664 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 779 struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
665 int err = 0; 780 struct vxlan_sock *vs = vxlan->vn_sock;
666 struct sock *sk = vn->sock->sk; 781 struct sock *sk = vs->sock->sk;
667 struct ip_mreqn mreq = { 782 struct ip_mreqn mreq = {
668 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip, 783 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
669 .imr_ifindex = vxlan->default_dst.remote_ifindex, 784 .imr_ifindex = vxlan->default_dst.remote_ifindex,
670 }; 785 };
671 786
672 /* Only leave group when last vxlan is done. */
673 if (vxlan_group_used(vn, vxlan))
674 return 0;
675
676 /* Need to drop RTNL to call multicast leave */
677 rtnl_unlock();
678 lock_sock(sk); 787 lock_sock(sk);
679 err = ip_mc_leave_group(sk, &mreq); 788 if (vxlan_group_used(vn, vxlan->default_dst.remote_ip))
789 ip_mc_join_group(sk, &mreq);
790 else
791 ip_mc_leave_group(sk, &mreq);
680 release_sock(sk); 792 release_sock(sk);
681 rtnl_lock();
682 793
683 return err; 794 vxlan_sock_release(vn, vs);
795 dev_put(vxlan->dev);
684} 796}
685 797
686/* Callback from net/ipv4/udp.c to receive packets */ 798/* Callback from net/ipv4/udp.c to receive packets */
@@ -690,6 +802,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
690 struct vxlanhdr *vxh; 802 struct vxlanhdr *vxh;
691 struct vxlan_dev *vxlan; 803 struct vxlan_dev *vxlan;
692 struct pcpu_tstats *stats; 804 struct pcpu_tstats *stats;
805 __be16 port;
693 __u32 vni; 806 __u32 vni;
694 int err; 807 int err;
695 808
@@ -713,9 +826,11 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
713 826
714 /* Is this VNI defined? */ 827 /* Is this VNI defined? */
715 vni = ntohl(vxh->vx_vni) >> 8; 828 vni = ntohl(vxh->vx_vni) >> 8;
716 vxlan = vxlan_find_vni(sock_net(sk), vni); 829 port = inet_sk(sk)->inet_sport;
830 vxlan = vxlan_find_vni(sock_net(sk), vni, port);
717 if (!vxlan) { 831 if (!vxlan) {
718 netdev_dbg(skb->dev, "unknown vni %d\n", vni); 832 netdev_dbg(skb->dev, "unknown vni %d port %u\n",
833 vni, ntohs(port));
719 goto drop; 834 goto drop;
720 } 835 }
721 836
@@ -834,7 +949,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
834 } 949 }
835 950
836 f = vxlan_find_mac(vxlan, n->ha); 951 f = vxlan_find_mac(vxlan, n->ha);
837 if (f && f->remote.remote_ip == htonl(INADDR_ANY)) { 952 if (f && first_remote(f)->remote_ip == htonl(INADDR_ANY)) {
838 /* bridge-local neighbor */ 953 /* bridge-local neighbor */
839 neigh_release(n); 954 neigh_release(n);
840 goto out; 955 goto out;
@@ -896,7 +1011,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
896 return false; 1011 return false;
897} 1012}
898 1013
899static void vxlan_sock_free(struct sk_buff *skb) 1014static void vxlan_sock_put(struct sk_buff *skb)
900{ 1015{
901 sock_put(skb->sk); 1016 sock_put(skb->sk);
902} 1017}
@@ -904,13 +1019,13 @@ static void vxlan_sock_free(struct sk_buff *skb)
904/* On transmit, associate with the tunnel socket */ 1019/* On transmit, associate with the tunnel socket */
905static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb) 1020static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
906{ 1021{
907 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 1022 struct vxlan_dev *vxlan = netdev_priv(dev);
908 struct sock *sk = vn->sock->sk; 1023 struct sock *sk = vxlan->vn_sock->sock->sk;
909 1024
910 skb_orphan(skb); 1025 skb_orphan(skb);
911 sock_hold(sk); 1026 sock_hold(sk);
912 skb->sk = sk; 1027 skb->sk = sk;
913 skb->destructor = vxlan_sock_free; 1028 skb->destructor = vxlan_sock_put;
914} 1029}
915 1030
916/* Compute source port for outgoing packet 1031/* Compute source port for outgoing packet
@@ -976,21 +1091,21 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
976 } 1091 }
977} 1092}
978 1093
979static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, 1094static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
980 struct vxlan_rdst *rdst, bool did_rsc) 1095 struct vxlan_rdst *rdst, bool did_rsc)
981{ 1096{
982 struct vxlan_dev *vxlan = netdev_priv(dev); 1097 struct vxlan_dev *vxlan = netdev_priv(dev);
983 struct rtable *rt; 1098 struct rtable *rt;
984 const struct iphdr *old_iph; 1099 const struct iphdr *old_iph;
985 struct iphdr *iph;
986 struct vxlanhdr *vxh; 1100 struct vxlanhdr *vxh;
987 struct udphdr *uh; 1101 struct udphdr *uh;
988 struct flowi4 fl4; 1102 struct flowi4 fl4;
989 __be32 dst; 1103 __be32 dst;
990 __be16 src_port, dst_port; 1104 __be16 src_port, dst_port;
991 u32 vni; 1105 u32 vni;
992 __be16 df = 0; 1106 __be16 df = 0;
993 __u8 tos, ttl; 1107 __u8 tos, ttl;
1108 int err;
994 1109
995 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port; 1110 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
996 vni = rdst->remote_vni; 1111 vni = rdst->remote_vni;
@@ -1000,7 +1115,7 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1000 if (did_rsc) { 1115 if (did_rsc) {
1001 /* short-circuited back to local bridge */ 1116 /* short-circuited back to local bridge */
1002 vxlan_encap_bypass(skb, vxlan, vxlan); 1117 vxlan_encap_bypass(skb, vxlan, vxlan);
1003 return NETDEV_TX_OK; 1118 return;
1004 } 1119 }
1005 goto drop; 1120 goto drop;
1006 } 1121 }
@@ -1052,19 +1167,12 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1052 struct vxlan_dev *dst_vxlan; 1167 struct vxlan_dev *dst_vxlan;
1053 1168
1054 ip_rt_put(rt); 1169 ip_rt_put(rt);
1055 dst_vxlan = vxlan_find_vni(dev_net(dev), vni); 1170 dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
1056 if (!dst_vxlan) 1171 if (!dst_vxlan)
1057 goto tx_error; 1172 goto tx_error;
1058 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1173 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
1059 return NETDEV_TX_OK; 1174 return;
1060 } 1175 }
1061
1062 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1063 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1064 IPSKB_REROUTED);
1065 skb_dst_drop(skb);
1066 skb_dst_set(skb, &rt->dst);
1067
1068 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 1176 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1069 vxh->vx_flags = htonl(VXLAN_FLAGS); 1177 vxh->vx_flags = htonl(VXLAN_FLAGS);
1070 vxh->vx_vni = htonl(vni << 8); 1178 vxh->vx_vni = htonl(vni << 8);
@@ -1079,28 +1187,19 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1079 uh->len = htons(skb->len); 1187 uh->len = htons(skb->len);
1080 uh->check = 0; 1188 uh->check = 0;
1081 1189
1082 __skb_push(skb, sizeof(*iph));
1083 skb_reset_network_header(skb);
1084 iph = ip_hdr(skb);
1085 iph->version = 4;
1086 iph->ihl = sizeof(struct iphdr) >> 2;
1087 iph->frag_off = df;
1088 iph->protocol = IPPROTO_UDP;
1089 iph->tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1090 iph->daddr = dst;
1091 iph->saddr = fl4.saddr;
1092 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
1093 tunnel_ip_select_ident(skb, old_iph, &rt->dst);
1094
1095 nf_reset(skb);
1096
1097 vxlan_set_owner(dev, skb); 1190 vxlan_set_owner(dev, skb);
1098 1191
1099 if (handle_offloads(skb)) 1192 if (handle_offloads(skb))
1100 goto drop; 1193 goto drop;
1101 1194
1102 iptunnel_xmit(skb, dev); 1195 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1103 return NETDEV_TX_OK; 1196 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
1197
1198 err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, dst,
1199 IPPROTO_UDP, tos, ttl, df);
1200 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
1201
1202 return;
1104 1203
1105drop: 1204drop:
1106 dev->stats.tx_dropped++; 1205 dev->stats.tx_dropped++;
@@ -1110,7 +1209,6 @@ tx_error:
1110 dev->stats.tx_errors++; 1209 dev->stats.tx_errors++;
1111tx_free: 1210tx_free:
1112 dev_kfree_skb(skb); 1211 dev_kfree_skb(skb);
1113 return NETDEV_TX_OK;
1114} 1212}
1115 1213
1116/* Transmit local packets over Vxlan 1214/* Transmit local packets over Vxlan
@@ -1124,9 +1222,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1124 struct vxlan_dev *vxlan = netdev_priv(dev); 1222 struct vxlan_dev *vxlan = netdev_priv(dev);
1125 struct ethhdr *eth; 1223 struct ethhdr *eth;
1126 bool did_rsc = false; 1224 bool did_rsc = false;
1127 struct vxlan_rdst *rdst0, *rdst; 1225 struct vxlan_rdst *rdst;
1128 struct vxlan_fdb *f; 1226 struct vxlan_fdb *f;
1129 int rc1, rc;
1130 1227
1131 skb_reset_mac_header(skb); 1228 skb_reset_mac_header(skb);
1132 eth = eth_hdr(skb); 1229 eth = eth_hdr(skb);
@@ -1145,33 +1242,28 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1145 } 1242 }
1146 1243
1147 if (f == NULL) { 1244 if (f == NULL) {
1148 rdst0 = &vxlan->default_dst; 1245 f = vxlan_find_mac(vxlan, all_zeros_mac);
1149 1246 if (f == NULL) {
1150 if (rdst0->remote_ip == htonl(INADDR_ANY) && 1247 if ((vxlan->flags & VXLAN_F_L2MISS) &&
1151 (vxlan->flags & VXLAN_F_L2MISS) && 1248 !is_multicast_ether_addr(eth->h_dest))
1152 !is_multicast_ether_addr(eth->h_dest)) 1249 vxlan_fdb_miss(vxlan, eth->h_dest);
1153 vxlan_fdb_miss(vxlan, eth->h_dest); 1250
1154 } else 1251 dev->stats.tx_dropped++;
1155 rdst0 = &f->remote; 1252 dev_kfree_skb(skb);
1156 1253 return NETDEV_TX_OK;
1157 rc = NETDEV_TX_OK; 1254 }
1255 }
1158 1256
1159 /* if there are multiple destinations, send copies */ 1257 list_for_each_entry_rcu(rdst, &f->remotes, list) {
1160 for (rdst = rdst0->remote_next; rdst; rdst = rdst->remote_next) {
1161 struct sk_buff *skb1; 1258 struct sk_buff *skb1;
1162 1259
1163 skb1 = skb_clone(skb, GFP_ATOMIC); 1260 skb1 = skb_clone(skb, GFP_ATOMIC);
1164 if (skb1) { 1261 if (skb1)
1165 rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); 1262 vxlan_xmit_one(skb1, dev, rdst, did_rsc);
1166 if (rc == NETDEV_TX_OK)
1167 rc = rc1;
1168 }
1169 } 1263 }
1170 1264
1171 rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc); 1265 dev_kfree_skb(skb);
1172 if (rc == NETDEV_TX_OK) 1266 return NETDEV_TX_OK;
1173 rc = rc1;
1174 return rc;
1175} 1267}
1176 1268
1177/* Walk the forwarding table and purge stale entries */ 1269/* Walk the forwarding table and purge stale entries */
@@ -1214,23 +1306,70 @@ static void vxlan_cleanup(unsigned long arg)
1214/* Setup stats when device is created */ 1306/* Setup stats when device is created */
1215static int vxlan_init(struct net_device *dev) 1307static int vxlan_init(struct net_device *dev)
1216{ 1308{
1309 struct vxlan_dev *vxlan = netdev_priv(dev);
1310 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1311 struct vxlan_sock *vs;
1312 __u32 vni = vxlan->default_dst.remote_vni;
1313
1217 dev->tstats = alloc_percpu(struct pcpu_tstats); 1314 dev->tstats = alloc_percpu(struct pcpu_tstats);
1218 if (!dev->tstats) 1315 if (!dev->tstats)
1219 return -ENOMEM; 1316 return -ENOMEM;
1220 1317
1318 spin_lock(&vn->sock_lock);
1319 vs = vxlan_find_port(dev_net(dev), vxlan->dst_port);
1320 if (vs) {
1321 /* If we have a socket with same port already, reuse it */
1322 atomic_inc(&vs->refcnt);
1323 vxlan->vn_sock = vs;
1324 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
1325 } else {
1326 /* otherwise make new socket outside of RTNL */
1327 dev_hold(dev);
1328 queue_work(vxlan_wq, &vxlan->sock_work);
1329 }
1330 spin_unlock(&vn->sock_lock);
1331
1221 return 0; 1332 return 0;
1222} 1333}
1223 1334
1335static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
1336{
1337 struct vxlan_fdb *f;
1338
1339 spin_lock_bh(&vxlan->hash_lock);
1340 f = __vxlan_find_mac(vxlan, all_zeros_mac);
1341 if (f)
1342 vxlan_fdb_destroy(vxlan, f);
1343 spin_unlock_bh(&vxlan->hash_lock);
1344}
1345
1346static void vxlan_uninit(struct net_device *dev)
1347{
1348 struct vxlan_dev *vxlan = netdev_priv(dev);
1349 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1350 struct vxlan_sock *vs = vxlan->vn_sock;
1351
1352 vxlan_fdb_delete_default(vxlan);
1353
1354 if (vs)
1355 vxlan_sock_release(vn, vs);
1356 free_percpu(dev->tstats);
1357}
1358
1224/* Start ageing timer and join group when device is brought up */ 1359/* Start ageing timer and join group when device is brought up */
1225static int vxlan_open(struct net_device *dev) 1360static int vxlan_open(struct net_device *dev)
1226{ 1361{
1227 struct vxlan_dev *vxlan = netdev_priv(dev); 1362 struct vxlan_dev *vxlan = netdev_priv(dev);
1228 int err; 1363 struct vxlan_sock *vs = vxlan->vn_sock;
1364
1365 /* socket hasn't been created */
1366 if (!vs)
1367 return -ENOTCONN;
1229 1368
1230 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) { 1369 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
1231 err = vxlan_join_group(dev); 1370 vxlan_sock_hold(vs);
1232 if (err) 1371 dev_hold(dev);
1233 return err; 1372 queue_work(vxlan_wq, &vxlan->igmp_work);
1234 } 1373 }
1235 1374
1236 if (vxlan->age_interval) 1375 if (vxlan->age_interval)
@@ -1242,7 +1381,7 @@ static int vxlan_open(struct net_device *dev)
1242/* Purge the forwarding table */ 1381/* Purge the forwarding table */
1243static void vxlan_flush(struct vxlan_dev *vxlan) 1382static void vxlan_flush(struct vxlan_dev *vxlan)
1244{ 1383{
1245 unsigned h; 1384 unsigned int h;
1246 1385
1247 spin_lock_bh(&vxlan->hash_lock); 1386 spin_lock_bh(&vxlan->hash_lock);
1248 for (h = 0; h < FDB_HASH_SIZE; ++h) { 1387 for (h = 0; h < FDB_HASH_SIZE; ++h) {
@@ -1250,7 +1389,9 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
1250 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { 1389 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
1251 struct vxlan_fdb *f 1390 struct vxlan_fdb *f
1252 = container_of(p, struct vxlan_fdb, hlist); 1391 = container_of(p, struct vxlan_fdb, hlist);
1253 vxlan_fdb_destroy(vxlan, f); 1392 /* the all_zeros_mac entry is deleted at vxlan_uninit */
1393 if (!is_zero_ether_addr(f->eth_addr))
1394 vxlan_fdb_destroy(vxlan, f);
1254 } 1395 }
1255 } 1396 }
1256 spin_unlock_bh(&vxlan->hash_lock); 1397 spin_unlock_bh(&vxlan->hash_lock);
@@ -1260,9 +1401,13 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
1260static int vxlan_stop(struct net_device *dev) 1401static int vxlan_stop(struct net_device *dev)
1261{ 1402{
1262 struct vxlan_dev *vxlan = netdev_priv(dev); 1403 struct vxlan_dev *vxlan = netdev_priv(dev);
1404 struct vxlan_sock *vs = vxlan->vn_sock;
1263 1405
1264 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) 1406 if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
1265 vxlan_leave_group(dev); 1407 vxlan_sock_hold(vs);
1408 dev_hold(dev);
1409 queue_work(vxlan_wq, &vxlan->igmp_work);
1410 }
1266 1411
1267 del_timer_sync(&vxlan->age_timer); 1412 del_timer_sync(&vxlan->age_timer);
1268 1413
@@ -1278,6 +1423,7 @@ static void vxlan_set_multicast_list(struct net_device *dev)
1278 1423
1279static const struct net_device_ops vxlan_netdev_ops = { 1424static const struct net_device_ops vxlan_netdev_ops = {
1280 .ndo_init = vxlan_init, 1425 .ndo_init = vxlan_init,
1426 .ndo_uninit = vxlan_uninit,
1281 .ndo_open = vxlan_open, 1427 .ndo_open = vxlan_open,
1282 .ndo_stop = vxlan_stop, 1428 .ndo_stop = vxlan_stop,
1283 .ndo_start_xmit = vxlan_xmit, 1429 .ndo_start_xmit = vxlan_xmit,
@@ -1296,17 +1442,11 @@ static struct device_type vxlan_type = {
1296 .name = "vxlan", 1442 .name = "vxlan",
1297}; 1443};
1298 1444
1299static void vxlan_free(struct net_device *dev)
1300{
1301 free_percpu(dev->tstats);
1302 free_netdev(dev);
1303}
1304
1305/* Initialize the device structure. */ 1445/* Initialize the device structure. */
1306static void vxlan_setup(struct net_device *dev) 1446static void vxlan_setup(struct net_device *dev)
1307{ 1447{
1308 struct vxlan_dev *vxlan = netdev_priv(dev); 1448 struct vxlan_dev *vxlan = netdev_priv(dev);
1309 unsigned h; 1449 unsigned int h;
1310 int low, high; 1450 int low, high;
1311 1451
1312 eth_hw_addr_random(dev); 1452 eth_hw_addr_random(dev);
@@ -1314,7 +1454,7 @@ static void vxlan_setup(struct net_device *dev)
1314 dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM; 1454 dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
1315 1455
1316 dev->netdev_ops = &vxlan_netdev_ops; 1456 dev->netdev_ops = &vxlan_netdev_ops;
1317 dev->destructor = vxlan_free; 1457 dev->destructor = free_netdev;
1318 SET_NETDEV_DEVTYPE(dev, &vxlan_type); 1458 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
1319 1459
1320 dev->tx_queue_len = 0; 1460 dev->tx_queue_len = 0;
@@ -1329,7 +1469,10 @@ static void vxlan_setup(struct net_device *dev)
1329 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1469 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1330 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1470 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1331 1471
1472 INIT_LIST_HEAD(&vxlan->next);
1332 spin_lock_init(&vxlan->hash_lock); 1473 spin_lock_init(&vxlan->hash_lock);
1474 INIT_WORK(&vxlan->igmp_work, vxlan_igmp_work);
1475 INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
1333 1476
1334 init_timer_deferrable(&vxlan->age_timer); 1477 init_timer_deferrable(&vxlan->age_timer);
1335 vxlan->age_timer.function = vxlan_cleanup; 1478 vxlan->age_timer.function = vxlan_cleanup;
@@ -1413,9 +1556,113 @@ static const struct ethtool_ops vxlan_ethtool_ops = {
1413 .get_link = ethtool_op_get_link, 1556 .get_link = ethtool_op_get_link,
1414}; 1557};
1415 1558
1559static void vxlan_del_work(struct work_struct *work)
1560{
1561 struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
1562
1563 sk_release_kernel(vs->sock->sk);
1564 kfree_rcu(vs, rcu);
1565}
1566
1567static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
1568{
1569 struct vxlan_sock *vs;
1570 struct sock *sk;
1571 struct sockaddr_in vxlan_addr = {
1572 .sin_family = AF_INET,
1573 .sin_addr.s_addr = htonl(INADDR_ANY),
1574 .sin_port = port,
1575 };
1576 int rc;
1577 unsigned int h;
1578
1579 vs = kmalloc(sizeof(*vs), GFP_KERNEL);
1580 if (!vs)
1581 return ERR_PTR(-ENOMEM);
1582
1583 for (h = 0; h < VNI_HASH_SIZE; ++h)
1584 INIT_HLIST_HEAD(&vs->vni_list[h]);
1585
1586 INIT_WORK(&vs->del_work, vxlan_del_work);
1587
1588 /* Create UDP socket for encapsulation receive. */
1589 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
1590 if (rc < 0) {
1591 pr_debug("UDP socket create failed\n");
1592 kfree(vs);
1593 return ERR_PTR(rc);
1594 }
1595
1596 /* Put in proper namespace */
1597 sk = vs->sock->sk;
1598 sk_change_net(sk, net);
1599
1600 rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
1601 sizeof(vxlan_addr));
1602 if (rc < 0) {
1603 pr_debug("bind for UDP socket %pI4:%u (%d)\n",
1604 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
1605 sk_release_kernel(sk);
1606 kfree(vs);
1607 return ERR_PTR(rc);
1608 }
1609
1610 /* Disable multicast loopback */
1611 inet_sk(sk)->mc_loop = 0;
1612
1613 /* Mark socket as an encapsulation socket. */
1614 udp_sk(sk)->encap_type = 1;
1615 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
1616 udp_encap_enable();
1617 atomic_set(&vs->refcnt, 1);
1618
1619 return vs;
1620}
1621
1622/* Scheduled at device creation to bind to a socket */
1623static void vxlan_sock_work(struct work_struct *work)
1624{
1625 struct vxlan_dev *vxlan
1626 = container_of(work, struct vxlan_dev, sock_work);
1627 struct net_device *dev = vxlan->dev;
1628 struct net *net = dev_net(dev);
1629 __u32 vni = vxlan->default_dst.remote_vni;
1630 __be16 port = vxlan->dst_port;
1631 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1632 struct vxlan_sock *nvs, *ovs;
1633
1634 nvs = vxlan_socket_create(net, port);
1635 if (IS_ERR(nvs)) {
1636 netdev_err(vxlan->dev, "Can not create UDP socket, %ld\n",
1637 PTR_ERR(nvs));
1638 goto out;
1639 }
1640
1641 spin_lock(&vn->sock_lock);
1642 /* Look again to see if can reuse socket */
1643 ovs = vxlan_find_port(net, port);
1644 if (ovs) {
1645 atomic_inc(&ovs->refcnt);
1646 vxlan->vn_sock = ovs;
1647 hlist_add_head_rcu(&vxlan->hlist, vni_head(ovs, vni));
1648 spin_unlock(&vn->sock_lock);
1649
1650 sk_release_kernel(nvs->sock->sk);
1651 kfree(nvs);
1652 } else {
1653 vxlan->vn_sock = nvs;
1654 hlist_add_head_rcu(&nvs->hlist, vs_head(net, port));
1655 hlist_add_head_rcu(&vxlan->hlist, vni_head(nvs, vni));
1656 spin_unlock(&vn->sock_lock);
1657 }
1658out:
1659 dev_put(dev);
1660}
1661
1416static int vxlan_newlink(struct net *net, struct net_device *dev, 1662static int vxlan_newlink(struct net *net, struct net_device *dev,
1417 struct nlattr *tb[], struct nlattr *data[]) 1663 struct nlattr *tb[], struct nlattr *data[])
1418{ 1664{
1665 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1419 struct vxlan_dev *vxlan = netdev_priv(dev); 1666 struct vxlan_dev *vxlan = netdev_priv(dev);
1420 struct vxlan_rdst *dst = &vxlan->default_dst; 1667 struct vxlan_rdst *dst = &vxlan->default_dst;
1421 __u32 vni; 1668 __u32 vni;
@@ -1425,10 +1672,6 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1425 return -EINVAL; 1672 return -EINVAL;
1426 1673
1427 vni = nla_get_u32(data[IFLA_VXLAN_ID]); 1674 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
1428 if (vxlan_find_vni(net, vni)) {
1429 pr_info("duplicate VNI %u\n", vni);
1430 return -EEXIST;
1431 }
1432 dst->remote_vni = vni; 1675 dst->remote_vni = vni;
1433 1676
1434 if (data[IFLA_VXLAN_GROUP]) 1677 if (data[IFLA_VXLAN_GROUP])
@@ -1494,13 +1737,32 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1494 if (data[IFLA_VXLAN_PORT]) 1737 if (data[IFLA_VXLAN_PORT])
1495 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); 1738 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
1496 1739
1740 if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
1741 pr_info("duplicate VNI %u\n", vni);
1742 return -EEXIST;
1743 }
1744
1497 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); 1745 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
1498 1746
1747 /* create an fdb entry for default destination */
1748 err = vxlan_fdb_create(vxlan, all_zeros_mac,
1749 vxlan->default_dst.remote_ip,
1750 NUD_REACHABLE|NUD_PERMANENT,
1751 NLM_F_EXCL|NLM_F_CREATE,
1752 vxlan->dst_port, vxlan->default_dst.remote_vni,
1753 vxlan->default_dst.remote_ifindex, NTF_SELF);
1754 if (err)
1755 return err;
1756
1499 err = register_netdevice(dev); 1757 err = register_netdevice(dev);
1500 if (!err) 1758 if (err) {
1501 hlist_add_head_rcu(&vxlan->hlist, vni_head(net, dst->remote_vni)); 1759 vxlan_fdb_delete_default(vxlan);
1760 return err;
1761 }
1502 1762
1503 return err; 1763 list_add(&vxlan->next, &vn->vxlan_list);
1764
1765 return 0;
1504} 1766}
1505 1767
1506static void vxlan_dellink(struct net_device *dev, struct list_head *head) 1768static void vxlan_dellink(struct net_device *dev, struct list_head *head)
@@ -1508,7 +1770,7 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
1508 struct vxlan_dev *vxlan = netdev_priv(dev); 1770 struct vxlan_dev *vxlan = netdev_priv(dev);
1509 1771
1510 hlist_del_rcu(&vxlan->hlist); 1772 hlist_del_rcu(&vxlan->hlist);
1511 1773 list_del(&vxlan->next);
1512 unregister_netdevice_queue(dev, head); 1774 unregister_netdevice_queue(dev, head);
1513} 1775}
1514 1776
@@ -1595,46 +1857,13 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
1595static __net_init int vxlan_init_net(struct net *net) 1857static __net_init int vxlan_init_net(struct net *net)
1596{ 1858{
1597 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 1859 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1598 struct sock *sk; 1860 unsigned int h;
1599 struct sockaddr_in vxlan_addr = {
1600 .sin_family = AF_INET,
1601 .sin_addr.s_addr = htonl(INADDR_ANY),
1602 };
1603 int rc;
1604 unsigned h;
1605
1606 /* Create UDP socket for encapsulation receive. */
1607 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
1608 if (rc < 0) {
1609 pr_debug("UDP socket create failed\n");
1610 return rc;
1611 }
1612 /* Put in proper namespace */
1613 sk = vn->sock->sk;
1614 sk_change_net(sk, net);
1615
1616 vxlan_addr.sin_port = htons(vxlan_port);
1617
1618 rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
1619 sizeof(vxlan_addr));
1620 if (rc < 0) {
1621 pr_debug("bind for UDP socket %pI4:%u (%d)\n",
1622 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
1623 sk_release_kernel(sk);
1624 vn->sock = NULL;
1625 return rc;
1626 }
1627
1628 /* Disable multicast loopback */
1629 inet_sk(sk)->mc_loop = 0;
1630 1861
1631 /* Mark socket as an encapsulation socket. */ 1862 INIT_LIST_HEAD(&vn->vxlan_list);
1632 udp_sk(sk)->encap_type = 1; 1863 spin_lock_init(&vn->sock_lock);
1633 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
1634 udp_encap_enable();
1635 1864
1636 for (h = 0; h < VNI_HASH_SIZE; ++h) 1865 for (h = 0; h < PORT_HASH_SIZE; ++h)
1637 INIT_HLIST_HEAD(&vn->vni_list[h]); 1866 INIT_HLIST_HEAD(&vn->sock_list[h]);
1638 1867
1639 return 0; 1868 return 0;
1640} 1869}
@@ -1643,18 +1872,11 @@ static __net_exit void vxlan_exit_net(struct net *net)
1643{ 1872{
1644 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 1873 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1645 struct vxlan_dev *vxlan; 1874 struct vxlan_dev *vxlan;
1646 unsigned h;
1647 1875
1648 rtnl_lock(); 1876 rtnl_lock();
1649 for (h = 0; h < VNI_HASH_SIZE; ++h) 1877 list_for_each_entry(vxlan, &vn->vxlan_list, next)
1650 hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) 1878 dev_close(vxlan->dev);
1651 dev_close(vxlan->dev);
1652 rtnl_unlock(); 1879 rtnl_unlock();
1653
1654 if (vn->sock) {
1655 sk_release_kernel(vn->sock->sk);
1656 vn->sock = NULL;
1657 }
1658} 1880}
1659 1881
1660static struct pernet_operations vxlan_net_ops = { 1882static struct pernet_operations vxlan_net_ops = {
@@ -1668,6 +1890,10 @@ static int __init vxlan_init_module(void)
1668{ 1890{
1669 int rc; 1891 int rc;
1670 1892
1893 vxlan_wq = alloc_workqueue("vxlan", 0, 0);
1894 if (!vxlan_wq)
1895 return -ENOMEM;
1896
1671 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt)); 1897 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
1672 1898
1673 rc = register_pernet_device(&vxlan_net_ops); 1899 rc = register_pernet_device(&vxlan_net_ops);
@@ -1683,14 +1909,16 @@ static int __init vxlan_init_module(void)
1683out2: 1909out2:
1684 unregister_pernet_device(&vxlan_net_ops); 1910 unregister_pernet_device(&vxlan_net_ops);
1685out1: 1911out1:
1912 destroy_workqueue(vxlan_wq);
1686 return rc; 1913 return rc;
1687} 1914}
1688module_init(vxlan_init_module); 1915late_initcall(vxlan_init_module);
1689 1916
1690static void __exit vxlan_cleanup_module(void) 1917static void __exit vxlan_cleanup_module(void)
1691{ 1918{
1692 rtnl_link_unregister(&vxlan_link_ops);
1693 unregister_pernet_device(&vxlan_net_ops); 1919 unregister_pernet_device(&vxlan_net_ops);
1920 rtnl_link_unregister(&vxlan_link_ops);
1921 destroy_workqueue(vxlan_wq);
1694 rcu_barrier(); 1922 rcu_barrier();
1695} 1923}
1696module_exit(vxlan_cleanup_module); 1924module_exit(vxlan_cleanup_module);
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 6a8a382c5f4c..0d1c7592efa0 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -493,7 +493,7 @@ static void dlci_setup(struct net_device *dev)
493static int dlci_dev_event(struct notifier_block *unused, 493static int dlci_dev_event(struct notifier_block *unused,
494 unsigned long event, void *ptr) 494 unsigned long event, void *ptr)
495{ 495{
496 struct net_device *dev = (struct net_device *) ptr; 496 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
497 497
498 if (dev_net(dev) != &init_net) 498 if (dev_net(dev) != &init_net)
499 return NOTIFY_DONE; 499 return NOTIFY_DONE;
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index a0a932c63d0a..9c33ca918e19 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -99,7 +99,7 @@ static inline void hdlc_proto_stop(struct net_device *dev)
99static int hdlc_device_event(struct notifier_block *this, unsigned long event, 99static int hdlc_device_event(struct notifier_block *this, unsigned long event,
100 void *ptr) 100 void *ptr)
101{ 101{
102 struct net_device *dev = ptr; 102 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
103 hdlc_device *hdlc; 103 hdlc_device *hdlc;
104 unsigned long flags; 104 unsigned long flags;
105 int on; 105 int on;
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index fc9d11d74d60..e7bbdb7af53a 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -1384,7 +1384,6 @@ static int hss_remove_one(struct platform_device *pdev)
1384 unregister_hdlc_device(port->netdev); 1384 unregister_hdlc_device(port->netdev);
1385 free_netdev(port->netdev); 1385 free_netdev(port->netdev);
1386 npe_release(port->npe); 1386 npe_release(port->npe);
1387 platform_set_drvdata(pdev, NULL);
1388 kfree(port); 1387 kfree(port);
1389 return 0; 1388 return 0;
1390} 1389}
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index a73b49eb87e3..a33a46fa88dd 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -370,7 +370,7 @@ static int lapbeth_device_event(struct notifier_block *this,
370 unsigned long event, void *ptr) 370 unsigned long event, void *ptr)
371{ 371{
372 struct lapbethdev *lapbeth; 372 struct lapbethdev *lapbeth;
373 struct net_device *dev = ptr; 373 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
374 374
375 if (dev_net(dev) != &init_net) 375 if (dev_net(dev) != &init_net)
376 return NOTIFY_DONE; 376 return NOTIFY_DONE;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f8f0156dff4e..200020eb3005 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -280,5 +280,6 @@ source "drivers/net/wireless/rtlwifi/Kconfig"
280source "drivers/net/wireless/ti/Kconfig" 280source "drivers/net/wireless/ti/Kconfig"
281source "drivers/net/wireless/zd1211rw/Kconfig" 281source "drivers/net/wireless/zd1211rw/Kconfig"
282source "drivers/net/wireless/mwifiex/Kconfig" 282source "drivers/net/wireless/mwifiex/Kconfig"
283source "drivers/net/wireless/cw1200/Kconfig"
283 284
284endif # WLAN 285endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 67156efe14c4..0fab227025be 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -57,3 +57,5 @@ obj-$(CONFIG_MWIFIEX) += mwifiex/
57 57
58obj-$(CONFIG_BRCMFMAC) += brcm80211/ 58obj-$(CONFIG_BRCMFMAC) += brcm80211/
59obj-$(CONFIG_BRCMSMAC) += brcm80211/ 59obj-$(CONFIG_BRCMSMAC) += brcm80211/
60
61obj-$(CONFIG_CW1200) += cw1200/
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 6125adb520a3..d0adbaf86186 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1893,7 +1893,8 @@ static int airo_open(struct net_device *dev) {
1893 1893
1894 if (ai->wifidev != dev) { 1894 if (ai->wifidev != dev) {
1895 clear_bit(JOB_DIE, &ai->jobs); 1895 clear_bit(JOB_DIE, &ai->jobs);
1896 ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name); 1896 ai->airo_thread_task = kthread_run(airo_thread, dev, "%s",
1897 dev->name);
1897 if (IS_ERR(ai->airo_thread_task)) 1898 if (IS_ERR(ai->airo_thread_task))
1898 return (int)PTR_ERR(ai->airo_thread_task); 1899 return (int)PTR_ERR(ai->airo_thread_task);
1899 1900
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 2c02b4e84094..1abf1d421173 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -31,5 +31,6 @@ source "drivers/net/wireless/ath/carl9170/Kconfig"
31source "drivers/net/wireless/ath/ath6kl/Kconfig" 31source "drivers/net/wireless/ath/ath6kl/Kconfig"
32source "drivers/net/wireless/ath/ar5523/Kconfig" 32source "drivers/net/wireless/ath/ar5523/Kconfig"
33source "drivers/net/wireless/ath/wil6210/Kconfig" 33source "drivers/net/wireless/ath/wil6210/Kconfig"
34source "drivers/net/wireless/ath/ath10k/Kconfig"
34 35
35endif 36endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 97b964ded2be..fb05cfd19361 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_CARL9170) += carl9170/
4obj-$(CONFIG_ATH6KL) += ath6kl/ 4obj-$(CONFIG_ATH6KL) += ath6kl/
5obj-$(CONFIG_AR5523) += ar5523/ 5obj-$(CONFIG_AR5523) += ar5523/
6obj-$(CONFIG_WIL6210) += wil6210/ 6obj-$(CONFIG_WIL6210) += wil6210/
7obj-$(CONFIG_ATH10K) += ath10k/
7 8
8obj-$(CONFIG_ATH_COMMON) += ath.o 9obj-$(CONFIG_ATH_COMMON) += ath.o
9 10
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 4521342c62cc..daeafeff186b 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -239,13 +239,12 @@ enum ATH_DEBUG {
239 ATH_DBG_CONFIG = 0x00000200, 239 ATH_DBG_CONFIG = 0x00000200,
240 ATH_DBG_FATAL = 0x00000400, 240 ATH_DBG_FATAL = 0x00000400,
241 ATH_DBG_PS = 0x00000800, 241 ATH_DBG_PS = 0x00000800,
242 ATH_DBG_HWTIMER = 0x00001000, 242 ATH_DBG_BTCOEX = 0x00001000,
243 ATH_DBG_BTCOEX = 0x00002000, 243 ATH_DBG_WMI = 0x00002000,
244 ATH_DBG_WMI = 0x00004000, 244 ATH_DBG_BSTUCK = 0x00004000,
245 ATH_DBG_BSTUCK = 0x00008000, 245 ATH_DBG_MCI = 0x00008000,
246 ATH_DBG_MCI = 0x00010000, 246 ATH_DBG_DFS = 0x00010000,
247 ATH_DBG_DFS = 0x00020000, 247 ATH_DBG_WOW = 0x00020000,
248 ATH_DBG_WOW = 0x00040000,
249 ATH_DBG_ANY = 0xffffffff 248 ATH_DBG_ANY = 0xffffffff
250}; 249};
251 250
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
new file mode 100644
index 000000000000..cde58fe96254
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -0,0 +1,39 @@
1config ATH10K
2 tristate "Atheros 802.11ac wireless cards support"
3 depends on MAC80211
4 select ATH_COMMON
5 ---help---
6 This module adds support for wireless adapters based on
7 Atheros IEEE 802.11ac family of chipsets.
8
9 If you choose to build a module, it'll be called ath10k.
10
11config ATH10K_PCI
12 tristate "Atheros ath10k PCI support"
13 depends on ATH10K && PCI
14 ---help---
15 This module adds support for PCIE bus
16
17config ATH10K_DEBUG
18 bool "Atheros ath10k debugging"
19 depends on ATH10K
20 ---help---
21 Enables debug support
22
23 If unsure, say Y to make it easier to debug problems.
24
25config ATH10K_DEBUGFS
26 bool "Atheros ath10k debugfs support"
27 depends on ATH10K
28 ---help---
29 Enabled debugfs support
30
31 If unsure, say Y to make it easier to debug problems.
32
33config ATH10K_TRACING
34 bool "Atheros ath10k tracing support"
35 depends on ATH10K
36 depends on EVENT_TRACING
37 ---help---
38 Select this to ath10k use tracing infrastructure.
39
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
new file mode 100644
index 000000000000..a4179f49ee1f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -0,0 +1,20 @@
1obj-$(CONFIG_ATH10K) += ath10k_core.o
2ath10k_core-y += mac.o \
3 debug.o \
4 core.o \
5 htc.o \
6 htt.o \
7 htt_rx.o \
8 htt_tx.o \
9 txrx.o \
10 wmi.o \
11 bmi.o
12
13ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
14
15obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
16ath10k_pci-y += pci.o \
17 ce.o
18
19# for tracing framework to find trace.h
20CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
new file mode 100644
index 000000000000..1a2ef51b69d9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -0,0 +1,295 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "bmi.h"
19#include "hif.h"
20#include "debug.h"
21#include "htc.h"
22
23int ath10k_bmi_done(struct ath10k *ar)
24{
25 struct bmi_cmd cmd;
26 u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
27 int ret;
28
29 if (ar->bmi.done_sent) {
30 ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
31 return 0;
32 }
33
34 ar->bmi.done_sent = true;
35 cmd.id = __cpu_to_le32(BMI_DONE);
36
37 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
38 if (ret) {
39 ath10k_warn("unable to write to the device: %d\n", ret);
40 return ret;
41 }
42
43 ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
44 return 0;
45}
46
47int ath10k_bmi_get_target_info(struct ath10k *ar,
48 struct bmi_target_info *target_info)
49{
50 struct bmi_cmd cmd;
51 union bmi_resp resp;
52 u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
53 u32 resplen = sizeof(resp.get_target_info);
54 int ret;
55
56 if (ar->bmi.done_sent) {
57 ath10k_warn("BMI Get Target Info Command disallowed\n");
58 return -EBUSY;
59 }
60
61 cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
62
63 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
64 if (ret) {
65 ath10k_warn("unable to get target info from device\n");
66 return ret;
67 }
68
69 if (resplen < sizeof(resp.get_target_info)) {
70 ath10k_warn("invalid get_target_info response length (%d)\n",
71 resplen);
72 return -EIO;
73 }
74
75 target_info->version = __le32_to_cpu(resp.get_target_info.version);
76 target_info->type = __le32_to_cpu(resp.get_target_info.type);
77 return 0;
78}
79
80int ath10k_bmi_read_memory(struct ath10k *ar,
81 u32 address, void *buffer, u32 length)
82{
83 struct bmi_cmd cmd;
84 union bmi_resp resp;
85 u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
86 u32 rxlen;
87 int ret;
88
89 if (ar->bmi.done_sent) {
90 ath10k_warn("command disallowed\n");
91 return -EBUSY;
92 }
93
94 ath10k_dbg(ATH10K_DBG_CORE,
95 "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
96 __func__, ar, address, length);
97
98 while (length) {
99 rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
100
101 cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
102 cmd.read_mem.addr = __cpu_to_le32(address);
103 cmd.read_mem.len = __cpu_to_le32(rxlen);
104
105 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
106 &resp, &rxlen);
107 if (ret) {
108 ath10k_warn("unable to read from the device\n");
109 return ret;
110 }
111
112 memcpy(buffer, resp.read_mem.payload, rxlen);
113 address += rxlen;
114 buffer += rxlen;
115 length -= rxlen;
116 }
117
118 return 0;
119}
120
121int ath10k_bmi_write_memory(struct ath10k *ar,
122 u32 address, const void *buffer, u32 length)
123{
124 struct bmi_cmd cmd;
125 u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
126 u32 txlen;
127 int ret;
128
129 if (ar->bmi.done_sent) {
130 ath10k_warn("command disallowed\n");
131 return -EBUSY;
132 }
133
134 ath10k_dbg(ATH10K_DBG_CORE,
135 "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
136 __func__, ar, address, length);
137
138 while (length) {
139 txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
140
141 /* copy before roundup to avoid reading beyond buffer*/
142 memcpy(cmd.write_mem.payload, buffer, txlen);
143 txlen = roundup(txlen, 4);
144
145 cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
146 cmd.write_mem.addr = __cpu_to_le32(address);
147 cmd.write_mem.len = __cpu_to_le32(txlen);
148
149 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
150 NULL, NULL);
151 if (ret) {
152 ath10k_warn("unable to write to the device\n");
153 return ret;
154 }
155
156 /* fixup roundup() so `length` zeroes out for last chunk */
157 txlen = min(txlen, length);
158
159 address += txlen;
160 buffer += txlen;
161 length -= txlen;
162 }
163
164 return 0;
165}
166
167int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
168{
169 struct bmi_cmd cmd;
170 union bmi_resp resp;
171 u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
172 u32 resplen = sizeof(resp.execute);
173 int ret;
174
175 if (ar->bmi.done_sent) {
176 ath10k_warn("command disallowed\n");
177 return -EBUSY;
178 }
179
180 ath10k_dbg(ATH10K_DBG_CORE,
181 "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
182 __func__, ar, address, *param);
183
184 cmd.id = __cpu_to_le32(BMI_EXECUTE);
185 cmd.execute.addr = __cpu_to_le32(address);
186 cmd.execute.param = __cpu_to_le32(*param);
187
188 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
189 if (ret) {
190 ath10k_warn("unable to read from the device\n");
191 return ret;
192 }
193
194 if (resplen < sizeof(resp.execute)) {
195 ath10k_warn("invalid execute response length (%d)\n",
196 resplen);
197 return ret;
198 }
199
200 *param = __le32_to_cpu(resp.execute.result);
201 return 0;
202}
203
204int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
205{
206 struct bmi_cmd cmd;
207 u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
208 u32 txlen;
209 int ret;
210
211 if (ar->bmi.done_sent) {
212 ath10k_warn("command disallowed\n");
213 return -EBUSY;
214 }
215
216 while (length) {
217 txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
218
219 WARN_ON_ONCE(txlen & 3);
220
221 cmd.id = __cpu_to_le32(BMI_LZ_DATA);
222 cmd.lz_data.len = __cpu_to_le32(txlen);
223 memcpy(cmd.lz_data.payload, buffer, txlen);
224
225 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
226 NULL, NULL);
227 if (ret) {
228 ath10k_warn("unable to write to the device\n");
229 return ret;
230 }
231
232 buffer += txlen;
233 length -= txlen;
234 }
235
236 return 0;
237}
238
239int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
240{
241 struct bmi_cmd cmd;
242 u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
243 int ret;
244
245 if (ar->bmi.done_sent) {
246 ath10k_warn("command disallowed\n");
247 return -EBUSY;
248 }
249
250 cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
251 cmd.lz_start.addr = __cpu_to_le32(address);
252
253 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
254 if (ret) {
255 ath10k_warn("unable to Start LZ Stream to the device\n");
256 return ret;
257 }
258
259 return 0;
260}
261
262int ath10k_bmi_fast_download(struct ath10k *ar,
263 u32 address, const void *buffer, u32 length)
264{
265 u8 trailer[4] = {};
266 u32 head_len = rounddown(length, 4);
267 u32 trailer_len = length - head_len;
268 int ret;
269
270 ret = ath10k_bmi_lz_stream_start(ar, address);
271 if (ret)
272 return ret;
273
274 /* copy the last word into a zero padded buffer */
275 if (trailer_len > 0)
276 memcpy(trailer, buffer + head_len, trailer_len);
277
278 ret = ath10k_bmi_lz_data(ar, buffer, head_len);
279 if (ret)
280 return ret;
281
282 if (trailer_len > 0)
283 ret = ath10k_bmi_lz_data(ar, trailer, 4);
284
285 if (ret != 0)
286 return ret;
287
288 /*
289 * Close compressed stream and open a new (fake) one.
290 * This serves mainly to flush Target caches.
291 */
292 ret = ath10k_bmi_lz_stream_start(ar, 0x00);
293
294 return ret;
295}
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
new file mode 100644
index 000000000000..32c56aa33a5e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -0,0 +1,224 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _BMI_H_
19#define _BMI_H_
20
21#include "core.h"
22
23/*
24 * Bootloader Messaging Interface (BMI)
25 *
26 * BMI is a very simple messaging interface used during initialization
27 * to read memory, write memory, execute code, and to define an
28 * application entry PC.
29 *
30 * It is used to download an application to QCA988x, to provide
31 * patches to code that is already resident on QCA988x, and generally
32 * to examine and modify state. The Host has an opportunity to use
33 * BMI only once during bootup. Once the Host issues a BMI_DONE
34 * command, this opportunity ends.
35 *
36 * The Host writes BMI requests to mailbox0, and reads BMI responses
37 * from mailbox0. BMI requests all begin with a command
38 * (see below for specific commands), and are followed by
39 * command-specific data.
40 *
41 * Flow control:
42 * The Host can only issue a command once the Target gives it a
43 * "BMI Command Credit", using AR8K Counter #4. As soon as the
44 * Target has completed a command, it issues another BMI Command
45 * Credit (so the Host can issue the next command).
46 *
47 * BMI handles all required Target-side cache flushing.
48 */
49
50/* Maximum data size used for BMI transfers */
51#define BMI_MAX_DATA_SIZE 256
52
53/* len = cmd + addr + length */
54#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \
55 sizeof(u32) + \
56 sizeof(u32) + \
57 sizeof(u32))
58
59/* BMI Commands */
60
61enum bmi_cmd_id {
62 BMI_NO_COMMAND = 0,
63 BMI_DONE = 1,
64 BMI_READ_MEMORY = 2,
65 BMI_WRITE_MEMORY = 3,
66 BMI_EXECUTE = 4,
67 BMI_SET_APP_START = 5,
68 BMI_READ_SOC_REGISTER = 6,
69 BMI_READ_SOC_WORD = 6,
70 BMI_WRITE_SOC_REGISTER = 7,
71 BMI_WRITE_SOC_WORD = 7,
72 BMI_GET_TARGET_ID = 8,
73 BMI_GET_TARGET_INFO = 8,
74 BMI_ROMPATCH_INSTALL = 9,
75 BMI_ROMPATCH_UNINSTALL = 10,
76 BMI_ROMPATCH_ACTIVATE = 11,
77 BMI_ROMPATCH_DEACTIVATE = 12,
78 BMI_LZ_STREAM_START = 13, /* should be followed by LZ_DATA */
79 BMI_LZ_DATA = 14,
80 BMI_NVRAM_PROCESS = 15,
81};
82
83#define BMI_NVRAM_SEG_NAME_SZ 16
84
85struct bmi_cmd {
86 __le32 id; /* enum bmi_cmd_id */
87 union {
88 struct {
89 } done;
90 struct {
91 __le32 addr;
92 __le32 len;
93 } read_mem;
94 struct {
95 __le32 addr;
96 __le32 len;
97 u8 payload[0];
98 } write_mem;
99 struct {
100 __le32 addr;
101 __le32 param;
102 } execute;
103 struct {
104 __le32 addr;
105 } set_app_start;
106 struct {
107 __le32 addr;
108 } read_soc_reg;
109 struct {
110 __le32 addr;
111 __le32 value;
112 } write_soc_reg;
113 struct {
114 } get_target_info;
115 struct {
116 __le32 rom_addr;
117 __le32 ram_addr; /* or value */
118 __le32 size;
119 __le32 activate; /* 0=install, but dont activate */
120 } rompatch_install;
121 struct {
122 __le32 patch_id;
123 } rompatch_uninstall;
124 struct {
125 __le32 count;
126 __le32 patch_ids[0]; /* length of @count */
127 } rompatch_activate;
128 struct {
129 __le32 count;
130 __le32 patch_ids[0]; /* length of @count */
131 } rompatch_deactivate;
132 struct {
133 __le32 addr;
134 } lz_start;
135 struct {
136 __le32 len; /* max BMI_MAX_DATA_SIZE */
137 u8 payload[0]; /* length of @len */
138 } lz_data;
139 struct {
140 u8 name[BMI_NVRAM_SEG_NAME_SZ];
141 } nvram_process;
142 u8 payload[BMI_MAX_CMDBUF_SIZE];
143 };
144} __packed;
145
146union bmi_resp {
147 struct {
148 u8 payload[0];
149 } read_mem;
150 struct {
151 __le32 result;
152 } execute;
153 struct {
154 __le32 value;
155 } read_soc_reg;
156 struct {
157 __le32 len;
158 __le32 version;
159 __le32 type;
160 } get_target_info;
161 struct {
162 __le32 patch_id;
163 } rompatch_install;
164 struct {
165 __le32 patch_id;
166 } rompatch_uninstall;
167 struct {
168 /* 0 = nothing executed
169 * otherwise = NVRAM segment return value */
170 __le32 result;
171 } nvram_process;
172 u8 payload[BMI_MAX_CMDBUF_SIZE];
173} __packed;
174
175struct bmi_target_info {
176 u32 version;
177 u32 type;
178};
179
180
181/* in msec */
182#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
183
184#define BMI_CE_NUM_TO_TARG 0
185#define BMI_CE_NUM_TO_HOST 1
186
187int ath10k_bmi_done(struct ath10k *ar);
188int ath10k_bmi_get_target_info(struct ath10k *ar,
189 struct bmi_target_info *target_info);
190int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
191 void *buffer, u32 length);
192int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
193 const void *buffer, u32 length);
194
195#define ath10k_bmi_read32(ar, item, val) \
196 ({ \
197 int ret; \
198 u32 addr; \
199 __le32 tmp; \
200 \
201 addr = host_interest_item_address(HI_ITEM(item)); \
202 ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
203 *val = __le32_to_cpu(tmp); \
204 ret; \
205 })
206
207#define ath10k_bmi_write32(ar, item, val) \
208 ({ \
209 int ret; \
210 u32 address; \
211 __le32 v = __cpu_to_le32(val); \
212 \
213 address = host_interest_item_address(HI_ITEM(item)); \
214 ret = ath10k_bmi_write_memory(ar, address, \
215 (u8 *)&v, sizeof(v)); \
216 ret; \
217 })
218
219int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
220int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
221int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
222int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
223 const void *buffer, u32 length);
224#endif /* _BMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
new file mode 100644
index 000000000000..61a8ac70d3ca
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -0,0 +1,1189 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "hif.h"
19#include "pci.h"
20#include "ce.h"
21#include "debug.h"
22
23/*
24 * Support for Copy Engine hardware, which is mainly used for
25 * communication between Host and Target over a PCIe interconnect.
26 */
27
28/*
29 * A single CopyEngine (CE) comprises two "rings":
30 * a source ring
31 * a destination ring
32 *
33 * Each ring consists of a number of descriptors which specify
34 * an address, length, and meta-data.
35 *
36 * Typically, one side of the PCIe interconnect (Host or Target)
37 * controls one ring and the other side controls the other ring.
38 * The source side chooses when to initiate a transfer and it
39 * chooses what to send (buffer address, length). The destination
40 * side keeps a supply of "anonymous receive buffers" available and
41 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
43 *
44 * The sender may send a simple buffer (address/length) or it may
45 * send a small list of buffers. When a small list is sent, hardware
46 * "gathers" these and they end up in a single destination buffer
47 * with a single interrupt.
48 *
49 * There are several "contexts" managed by this layer -- more, it
50 * may seem -- than should be needed. These are provided mainly for
51 * maximum flexibility and especially to facilitate a simpler HIF
52 * implementation. There are per-CopyEngine recv, send, and watermark
53 * contexts. These are supplied by the caller when a recv, send,
54 * or watermark handler is established and they are echoed back to
55 * the caller when the respective callbacks are invoked. There is
56 * also a per-transfer context supplied by the caller when a buffer
57 * (or sendlist) is sent and when a buffer is enqueued for recv.
58 * These per-transfer contexts are echoed back to the caller when
59 * the buffer is sent/received.
60 */
61
/* Publish the destination ring write index to the hardware. */
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

/* Read back the destination ring write index from the hardware. */
static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}
74
/*
 * Publish the source ring write index to the hardware.
 *
 * On QCA988x v1.0 the plain SR_WR_INDEX register cannot be used; the
 * index is instead passed via the DST_WATERMARK register with an
 * indicator handshake (data CE) or a magic-tagged write (other CEs).
 */
static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *indicator_addr;

	/* NOTE(review): "WARKAROUND" looks like a typo for WORKAROUND;
	 * the flag is declared elsewhere (pci.h), so any rename must be
	 * coordinated there. */
	if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
		return;
	}

	/* workaround for QCA988x_1.0 HW CE */
	indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;

	if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
		iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
	} else {
		unsigned long irq_flags;
		/* keep the indicator raised atomically w.r.t. local IRQs */
		local_irq_save(irq_flags);
		iowrite32(1, indicator_addr);

		/*
		 * PCIE write waits for ACK in IPQ8K, there is no
		 * need to read back value.
		 */
		(void)ioread32(indicator_addr);
		(void)ioread32(indicator_addr); /* conservative */

		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);

		iowrite32(0, indicator_addr);
		local_irq_restore(irq_flags);
	}
}
110
/* Read back the source ring write index from the hardware. */
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

/* Current source ring read index (how far the HW has consumed). */
static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}
122
/* Program the source ring base address (CE-visible/DMA address). */
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

/* Program the source ring size (number of descriptors). */
static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}
136
137static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
138 u32 ce_ctrl_addr,
139 unsigned int n)
140{
141 u32 ctrl1_addr = ath10k_pci_read32((ar),
142 (ce_ctrl_addr) + CE_CTRL1_ADDRESS);
143
144 ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
145 (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
146 CE_CTRL1_DMAX_LENGTH_SET(n));
147}
148
/* Enable/disable hardware byte swapping on the source ring (CTRL1). */
static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

/* Enable/disable hardware byte swapping on the destination ring. */
static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}
170
/* Current destination ring read index (how far the HW has filled). */
static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

/* Program the destination ring base address (CE-visible/DMA address). */
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

/* Program the destination ring size (number of descriptors). */
static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}
190
/* Set the source ring high watermark (read-modify-write). */
static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

/* Set the source ring low watermark (read-modify-write). */
static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

/* Set the destination ring high watermark (read-modify-write). */
static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

/* Set the destination ring low watermark (read-modify-write). */
static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}
234
/* Unmask the copy-complete interrupt for this CE.
 * NOTE(review): "inter_enable" looks like a typo for "intr_enable";
 * the name is referenced below, so renaming must update both sites. */
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

/* Mask the copy-complete interrupt for this CE. */
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

/* Mask all watermark interrupts for this CE. */
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}
264
/* Unmask the CE error interrupts (MISC_IE register). */
static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

/* Acknowledge (clear) the interrupt status bits given in @mask. */
static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}
281
282
283/*
284 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
285 * ath10k_ce_sendlist_send.
286 * The caller takes responsibility for any needed locking.
287 */
288static int ath10k_ce_send_nolock(struct ce_state *ce_state,
289 void *per_transfer_context,
290 u32 buffer,
291 unsigned int nbytes,
292 unsigned int transfer_id,
293 unsigned int flags)
294{
295 struct ath10k *ar = ce_state->ar;
296 struct ce_ring_state *src_ring = ce_state->src_ring;
297 struct ce_desc *desc, *sdesc;
298 unsigned int nentries_mask = src_ring->nentries_mask;
299 unsigned int sw_index = src_ring->sw_index;
300 unsigned int write_index = src_ring->write_index;
301 u32 ctrl_addr = ce_state->ctrl_addr;
302 u32 desc_flags = 0;
303 int ret = 0;
304
305 if (nbytes > ce_state->src_sz_max)
306 ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
307 __func__, nbytes, ce_state->src_sz_max);
308
309 ath10k_pci_wake(ar);
310
311 if (unlikely(CE_RING_DELTA(nentries_mask,
312 write_index, sw_index - 1) <= 0)) {
313 ret = -EIO;
314 goto exit;
315 }
316
317 desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
318 write_index);
319 sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);
320
321 desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
322
323 if (flags & CE_SEND_FLAG_GATHER)
324 desc_flags |= CE_DESC_FLAGS_GATHER;
325 if (flags & CE_SEND_FLAG_BYTE_SWAP)
326 desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
327
328 sdesc->addr = __cpu_to_le32(buffer);
329 sdesc->nbytes = __cpu_to_le16(nbytes);
330 sdesc->flags = __cpu_to_le16(desc_flags);
331
332 *desc = *sdesc;
333
334 src_ring->per_transfer_context[write_index] = per_transfer_context;
335
336 /* Update Source Ring Write Index */
337 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
338
339 /* WORKAROUND */
340 if (!(flags & CE_SEND_FLAG_GATHER))
341 ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
342
343 src_ring->write_index = write_index;
344exit:
345 ath10k_pci_sleep(ar);
346 return ret;
347}
348
349int ath10k_ce_send(struct ce_state *ce_state,
350 void *per_transfer_context,
351 u32 buffer,
352 unsigned int nbytes,
353 unsigned int transfer_id,
354 unsigned int flags)
355{
356 struct ath10k *ar = ce_state->ar;
357 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
358 int ret;
359
360 spin_lock_bh(&ar_pci->ce_lock);
361 ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
362 buffer, nbytes, transfer_id, flags);
363 spin_unlock_bh(&ar_pci->ce_lock);
364
365 return ret;
366}
367
368void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
369 unsigned int nbytes, u32 flags)
370{
371 unsigned int num_items = sendlist->num_items;
372 struct ce_sendlist_item *item;
373
374 item = &sendlist->item[num_items];
375 item->data = buffer;
376 item->u.nbytes = nbytes;
377 item->flags = flags;
378 sendlist->num_items++;
379}
380
/*
 * Send a multi-buffer "sendlist" as one hardware gather transfer.
 *
 * Ring space for ALL items is checked up front: if the ring cannot hold
 * the whole list, nothing is queued and -ENOMEM (the initial value of
 * ret) is returned.  Every item but the last is queued with
 * CE_SEND_FLAG_GATHER (which also defers the doorbell); the final item
 * carries the caller's per-transfer context and rings the doorbell.
 */
int ath10k_ce_sendlist_send(struct ce_state *ce_state,
			    void *per_transfer_context,
			    struct ce_sendlist *sendlist,
			    unsigned int transfer_id)
{
	struct ce_ring_state *src_ring = ce_state->src_ring;
	struct ce_sendlist_item *item;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sendlist->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	int i, delta, ret = -ENOMEM;

	spin_lock_bh(&ar_pci->ce_lock);

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* number of free descriptors available right now */
	delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);

	if (delta >= num_items) {
		/*
		 * Handle all but the last item uniformly.
		 */
		for (i = 0; i < num_items - 1; i++) {
			item = &sendlist->item[i];
			ret = ath10k_ce_send_nolock(ce_state,
						    CE_SENDLIST_ITEM_CTXT,
						    (u32) item->data,
						    item->u.nbytes, transfer_id,
						    item->flags |
						    CE_SEND_FLAG_GATHER);
			if (ret)
				ath10k_warn("CE send failed for item: %d\n", i);
		}
		/*
		 * Provide valid context pointer for final item.
		 */
		item = &sendlist->item[i];
		ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
					    (u32) item->data, item->u.nbytes,
					    transfer_id, item->flags);
		if (ret)
			ath10k_warn("CE send failed for last item: %d\n", i);
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
433
/*
 * Post an empty receive buffer (@buffer, a DMA address) to the
 * destination ring so the target can fill it.  @per_recv_context is
 * echoed back when the buffer completes.  Returns 0, or -EIO when the
 * ring is full.
 */
int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ce_ring_state *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	ath10k_pci_wake(ar);

	/* space in the ring? (one slot always kept free) */
	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

		/* Update destination descriptor; nbytes == 0 marks it
		 * as not-yet-filled (see completed_recv_next_nolock). */
		desc->addr = __cpu_to_le32(buffer);
		desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
							per_recv_context;

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		ret = 0;
	} else {
		ret = -EIO;
	}
	ath10k_pci_sleep(ar);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
477
478/*
479 * Guts of ath10k_ce_completed_recv_next.
480 * The caller takes responsibility for any necessary locking.
481 */
482static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
483 void **per_transfer_contextp,
484 u32 *bufferp,
485 unsigned int *nbytesp,
486 unsigned int *transfer_idp,
487 unsigned int *flagsp)
488{
489 struct ce_ring_state *dest_ring = ce_state->dest_ring;
490 unsigned int nentries_mask = dest_ring->nentries_mask;
491 unsigned int sw_index = dest_ring->sw_index;
492
493 struct ce_desc *base = dest_ring->base_addr_owner_space;
494 struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
495 struct ce_desc sdesc;
496 u16 nbytes;
497
498 /* Copy in one go for performance reasons */
499 sdesc = *desc;
500
501 nbytes = __le16_to_cpu(sdesc.nbytes);
502 if (nbytes == 0) {
503 /*
504 * This closes a relatively unusual race where the Host
505 * sees the updated DRRI before the update to the
506 * corresponding descriptor has completed. We treat this
507 * as a descriptor that is not yet done.
508 */
509 return -EIO;
510 }
511
512 desc->nbytes = 0;
513
514 /* Return data from completed destination descriptor */
515 *bufferp = __le32_to_cpu(sdesc.addr);
516 *nbytesp = nbytes;
517 *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
518
519 if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
520 *flagsp = CE_RECV_FLAG_SWAPPED;
521 else
522 *flagsp = 0;
523
524 if (per_transfer_contextp)
525 *per_transfer_contextp =
526 dest_ring->per_transfer_context[sw_index];
527
528 /* sanity */
529 dest_ring->per_transfer_context[sw_index] = NULL;
530
531 /* Update sw_index */
532 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
533 dest_ring->sw_index = sw_index;
534
535 return 0;
536}
537
538int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
539 void **per_transfer_contextp,
540 u32 *bufferp,
541 unsigned int *nbytesp,
542 unsigned int *transfer_idp,
543 unsigned int *flagsp)
544{
545 struct ath10k *ar = ce_state->ar;
546 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
547 int ret;
548
549 spin_lock_bh(&ar_pci->ce_lock);
550 ret = ath10k_ce_completed_recv_next_nolock(ce_state,
551 per_transfer_contextp,
552 bufferp, nbytesp,
553 transfer_idp, flagsp);
554 spin_unlock_bh(&ar_pci->ce_lock);
555
556 return ret;
557}
558
/*
 * Reclaim the next still-pending (not yet completed) receive buffer,
 * e.g. during teardown.  Returns the buffer address and context for
 * the oldest outstanding slot, or -EIO if there is no destination ring
 * or no outstanding buffers.
 */
int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ce_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	/* anything between sw_index and write_index is still posted */
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
610
611/*
612 * Guts of ath10k_ce_completed_send_next.
613 * The caller takes responsibility for any necessary locking.
614 */
615static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
616 void **per_transfer_contextp,
617 u32 *bufferp,
618 unsigned int *nbytesp,
619 unsigned int *transfer_idp)
620{
621 struct ce_ring_state *src_ring = ce_state->src_ring;
622 u32 ctrl_addr = ce_state->ctrl_addr;
623 struct ath10k *ar = ce_state->ar;
624 unsigned int nentries_mask = src_ring->nentries_mask;
625 unsigned int sw_index = src_ring->sw_index;
626 unsigned int read_index;
627 int ret = -EIO;
628
629 if (src_ring->hw_index == sw_index) {
630 /*
631 * The SW completion index has caught up with the cached
632 * version of the HW completion index.
633 * Update the cached HW completion index to see whether
634 * the SW has really caught up to the HW, or if the cached
635 * value of the HW index has become stale.
636 */
637 ath10k_pci_wake(ar);
638 src_ring->hw_index =
639 ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
640 ath10k_pci_sleep(ar);
641 }
642 read_index = src_ring->hw_index;
643
644 if ((read_index != sw_index) && (read_index != 0xffffffff)) {
645 struct ce_desc *sbase = src_ring->shadow_base;
646 struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
647
648 /* Return data from completed source descriptor */
649 *bufferp = __le32_to_cpu(sdesc->addr);
650 *nbytesp = __le16_to_cpu(sdesc->nbytes);
651 *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
652 CE_DESC_FLAGS_META_DATA);
653
654 if (per_transfer_contextp)
655 *per_transfer_contextp =
656 src_ring->per_transfer_context[sw_index];
657
658 /* sanity */
659 src_ring->per_transfer_context[sw_index] = NULL;
660
661 /* Update sw_index */
662 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
663 src_ring->sw_index = sw_index;
664 ret = 0;
665 }
666
667 return ret;
668}
669
/* NB: Modeled after ath10k_ce_completed_send_next */
/*
 * Reclaim the next still-pending (not yet completed) send buffer,
 * e.g. during teardown.  Unlike completed_send_next this ignores the
 * hardware read index and reads the live DMA descriptors, returning
 * every slot between sw_index and write_index.  Returns -EIO if there
 * is no source ring or nothing is outstanding.
 */
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ce_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
						CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
728
729int ath10k_ce_completed_send_next(struct ce_state *ce_state,
730 void **per_transfer_contextp,
731 u32 *bufferp,
732 unsigned int *nbytesp,
733 unsigned int *transfer_idp)
734{
735 struct ath10k *ar = ce_state->ar;
736 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
737 int ret;
738
739 spin_lock_bh(&ar_pci->ce_lock);
740 ret = ath10k_ce_completed_send_next_nolock(ce_state,
741 per_transfer_contextp,
742 bufferp, nbytesp,
743 transfer_idp);
744 spin_unlock_bh(&ar_pci->ce_lock);
745
746 return ret;
747}
748
749/*
750 * Guts of interrupt handler for per-engine interrupts on a particular CE.
751 *
752 * Invokes registered callbacks for recv_complete,
753 * send_complete, and watermarks.
754 */
755void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
756{
757 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
758 struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
759 u32 ctrl_addr = ce_state->ctrl_addr;
760 void *transfer_context;
761 u32 buf;
762 unsigned int nbytes;
763 unsigned int id;
764 unsigned int flags;
765
766 ath10k_pci_wake(ar);
767 spin_lock_bh(&ar_pci->ce_lock);
768
769 /* Clear the copy-complete interrupts that will be handled here. */
770 ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
771 HOST_IS_COPY_COMPLETE_MASK);
772
773 if (ce_state->recv_cb) {
774 /*
775 * Pop completed recv buffers and call the registered
776 * recv callback for each
777 */
778 while (ath10k_ce_completed_recv_next_nolock(ce_state,
779 &transfer_context,
780 &buf, &nbytes,
781 &id, &flags) == 0) {
782 spin_unlock_bh(&ar_pci->ce_lock);
783 ce_state->recv_cb(ce_state, transfer_context, buf,
784 nbytes, id, flags);
785 spin_lock_bh(&ar_pci->ce_lock);
786 }
787 }
788
789 if (ce_state->send_cb) {
790 /*
791 * Pop completed send buffers and call the registered
792 * send callback for each
793 */
794 while (ath10k_ce_completed_send_next_nolock(ce_state,
795 &transfer_context,
796 &buf,
797 &nbytes,
798 &id) == 0) {
799 spin_unlock_bh(&ar_pci->ce_lock);
800 ce_state->send_cb(ce_state, transfer_context,
801 buf, nbytes, id);
802 spin_lock_bh(&ar_pci->ce_lock);
803 }
804 }
805
806 /*
807 * Misc CE interrupts are not being handled, but still need
808 * to be cleared.
809 */
810 ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
811
812 spin_unlock_bh(&ar_pci->ce_lock);
813 ath10k_pci_sleep(ar);
814}
815
816/*
817 * Handler for per-engine interrupts on ALL active CEs.
818 * This is used in cases where the system is sharing a
819 * single interrput for all CEs
820 */
821
822void ath10k_ce_per_engine_service_any(struct ath10k *ar)
823{
824 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
825 int ce_id;
826 u32 intr_summary;
827
828 ath10k_pci_wake(ar);
829 intr_summary = CE_INTERRUPT_SUMMARY(ar);
830
831 for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
832 if (intr_summary & (1 << ce_id))
833 intr_summary &= ~(1 << ce_id);
834 else
835 /* no intr pending on this CE */
836 continue;
837
838 ath10k_ce_per_engine_service(ar, ce_id);
839 }
840
841 ath10k_pci_sleep(ar);
842}
843
844/*
845 * Adjust interrupts for the copy complete handler.
846 * If it's needed for either send or recv, then unmask
847 * this interrupt; otherwise, mask it.
848 *
849 * Called with ce_lock held.
850 */
851static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
852 int disable_copy_compl_intr)
853{
854 u32 ctrl_addr = ce_state->ctrl_addr;
855 struct ath10k *ar = ce_state->ar;
856
857 ath10k_pci_wake(ar);
858
859 if ((!disable_copy_compl_intr) &&
860 (ce_state->send_cb || ce_state->recv_cb))
861 ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
862 else
863 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
864
865 ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
866
867 ath10k_pci_sleep(ar);
868}
869
870void ath10k_ce_disable_interrupts(struct ath10k *ar)
871{
872 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
873 int ce_id;
874
875 ath10k_pci_wake(ar);
876 for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
877 struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
878 u32 ctrl_addr = ce_state->ctrl_addr;
879
880 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
881 }
882 ath10k_pci_sleep(ar);
883}
884
/*
 * Register @send_cb to be invoked (from ath10k_ce_per_engine_service())
 * for each completed send on this CE, then re-tune the copy-complete
 * interrupt mask accordingly.  The callback must be installed before
 * the adjust call, which reads ce_state->send_cb.
 */
void ath10k_ce_send_cb_register(struct ce_state *ce_state,
				void (*send_cb) (struct ce_state *ce_state,
						 void *transfer_context,
						 u32 buffer,
						 unsigned int nbytes,
						 unsigned int transfer_id),
				int disable_interrupts)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->send_cb = send_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
	spin_unlock_bh(&ar_pci->ce_lock);
}
901
/*
 * Register @recv_cb to be invoked (from ath10k_ce_per_engine_service())
 * for each completed receive on this CE, then enable the copy-complete
 * interrupt.  The callback must be installed before the adjust call,
 * which reads ce_state->recv_cb.
 */
void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
				void (*recv_cb) (struct ce_state *ce_state,
						 void *transfer_context,
						 u32 buffer,
						 unsigned int nbytes,
						 unsigned int transfer_id,
						 unsigned int flags))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->recv_cb = recv_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, 0);
	spin_unlock_bh(&ar_pci->ce_lock);
}
918
919static int ath10k_ce_init_src_ring(struct ath10k *ar,
920 unsigned int ce_id,
921 struct ce_state *ce_state,
922 const struct ce_attr *attr)
923{
924 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
925 struct ce_ring_state *src_ring;
926 unsigned int nentries = attr->src_nentries;
927 unsigned int ce_nbytes;
928 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
929 dma_addr_t base_addr;
930 char *ptr;
931
932 nentries = roundup_pow_of_two(nentries);
933
934 if (ce_state->src_ring) {
935 WARN_ON(ce_state->src_ring->nentries != nentries);
936 return 0;
937 }
938
939 ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
940 ptr = kzalloc(ce_nbytes, GFP_KERNEL);
941 if (ptr == NULL)
942 return -ENOMEM;
943
944 ce_state->src_ring = (struct ce_ring_state *)ptr;
945 src_ring = ce_state->src_ring;
946
947 ptr += sizeof(struct ce_ring_state);
948 src_ring->nentries = nentries;
949 src_ring->nentries_mask = nentries - 1;
950
951 ath10k_pci_wake(ar);
952 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
953 src_ring->hw_index = src_ring->sw_index;
954
955 src_ring->write_index =
956 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
957 ath10k_pci_sleep(ar);
958
959 src_ring->per_transfer_context = (void **)ptr;
960
961 /*
962 * Legacy platforms that do not support cache
963 * coherent DMA are unsupported
964 */
965 src_ring->base_addr_owner_space_unaligned =
966 pci_alloc_consistent(ar_pci->pdev,
967 (nentries * sizeof(struct ce_desc) +
968 CE_DESC_RING_ALIGN),
969 &base_addr);
970 src_ring->base_addr_ce_space_unaligned = base_addr;
971
972 src_ring->base_addr_owner_space = PTR_ALIGN(
973 src_ring->base_addr_owner_space_unaligned,
974 CE_DESC_RING_ALIGN);
975 src_ring->base_addr_ce_space = ALIGN(
976 src_ring->base_addr_ce_space_unaligned,
977 CE_DESC_RING_ALIGN);
978
979 /*
980 * Also allocate a shadow src ring in regular
981 * mem to use for faster access.
982 */
983 src_ring->shadow_base_unaligned =
984 kmalloc((nentries * sizeof(struct ce_desc) +
985 CE_DESC_RING_ALIGN), GFP_KERNEL);
986
987 src_ring->shadow_base = PTR_ALIGN(
988 src_ring->shadow_base_unaligned,
989 CE_DESC_RING_ALIGN);
990
991 ath10k_pci_wake(ar);
992 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
993 src_ring->base_addr_ce_space);
994 ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
995 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
996 ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
997 ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
998 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
999 ath10k_pci_sleep(ar);
1000
1001 return 0;
1002}
1003
1004static int ath10k_ce_init_dest_ring(struct ath10k *ar,
1005 unsigned int ce_id,
1006 struct ce_state *ce_state,
1007 const struct ce_attr *attr)
1008{
1009 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1010 struct ce_ring_state *dest_ring;
1011 unsigned int nentries = attr->dest_nentries;
1012 unsigned int ce_nbytes;
1013 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
1014 dma_addr_t base_addr;
1015 char *ptr;
1016
1017 nentries = roundup_pow_of_two(nentries);
1018
1019 if (ce_state->dest_ring) {
1020 WARN_ON(ce_state->dest_ring->nentries != nentries);
1021 return 0;
1022 }
1023
1024 ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
1025 ptr = kzalloc(ce_nbytes, GFP_KERNEL);
1026 if (ptr == NULL)
1027 return -ENOMEM;
1028
1029 ce_state->dest_ring = (struct ce_ring_state *)ptr;
1030 dest_ring = ce_state->dest_ring;
1031
1032 ptr += sizeof(struct ce_ring_state);
1033 dest_ring->nentries = nentries;
1034 dest_ring->nentries_mask = nentries - 1;
1035
1036 ath10k_pci_wake(ar);
1037 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
1038 dest_ring->write_index =
1039 ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
1040 ath10k_pci_sleep(ar);
1041
1042 dest_ring->per_transfer_context = (void **)ptr;
1043
1044 /*
1045 * Legacy platforms that do not support cache
1046 * coherent DMA are unsupported
1047 */
1048 dest_ring->base_addr_owner_space_unaligned =
1049 pci_alloc_consistent(ar_pci->pdev,
1050 (nentries * sizeof(struct ce_desc) +
1051 CE_DESC_RING_ALIGN),
1052 &base_addr);
1053 dest_ring->base_addr_ce_space_unaligned = base_addr;
1054
1055 /*
1056 * Correctly initialize memory to 0 to prevent garbage
1057 * data crashing system when download firmware
1058 */
1059 memset(dest_ring->base_addr_owner_space_unaligned, 0,
1060 nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);
1061
1062 dest_ring->base_addr_owner_space = PTR_ALIGN(
1063 dest_ring->base_addr_owner_space_unaligned,
1064 CE_DESC_RING_ALIGN);
1065 dest_ring->base_addr_ce_space = ALIGN(
1066 dest_ring->base_addr_ce_space_unaligned,
1067 CE_DESC_RING_ALIGN);
1068
1069 ath10k_pci_wake(ar);
1070 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
1071 dest_ring->base_addr_ce_space);
1072 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
1073 ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
1074 ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
1075 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
1076 ath10k_pci_sleep(ar);
1077
1078 return 0;
1079}
1080
/*
 * Allocate and register the ce_state for @ce_id.
 *
 * NOTE(review): if a state already exists for this ce_id, this returns
 * NULL (ce_state stays NULL) — the same value as allocation failure.
 * Callers treat NULL as an error, so re-initializing an existing CE
 * fails; confirm whether that is intended before relying on it.
 *
 * GFP_ATOMIC because the allocation happens under the ce_lock spinlock.
 */
static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
					     unsigned int ce_id,
					     const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_state = NULL;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	spin_lock_bh(&ar_pci->ce_lock);

	if (!ar_pci->ce_id_to_state[ce_id]) {
		ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
		if (ce_state == NULL) {
			spin_unlock_bh(&ar_pci->ce_lock);
			return NULL;
		}

		ar_pci->ce_id_to_state[ce_id] = ce_state;
		ce_state->ar = ar;
		ce_state->id = ce_id;
		ce_state->ctrl_addr = ctrl_addr;
		ce_state->state = CE_RUNNING;
		/* Save attribute flags */
		ce_state->attr_flags = attr->flags;
		ce_state->src_sz_max = attr->src_sz_max;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ce_state;
}
1112
1113/*
1114 * Initialize a Copy Engine based on caller-supplied attributes.
1115 * This may be called once to initialize both source and destination
1116 * rings or it may be called twice for separate source and destination
1117 * initialization. It may be that only one side or the other is
1118 * initialized by software/firmware.
1119 */
1120struct ce_state *ath10k_ce_init(struct ath10k *ar,
1121 unsigned int ce_id,
1122 const struct ce_attr *attr)
1123{
1124 struct ce_state *ce_state;
1125 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
1126
1127 ce_state = ath10k_ce_init_state(ar, ce_id, attr);
1128 if (!ce_state) {
1129 ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
1130 return NULL;
1131 }
1132
1133 if (attr->src_nentries) {
1134 if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
1135 ath10k_err("Failed to initialize CE src ring for ID: %d\n",
1136 ce_id);
1137 ath10k_ce_deinit(ce_state);
1138 return NULL;
1139 }
1140 }
1141
1142 if (attr->dest_nentries) {
1143 if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
1144 ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
1145 ce_id);
1146 ath10k_ce_deinit(ce_state);
1147 return NULL;
1148 }
1149 }
1150
1151 /* Enable CE error interrupts */
1152 ath10k_pci_wake(ar);
1153 ath10k_ce_error_intr_enable(ar, ctrl_addr);
1154 ath10k_pci_sleep(ar);
1155
1156 return ce_state;
1157}
1158
1159void ath10k_ce_deinit(struct ce_state *ce_state)
1160{
1161 unsigned int ce_id = ce_state->id;
1162 struct ath10k *ar = ce_state->ar;
1163 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1164
1165 ce_state->state = CE_UNUSED;
1166 ar_pci->ce_id_to_state[ce_id] = NULL;
1167
1168 if (ce_state->src_ring) {
1169 kfree(ce_state->src_ring->shadow_base_unaligned);
1170 pci_free_consistent(ar_pci->pdev,
1171 (ce_state->src_ring->nentries *
1172 sizeof(struct ce_desc) +
1173 CE_DESC_RING_ALIGN),
1174 ce_state->src_ring->base_addr_owner_space,
1175 ce_state->src_ring->base_addr_ce_space);
1176 kfree(ce_state->src_ring);
1177 }
1178
1179 if (ce_state->dest_ring) {
1180 pci_free_consistent(ar_pci->pdev,
1181 (ce_state->dest_ring->nentries *
1182 sizeof(struct ce_desc) +
1183 CE_DESC_RING_ALIGN),
1184 ce_state->dest_ring->base_addr_owner_space,
1185 ce_state->dest_ring->base_addr_ce_space);
1186 kfree(ce_state->dest_ring);
1187 }
1188 kfree(ce_state);
1189}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
new file mode 100644
index 000000000000..c17f07c026f4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -0,0 +1,516 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _CE_H_
19#define _CE_H_
20
21#include "hif.h"
22
23
24/* Maximum number of Copy Engine's supported */
25#define CE_COUNT_MAX 8
26#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
27
28/* Descriptor rings must be aligned to this boundary */
29#define CE_DESC_RING_ALIGN 8
30#define CE_SENDLIST_ITEMS_MAX 12
31#define CE_SEND_FLAG_GATHER 0x00010000
32
33/*
34 * Copy Engine support: low-level Target-side Copy Engine API.
35 * This is a hardware access layer used by code that understands
36 * how to use copy engines.
37 */
38
39struct ce_state;
40
41
42/* Copy Engine operational state */
43enum ce_op_state {
44 CE_UNUSED,
45 CE_PAUSED,
46 CE_RUNNING,
47};
48
49#define CE_DESC_FLAGS_GATHER (1 << 0)
50#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
51#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
52#define CE_DESC_FLAGS_META_DATA_LSB 3
53
/* Hardware copy-engine descriptor; all fields are little-endian */
struct ce_desc {
	__le32 addr;	/* buffer address in CE (target) address space */
	__le16 nbytes;	/* transfer length in bytes */
	__le16 flags; /* %CE_DESC_FLAGS_ */
};
59
/* Copy Engine Ring internal state */
struct ce_ring_state {
	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	/* nentries - 1: used for cheap modulo via bitwise AND
	 * (see CE_RING_DELTA / CE_RING_IDX_INCR) */
	unsigned int nentries_mask;

	/*
	 * For dest ring, this is the next index to be processed
	 * by software after it was/is received into.
	 *
	 * For src ring, this is the last descriptor that was sent
	 * and completion processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *	write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	/* cached copy */
	unsigned int write_index;
	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * for avoiding reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *	write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	/* cached copy */
	unsigned int hw_index;

	/* Start of DMA-coherent area reserved for descriptors */
	/* Host address space */
	void *base_addr_owner_space_unaligned;
	/* CE address space */
	u32 base_addr_ce_space_unaligned;

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	/* Host address space */
	void *base_addr_owner_space;

	/* CE address space */
	u32 base_addr_ce_space;
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	void *shadow_base_unaligned;
	struct ce_desc *shadow_base;

	/* Per-entry caller context, returned through the completion and
	 * revoke/cancel APIs (e.g. ath10k_ce_completed_recv_next()) */
	void **per_transfer_context;
};
118
/* Copy Engine internal state */
struct ce_state {
	struct ath10k *ar;
	/* engine number; index into the per-device CE lookup table */
	unsigned int id;

	/* CE_ATTR_* values copied from the ce_attr used at init */
	unsigned int attr_flags;

	/* base of this engine's register block */
	u32 ctrl_addr;
	enum ce_op_state state;

	/* Completion callback for sends; registered via
	 * ath10k_ce_send_cb_register() */
	void (*send_cb) (struct ce_state *ce_state,
			 void *per_transfer_send_context,
			 u32 buffer,
			 unsigned int nbytes,
			 unsigned int transfer_id);
	/* Completion callback for receives; registered via
	 * ath10k_ce_recv_cb_register() */
	void (*recv_cb) (struct ce_state *ce_state,
			 void *per_transfer_recv_context,
			 u32 buffer,
			 unsigned int nbytes,
			 unsigned int transfer_id,
			 unsigned int flags);

	/* max send size; also the minimum size of a dest buffer */
	unsigned int src_sz_max;
	struct ce_ring_state *src_ring;
	struct ce_ring_state *dest_ring;
};
145
/* One fragment of a gather transfer; see ath10k_ce_sendlist_send() */
struct ce_sendlist_item {
	/* e.g. buffer or desc list */
	dma_addr_t data;
	union {
		/* simple buffer */
		unsigned int nbytes;
		/* Rx descriptor list */
		unsigned int ndesc;
	} u;
	/* externally-specified flags; OR-ed with internal flags */
	u32 flags;
};
158
/* Bounded list of fragments to be sent as one gather transfer */
struct ce_sendlist {
	unsigned int num_items;	/* valid entries in item[]; <= CE_SENDLIST_ITEMS_MAX */
	struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
};
163
164/* Copy Engine settable attributes */
165struct ce_attr;
166
167/*==================Send====================*/
168
169/* ath10k_ce_send flags */
170#define CE_SEND_FLAG_BYTE_SWAP 1
171
172/*
173 * Queue a source buffer to be sent to an anonymous destination buffer.
174 * ce - which copy engine to use
175 * buffer - address of buffer
176 * nbytes - number of bytes to send
177 * transfer_id - arbitrary ID; reflected to destination
178 * flags - CE_SEND_FLAG_* values
179 * Returns 0 on success; otherwise an error status.
180 *
181 * Note: If no flags are specified, use CE's default data swap mode.
182 *
183 * Implementation note: pushes 1 buffer to Source ring
184 */
185int ath10k_ce_send(struct ce_state *ce_state,
186 void *per_transfer_send_context,
187 u32 buffer,
188 unsigned int nbytes,
189 /* 14 bits */
190 unsigned int transfer_id,
191 unsigned int flags);
192
193void ath10k_ce_send_cb_register(struct ce_state *ce_state,
194 void (*send_cb) (struct ce_state *ce_state,
195 void *transfer_context,
196 u32 buffer,
197 unsigned int nbytes,
198 unsigned int transfer_id),
199 int disable_interrupts);
200
201/* Append a simple buffer (address/length) to a sendlist. */
202void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
203 u32 buffer,
204 unsigned int nbytes,
205 /* OR-ed with internal flags */
206 u32 flags);
207
208/*
209 * Queue a "sendlist" of buffers to be sent using gather to a single
210 * anonymous destination buffer
211 * ce - which copy engine to use
212 * sendlist - list of simple buffers to send using gather
213 * transfer_id - arbitrary ID; reflected to destination
214 * Returns 0 on success; otherwise an error status.
215 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
217 */
218int ath10k_ce_sendlist_send(struct ce_state *ce_state,
219 void *per_transfer_send_context,
220 struct ce_sendlist *sendlist,
221 /* 14 bits */
222 unsigned int transfer_id);
223
224/*==================Recv=======================*/
225
226/*
227 * Make a buffer available to receive. The buffer must be at least of a
228 * minimal size appropriate for this copy engine (src_sz_max attribute).
229 * ce - which copy engine to use
230 * per_transfer_recv_context - context passed back to caller's recv_cb
231 * buffer - address of buffer in CE space
232 * Returns 0 on success; otherwise an error status.
233 *
 * Implementation note: Pushes a buffer to Dest ring.
235 */
236int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
237 void *per_transfer_recv_context,
238 u32 buffer);
239
240void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
241 void (*recv_cb) (struct ce_state *ce_state,
242 void *transfer_context,
243 u32 buffer,
244 unsigned int nbytes,
245 unsigned int transfer_id,
246 unsigned int flags));
247
248/* recv flags */
249/* Data is byte-swapped */
250#define CE_RECV_FLAG_SWAPPED 1
251
252/*
253 * Supply data for the next completed unprocessed receive descriptor.
254 * Pops buffer from Dest ring.
255 */
256int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
257 void **per_transfer_contextp,
258 u32 *bufferp,
259 unsigned int *nbytesp,
260 unsigned int *transfer_idp,
261 unsigned int *flagsp);
262/*
263 * Supply data for the next completed unprocessed send descriptor.
264 * Pops 1 completed send buffer from Source ring.
265 */
266int ath10k_ce_completed_send_next(struct ce_state *ce_state,
267 void **per_transfer_contextp,
268 u32 *bufferp,
269 unsigned int *nbytesp,
270 unsigned int *transfer_idp);
271
272/*==================CE Engine Initialization=======================*/
273
274/* Initialize an instance of a CE */
275struct ce_state *ath10k_ce_init(struct ath10k *ar,
276 unsigned int ce_id,
277 const struct ce_attr *attr);
278
279/*==================CE Engine Shutdown=======================*/
280/*
281 * Support clean shutdown by allowing the caller to revoke
282 * receive buffers. Target DMA must be stopped before using
283 * this API.
284 */
285int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
286 void **per_transfer_contextp,
287 u32 *bufferp);
288
289/*
290 * Support clean shutdown by allowing the caller to cancel
291 * pending sends. Target DMA must be stopped before using
292 * this API.
293 */
294int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
295 void **per_transfer_contextp,
296 u32 *bufferp,
297 unsigned int *nbytesp,
298 unsigned int *transfer_idp);
299
300void ath10k_ce_deinit(struct ce_state *ce_state);
301
302/*==================CE Interrupt Handlers====================*/
303void ath10k_ce_per_engine_service_any(struct ath10k *ar);
304void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
305void ath10k_ce_disable_interrupts(struct ath10k *ar);
306
307/* ce_attr.flags values */
308/* Use NonSnooping PCIe accesses? */
309#define CE_ATTR_NO_SNOOP 1
310
311/* Byte swap data words */
312#define CE_ATTR_BYTE_SWAP_DATA 2
313
314/* Swizzle descriptors? */
315#define CE_ATTR_SWIZZLE_DESCRIPTORS 4
316
317/* no interrupt on copy completion */
318#define CE_ATTR_DIS_INTR 8
319
/* Attributes of an instance of a Copy Engine; supplied by the caller of
 * ath10k_ce_init() to size and configure the engine */
struct ce_attr {
	/* CE_ATTR_* values */
	unsigned int flags;

	/* currently not in use */
	unsigned int priority;

	/* #entries in source ring - Must be a power of 2 */
	unsigned int src_nentries;

	/*
	 * Max source send size for this CE.
	 * This is also the minimum size of a destination buffer.
	 */
	unsigned int src_sz_max;

	/* #entries in destination ring - Must be a power of 2 */
	unsigned int dest_nentries;

	/* Future use */
	void *reserved;
};
343
344/*
345 * When using sendlist_send to transfer multiple buffer fragments, the
346 * transfer context of each fragment, except last one, will be filled
347 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
348 * each fragment done with send and the transfer context would be
349 * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
350 * status of a send completion.
351 */
352#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
353
354#define SR_BA_ADDRESS 0x0000
355#define SR_SIZE_ADDRESS 0x0004
356#define DR_BA_ADDRESS 0x0008
357#define DR_SIZE_ADDRESS 0x000c
358#define CE_CMD_ADDRESS 0x0018
359
360#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17
361#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
362#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
363#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
364 (((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
365 CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
366
367#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16
368#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
369#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
370#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
371 (((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
372 CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
373#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
374 (((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
375 CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
376
377#define CE_CTRL1_DMAX_LENGTH_MSB 15
378#define CE_CTRL1_DMAX_LENGTH_LSB 0
379#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
380#define CE_CTRL1_DMAX_LENGTH_GET(x) \
381 (((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
382#define CE_CTRL1_DMAX_LENGTH_SET(x) \
383 (((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
384
385#define CE_CTRL1_ADDRESS 0x0010
386#define CE_CTRL1_HW_MASK 0x0007ffff
387#define CE_CTRL1_SW_MASK 0x0007ffff
388#define CE_CTRL1_HW_WRITE_MASK 0x00000000
389#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff
390#define CE_CTRL1_RSTMASK 0xffffffff
391#define CE_CTRL1_RESET 0x00000080
392
393#define CE_CMD_HALT_STATUS_MSB 3
394#define CE_CMD_HALT_STATUS_LSB 3
395#define CE_CMD_HALT_STATUS_MASK 0x00000008
396#define CE_CMD_HALT_STATUS_GET(x) \
397 (((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
398#define CE_CMD_HALT_STATUS_SET(x) \
399 (((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
400#define CE_CMD_HALT_STATUS_RESET 0
401#define CE_CMD_HALT_MSB 0
402#define CE_CMD_HALT_MASK 0x00000001
403
404#define HOST_IE_COPY_COMPLETE_MSB 0
405#define HOST_IE_COPY_COMPLETE_LSB 0
406#define HOST_IE_COPY_COMPLETE_MASK 0x00000001
407#define HOST_IE_COPY_COMPLETE_GET(x) \
408 (((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
409#define HOST_IE_COPY_COMPLETE_SET(x) \
410 (((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
411#define HOST_IE_COPY_COMPLETE_RESET 0
412#define HOST_IE_ADDRESS 0x002c
413
414#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
415#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
416#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
417#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
418#define HOST_IS_COPY_COMPLETE_MASK 0x00000001
419#define HOST_IS_ADDRESS 0x0030
420
421#define MISC_IE_ADDRESS 0x0034
422
423#define MISC_IS_AXI_ERR_MASK 0x00000400
424
425#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200
426#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100
427#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
428#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
429#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
430
431#define MISC_IS_ADDRESS 0x0038
432
433#define SR_WR_INDEX_ADDRESS 0x003c
434
435#define DST_WR_INDEX_ADDRESS 0x0040
436
437#define CURRENT_SRRI_ADDRESS 0x0044
438
439#define CURRENT_DRRI_ADDRESS 0x0048
440
441#define SRC_WATERMARK_LOW_MSB 31
442#define SRC_WATERMARK_LOW_LSB 16
443#define SRC_WATERMARK_LOW_MASK 0xffff0000
444#define SRC_WATERMARK_LOW_GET(x) \
445 (((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
446#define SRC_WATERMARK_LOW_SET(x) \
447 (((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
448#define SRC_WATERMARK_LOW_RESET 0
449#define SRC_WATERMARK_HIGH_MSB 15
450#define SRC_WATERMARK_HIGH_LSB 0
451#define SRC_WATERMARK_HIGH_MASK 0x0000ffff
452#define SRC_WATERMARK_HIGH_GET(x) \
453 (((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
454#define SRC_WATERMARK_HIGH_SET(x) \
455 (((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
456#define SRC_WATERMARK_HIGH_RESET 0
457#define SRC_WATERMARK_ADDRESS 0x004c
458
459#define DST_WATERMARK_LOW_LSB 16
460#define DST_WATERMARK_LOW_MASK 0xffff0000
461#define DST_WATERMARK_LOW_SET(x) \
462 (((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
463#define DST_WATERMARK_LOW_RESET 0
464#define DST_WATERMARK_HIGH_MSB 15
465#define DST_WATERMARK_HIGH_LSB 0
466#define DST_WATERMARK_HIGH_MASK 0x0000ffff
467#define DST_WATERMARK_HIGH_GET(x) \
468 (((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
469#define DST_WATERMARK_HIGH_SET(x) \
470 (((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
471#define DST_WATERMARK_HIGH_RESET 0
472#define DST_WATERMARK_ADDRESS 0x0050
473
474
475static inline u32 ath10k_ce_base_address(unsigned int ce_id)
476{
477 return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
478}
479
480#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \
481 HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
482 HOST_IS_DST_RING_LOW_WATERMARK_MASK | \
483 HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
484
485#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \
486 MISC_IS_DST_ADDR_ERR_MASK | \
487 MISC_IS_SRC_LEN_ERR_MASK | \
488 MISC_IS_DST_MAX_LEN_VIO_MASK | \
489 MISC_IS_DST_RING_OVERFLOW_MASK | \
490 MISC_IS_SRC_RING_OVERFLOW_MASK)
491
492#define CE_SRC_RING_TO_DESC(baddr, idx) \
493 (&(((struct ce_desc *)baddr)[idx]))
494
495#define CE_DEST_RING_TO_DESC(baddr, idx) \
496 (&(((struct ce_desc *)baddr)[idx]))
497
498/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
499#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
500 (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
501
502#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
503
504#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
505#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
506#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
507 (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
508 CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
509#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
510
511#define CE_INTERRUPT_SUMMARY(ar) \
512 CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
513 ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
514 CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
515
516#endif /* _CE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
new file mode 100644
index 000000000000..2b3426b1ff3f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -0,0 +1,665 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/module.h>
19#include <linux/firmware.h>
20
21#include "core.h"
22#include "mac.h"
23#include "htc.h"
24#include "hif.h"
25#include "wmi.h"
26#include "bmi.h"
27#include "debug.h"
28#include "htt.h"
29
/* Module parameters: debug message mask, target UART print enable, and
 * P2P support toggle.  All are writable at runtime (mode 0644). */
unsigned int ath10k_debug_mask;
static bool uart_print;
static unsigned int ath10k_p2p;
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param(uart_print, bool, 0644);
module_param_named(p2p, ath10k_p2p, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
39
/* Known hardware revisions and their firmware file locations; matched
 * against the target version in ath10k_init_hw_params(). */
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
	{
		.id = QCA988X_HW_1_0_VERSION,
		.name = "qca988x hw1.0",
		.patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
		.fw = {
			.dir = QCA988X_HW_1_0_FW_DIR,
			.fw = QCA988X_HW_1_0_FW_FILE,
			.otp = QCA988X_HW_1_0_OTP_FILE,
			.board = QCA988X_HW_1_0_BOARD_DATA_FILE,
		},
	},
	{
		.id = QCA988X_HW_2_0_VERSION,
		.name = "qca988x hw2.0",
		.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
		.fw = {
			.dir = QCA988X_HW_2_0_FW_DIR,
			.fw = QCA988X_HW_2_0_FW_FILE,
			.otp = QCA988X_HW_2_0_OTP_FILE,
			.board = QCA988X_HW_2_0_BOARD_DATA_FILE,
		},
	},
};
64
/* HTC callback: the target acknowledged a suspend request.  Records the
 * paused state and wakes anyone sleeping on ar->event_queue. */
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
	ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);

	ar->is_target_paused = true;
	wake_up(&ar->event_queue);
}
72
73static int ath10k_check_fw_version(struct ath10k *ar)
74{
75 char version[32];
76
77 if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
78 ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
79 ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
80 ar->fw_version_build >= SUPPORTED_FW_BUILD)
81 return 0;
82
83 snprintf(version, sizeof(version), "%u.%u.%u.%u",
84 SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
85 SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
86
87 ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
88 ar->hw->wiphy->fw_version);
89 ath10k_warn("Please upgrade to version %s (or newer)\n", version);
90
91 return 0;
92}
93
94static int ath10k_init_connect_htc(struct ath10k *ar)
95{
96 int status;
97
98 status = ath10k_wmi_connect_htc_service(ar);
99 if (status)
100 goto conn_fail;
101
102 /* Start HTC */
103 status = ath10k_htc_start(ar->htc);
104 if (status)
105 goto conn_fail;
106
107 /* Wait for WMI event to be ready */
108 status = ath10k_wmi_wait_for_service_ready(ar);
109 if (status <= 0) {
110 ath10k_warn("wmi service ready event not received");
111 status = -ETIMEDOUT;
112 goto timeout;
113 }
114
115 ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
116 return 0;
117
118timeout:
119 ath10k_htc_stop(ar->htc);
120conn_fail:
121 return status;
122}
123
/*
 * Push the host's run-time configuration to the target over BMI:
 * HTC protocol version, firmware option flags (device count, firmware
 * mode, MAC address method, bridge/submode bits) and byte-swap settings.
 * Must run while BMI is still active, i.e. before ath10k_bmi_done().
 * Returns 0 on success or the first failing BMI write's error code.
 */
static int ath10k_init_configure_target(struct ath10k *ar)
{
	u32 param_host;
	int ret;

	/* tell target which HTC version we are using */
	ret = ath10k_bmi_write32(ar, hi_app_host_interest,
				 HTC_PROTOCOL_VERSION);
	if (ret) {
		ath10k_err("settings HTC version failed\n");
		return ret;
	}

	/* set the firmware mode to STA/IBSS/AP; read-modify-write the
	 * target's existing option flags */
	ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
	if (ret) {
		ath10k_err("setting firmware mode (1/2) failed\n");
		return ret;
	}

	/* TODO following parameters need to be re-visited. */
	/* num_device */
	param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT);
	/* Firmware mode */
	/* FIXME: Why FW_MODE_AP ??.*/
	param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT);
	/* mac_addr_method */
	param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
	/* firmware_bridge */
	param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
	/* fwsubmode */
	param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT);

	ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
	if (ret) {
		ath10k_err("setting firmware mode (2/2) failed\n");
		return ret;
	}

	/* We do all byte-swapping on the host */
	ret = ath10k_bmi_write32(ar, hi_be, 0);
	if (ret) {
		ath10k_err("setting host CPU BE mode failed\n");
		return ret;
	}

	/* FW descriptor/Data swap flags */
	ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);

	if (ret) {
		ath10k_err("setting FW data/desc swap flags failed\n");
		return ret;
	}

	return 0;
}
180
181static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
182 const char *dir,
183 const char *file)
184{
185 char filename[100];
186 const struct firmware *fw;
187 int ret;
188
189 if (file == NULL)
190 return ERR_PTR(-ENOENT);
191
192 if (dir == NULL)
193 dir = ".";
194
195 snprintf(filename, sizeof(filename), "%s/%s", dir, file);
196 ret = request_firmware(&fw, filename, ar->dev);
197 if (ret)
198 return ERR_PTR(ret);
199
200 return fw;
201}
202
/*
 * Copy the extended board-data section of @fw to the target, if the
 * target advertises an extended board data area (hi_board_ext_data != 0).
 *
 * The firmware file must then be exactly board_data + board_ext_data in
 * size; the extended part starts right after the regular board data.
 * Returns 0 on success or when the target has no extended area.
 */
static int ath10k_push_board_ext_data(struct ath10k *ar,
				      const struct firmware *fw)
{
	u32 board_data_size = QCA988X_BOARD_DATA_SZ;
	u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
	u32 board_ext_data_addr;
	int ret;

	/* Ask the target where (if anywhere) extended data should go */
	ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
	if (ret) {
		ath10k_err("could not read board ext data addr (%d)\n", ret);
		return ret;
	}

	ath10k_dbg(ATH10K_DBG_CORE,
		   "ath10k: Board extended Data download addr: 0x%x\n",
		   board_ext_data_addr);

	/* Address 0 means the target has no extended board data area */
	if (board_ext_data_addr == 0)
		return 0;

	if (fw->size != (board_data_size + board_ext_data_size)) {
		ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
			   fw->size, board_data_size, board_ext_data_size);
		return -EINVAL;
	}

	ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
				      fw->data + board_data_size,
				      board_ext_data_size);
	if (ret) {
		ath10k_err("could not write board ext data (%d)\n", ret);
		return ret;
	}

	/* Tell the target how much extended data arrived (size in the
	 * high 16 bits, valid bit in the low bit) */
	ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
				 (board_ext_data_size << 16) | 1);
	if (ret) {
		ath10k_err("could not write board ext data bit (%d)\n", ret);
		return ret;
	}

	return 0;
}
247
/*
 * Fetch the board-data firmware file and download it to the target:
 * push any extended section first, then write the regular board data to
 * the address the target reports, and finally flag it as initialized.
 * Returns 0 on success; the firmware blob is always released.
 */
static int ath10k_download_board_data(struct ath10k *ar)
{
	u32 board_data_size = QCA988X_BOARD_DATA_SZ;
	u32 address;
	const struct firmware *fw;
	int ret;

	fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
				  ar->hw_params.fw.board);
	if (IS_ERR(fw)) {
		ath10k_err("could not fetch board data fw file (%ld)\n",
			   PTR_ERR(fw));
		return PTR_ERR(fw);
	}

	ret = ath10k_push_board_ext_data(ar, fw);
	if (ret) {
		ath10k_err("could not push board ext data (%d)\n", ret);
		goto exit;
	}

	/* The target tells us where regular board data belongs */
	ret = ath10k_bmi_read32(ar, hi_board_data, &address);
	if (ret) {
		ath10k_err("could not read board data addr (%d)\n", ret);
		goto exit;
	}

	/* Cap the write at the expected board-data size in case the file
	 * also carries an extended section */
	ret = ath10k_bmi_write_memory(ar, address, fw->data,
				      min_t(u32, board_data_size, fw->size));
	if (ret) {
		ath10k_err("could not write board data (%d)\n", ret);
		goto exit;
	}

	ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
	if (ret) {
		ath10k_err("could not write board data bit (%d)\n", ret);
		goto exit;
	}

exit:
	release_firmware(fw);
	return ret;
}
292
/*
 * Download the OTP image (if one is configured for this hardware) to the
 * patch load address and execute it on the target.
 *
 * OTP is optional: a missing file name or a failed fetch is not an
 * error — the function returns 0 in both cases.  Download or execute
 * failures, however, are reported to the caller.
 */
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
	const struct firmware *fw;
	u32 address;
	u32 exec_param;
	int ret;

	/* OTP is optional */

	if (ar->hw_params.fw.otp == NULL) {
		ath10k_info("otp file not defined\n");
		return 0;
	}

	address = ar->hw_params.patch_load_addr;

	fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
				  ar->hw_params.fw.otp);
	if (IS_ERR(fw)) {
		/* best effort: a missing OTP file is tolerated */
		ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw));
		return 0;
	}

	ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
	if (ret) {
		ath10k_err("could not write otp (%d)\n", ret);
		goto exit;
	}

	/* Run the OTP code at its load address with argument 0 */
	exec_param = 0;
	ret = ath10k_bmi_execute(ar, address, &exec_param);
	if (ret) {
		ath10k_err("could not execute otp (%d)\n", ret);
		goto exit;
	}

exit:
	release_firmware(fw);
	return ret;
}
333
334static int ath10k_download_fw(struct ath10k *ar)
335{
336 const struct firmware *fw;
337 u32 address;
338 int ret;
339
340 if (ar->hw_params.fw.fw == NULL)
341 return -EINVAL;
342
343 address = ar->hw_params.patch_load_addr;
344
345 fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
346 ar->hw_params.fw.fw);
347 if (IS_ERR(fw)) {
348 ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw));
349 return PTR_ERR(fw);
350 }
351
352 ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
353 if (ret) {
354 ath10k_err("could not write fw (%d)\n", ret);
355 goto exit;
356 }
357
358exit:
359 release_firmware(fw);
360 return ret;
361}
362
/*
 * Run the full download sequence: board data, then the optional OTP
 * image, then the main firmware.  Stops at the first failure and
 * returns its error code; 0 when all three steps succeed.
 */
static int ath10k_init_download_firmware(struct ath10k *ar)
{
	int ret;

	ret = ath10k_download_board_data(ar);
	if (ret == 0)
		ret = ath10k_download_and_run_otp(ar);
	if (ret == 0)
		ret = ath10k_download_fw(ar);

	return ret;
}
381
382static int ath10k_init_uart(struct ath10k *ar)
383{
384 int ret;
385
386 /*
387 * Explicitly setting UART prints to zero as target turns it on
388 * based on scratch registers.
389 */
390 ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
391 if (ret) {
392 ath10k_warn("could not disable UART prints (%d)\n", ret);
393 return ret;
394 }
395
396 if (!uart_print) {
397 ath10k_info("UART prints disabled\n");
398 return 0;
399 }
400
401 ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
402 if (ret) {
403 ath10k_warn("could not enable UART prints (%d)\n", ret);
404 return ret;
405 }
406
407 ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
408 if (ret) {
409 ath10k_warn("could not enable UART prints (%d)\n", ret);
410 return ret;
411 }
412
413 ath10k_info("UART prints enabled\n");
414 return 0;
415}
416
417static int ath10k_init_hw_params(struct ath10k *ar)
418{
419 const struct ath10k_hw_params *uninitialized_var(hw_params);
420 int i;
421
422 for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
423 hw_params = &ath10k_hw_params_list[i];
424
425 if (hw_params->id == ar->target_version)
426 break;
427 }
428
429 if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
430 ath10k_err("Unsupported hardware version: 0x%x\n",
431 ar->target_version);
432 return -EINVAL;
433 }
434
435 ar->hw_params = *hw_params;
436
437 ath10k_info("Hardware name %s version 0x%x\n",
438 ar->hw_params.name, ar->target_version);
439
440 return 0;
441}
442
/*
 * Allocate and initialize a new ath10k device instance.
 *
 * Creates the mac80211 hw via ath10k_mac_create(), wires up the HIF
 * (bus) callbacks, and initializes all locks, completions, work items
 * and the driver workqueue.  No hardware access happens here.
 *
 * @hif_priv: bus-layer private data stored in ar->hif.priv
 * @dev:      underlying struct device (e.g. the PCI device)
 * @bus:      which bus the device sits on
 * @hif_ops:  bus access callbacks
 *
 * Returns the new instance or NULL on allocation failure.  Free with
 * ath10k_core_destroy().
 */
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
				  enum ath10k_bus bus,
				  const struct ath10k_hif_ops *hif_ops)
{
	struct ath10k *ar;

	ar = ath10k_mac_create();
	if (!ar)
		return NULL;

	ar->ath_common.priv = ar;
	ar->ath_common.hw = ar->hw;

	ar->p2p = !!ath10k_p2p;
	ar->dev = dev;

	ar->hif.priv = hif_priv;
	ar->hif.ops = hif_ops;
	ar->hif.bus = bus;

	ar->free_vdev_map = 0xFF; /* 8 vdevs */

	init_completion(&ar->scan.started);
	init_completion(&ar->scan.completed);
	init_completion(&ar->scan.on_channel);

	init_completion(&ar->install_key_done);
	init_completion(&ar->vdev_setup_done);

	/* scan watchdog; fires ath10k_reset_scan if a scan hangs */
	setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);

	ar->workqueue = create_singlethread_workqueue("ath10k_wq");
	if (!ar->workqueue)
		goto err_wq;

	mutex_init(&ar->conf_mutex);
	spin_lock_init(&ar->data_lock);

	INIT_LIST_HEAD(&ar->peers);
	init_waitqueue_head(&ar->peer_mapping_wq);

	init_completion(&ar->offchan_tx_completed);
	INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
	skb_queue_head_init(&ar->offchan_tx_queue);

	init_waitqueue_head(&ar->event_queue);

	return ar;

err_wq:
	ath10k_mac_destroy(ar);
	return NULL;
}
EXPORT_SYMBOL(ath10k_core_create);
497
/*
 * Free an instance created by ath10k_core_create(): drain and destroy
 * the driver workqueue, then release the mac80211 hw.  Must only be
 * called after ath10k_core_unregister() (or if register never ran).
 */
void ath10k_core_destroy(struct ath10k *ar)
{
	flush_workqueue(ar->workqueue);
	destroy_workqueue(ar->workqueue);

	ath10k_mac_destroy(ar);
}
EXPORT_SYMBOL(ath10k_core_destroy);
506
507
/*
 * Bring the device fully up and register it with mac80211.
 *
 * Sequence: query the target over BMI, pick hw_params, configure the
 * target, download board data/OTP/firmware, set up UART prints, create
 * HTC, finish BMI, attach WMI and HTT, connect HTC, verify the firmware
 * version, send WMI init, wait for unified-ready, attach HTT to the
 * target, register with mac80211, and create debugfs entries.
 *
 * On failure every already-completed step is unwound in reverse order
 * via the goto ladder below.  Returns 0 on success.
 */
int ath10k_core_register(struct ath10k *ar)
{
	struct ath10k_htc_ops htc_ops;
	struct bmi_target_info target_info;
	int status;

	memset(&target_info, 0, sizeof(target_info));
	status = ath10k_bmi_get_target_info(ar, &target_info);
	if (status)
		goto err;

	ar->target_version = target_info.version;
	ar->hw->wiphy->hw_version = target_info.version;

	status = ath10k_init_hw_params(ar);
	if (status)
		goto err;

	if (ath10k_init_configure_target(ar)) {
		status = -EINVAL;
		goto err;
	}

	status = ath10k_init_download_firmware(ar);
	if (status)
		goto err;

	status = ath10k_init_uart(ar);
	if (status)
		goto err;

	htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete;

	ar->htc = ath10k_htc_create(ar, &htc_ops);
	if (IS_ERR(ar->htc)) {
		status = PTR_ERR(ar->htc);
		ath10k_err("could not create HTC (%d)\n", status);
		goto err;
	}

	/* BMI phase is over once the firmware is downloaded and HTC exists */
	status = ath10k_bmi_done(ar);
	if (status)
		goto err_htc_destroy;

	status = ath10k_wmi_attach(ar);
	if (status) {
		ath10k_err("WMI attach failed: %d\n", status);
		goto err_htc_destroy;
	}

	status = ath10k_htc_wait_target(ar->htc);
	if (status)
		goto err_wmi_detach;

	ar->htt = ath10k_htt_attach(ar);
	if (!ar->htt) {
		status = -ENOMEM;
		goto err_wmi_detach;
	}

	status = ath10k_init_connect_htc(ar);
	if (status)
		goto err_htt_detach;

	ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);

	status = ath10k_check_fw_version(ar);
	if (status)
		goto err_disconnect_htc;

	status = ath10k_wmi_cmd_init(ar);
	if (status) {
		ath10k_err("could not send WMI init command (%d)\n", status);
		goto err_disconnect_htc;
	}

	status = ath10k_wmi_wait_for_unified_ready(ar);
	if (status <= 0) {
		ath10k_err("wmi unified ready event not received\n");
		status = -ETIMEDOUT;
		goto err_disconnect_htc;
	}

	status = ath10k_htt_attach_target(ar->htt);
	if (status)
		goto err_disconnect_htc;

	status = ath10k_mac_register(ar);
	if (status)
		goto err_disconnect_htc;

	/* debugfs failure is fatal here; unwind mac registration too */
	status = ath10k_debug_create(ar);
	if (status) {
		ath10k_err("unable to initialize debugfs\n");
		goto err_unregister_mac;
	}

	return 0;

err_unregister_mac:
	ath10k_mac_unregister(ar);
err_disconnect_htc:
	ath10k_htc_stop(ar->htc);
err_htt_detach:
	ath10k_htt_detach(ar->htt);
err_wmi_detach:
	ath10k_wmi_detach(ar);
err_htc_destroy:
	ath10k_htc_destroy(ar->htc);
err:
	return status;
}
EXPORT_SYMBOL(ath10k_core_register);
621
/*
 * Undo ath10k_core_register(): detach from mac80211 and tear down the
 * HTC/HTT/WMI stack in reverse order of construction.
 */
void ath10k_core_unregister(struct ath10k *ar)
{
	/* We must unregister from mac80211 before we stop HTC and HIF.
	 * Otherwise we will fail to submit commands to FW and mac80211 will be
	 * unhappy about callback failures. */
	ath10k_mac_unregister(ar);
	ath10k_htc_stop(ar->htc);
	ath10k_htt_detach(ar->htt);
	ath10k_wmi_detach(ar);
	ath10k_htc_destroy(ar->htc);
}
EXPORT_SYMBOL(ath10k_core_unregister);
634
/*
 * Ask the target (via WMI) to suspend.  Returns the WMI command status;
 * a failure is logged but left to the caller to handle.
 */
int ath10k_core_target_suspend(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);

	ret = ath10k_wmi_pdev_suspend_target(ar);
	if (ret)
		ath10k_warn("could not suspend target (%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL(ath10k_core_target_suspend);
648
/*
 * Ask the target (via WMI) to resume after a suspend.  Returns the WMI
 * command status; a failure is logged but left to the caller to handle.
 */
int ath10k_core_target_resume(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);

	ret = ath10k_wmi_pdev_resume_target(ar);
	if (ret)
		ath10k_warn("could not resume target (%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL(ath10k_core_target_resume);
662
663MODULE_AUTHOR("Qualcomm Atheros");
664MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
665MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
new file mode 100644
index 000000000000..539336d1be4b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -0,0 +1,369 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _CORE_H_
19#define _CORE_H_
20
21#include <linux/completion.h>
22#include <linux/if_ether.h>
23#include <linux/types.h>
24#include <linux/pci.h>
25
26#include "htc.h"
27#include "hw.h"
28#include "targaddrs.h"
29#include "wmi.h"
30#include "../ath.h"
31#include "../regd.h"
32
/* Bit-field helpers relying on the FIELD_MASK/FIELD_LSB naming
 * convention: MS() extracts a field, SM() shifts a value into place,
 * WO() converts a byte offset to a 32-bit word offset. */
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
#define WO(_f) ((_f##_OFFSET) >> 2)

#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)

/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
43
struct ath10k;

/* Underlying bus the device is attached through; only PCI exists so far. */
enum ath10k_bus {
	ATH10K_BUS_PCI,
};
49
/* Per-skb driver state stored in mac80211's tx-info driver_data area
 * (accessed via ATH10K_SKB_CB()); __packed so it fits that scratch space. */
struct ath10k_skb_cb {
	dma_addr_t paddr;	/* DMA address set by ath10k_skb_map() */
	bool is_mapped;		/* true while paddr is a live mapping */
	bool is_aborted;

	struct {
		u8 vdev_id;
		u16 msdu_id;
		u8 tid;
		bool is_offchan;
		bool is_conf;
		bool discard;
		bool no_ack;
		u8 refcount;
		struct sk_buff *txfrag;
		struct sk_buff *msdu;
	} __packed htt;

	/* 4 bytes left on 64bit arch */
} __packed;
70
/* Fetch the driver's per-skb state; the BUILD_BUG_ON guarantees at
 * compile time that it fits inside mac80211's driver_data area. */
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
	return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
}
77
78static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
79{
80 if (ATH10K_SKB_CB(skb)->is_mapped)
81 return -EINVAL;
82
83 ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
84 DMA_TO_DEVICE);
85
86 if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
87 return -EIO;
88
89 ATH10K_SKB_CB(skb)->is_mapped = true;
90 return 0;
91}
92
93static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
94{
95 if (!ATH10K_SKB_CB(skb)->is_mapped)
96 return -EINVAL;
97
98 dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
99 DMA_TO_DEVICE);
100 ATH10K_SKB_CB(skb)->is_mapped = false;
101 return 0;
102}
103
/* Translate an offset within the target's host-interest area into an
 * absolute target address. */
static inline u32 host_interest_item_address(u32 item_offset)
{
	return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
}
108
/* BMI (boot messaging interface) state. */
struct ath10k_bmi {
	bool done_sent;	/* presumably set once the BMI "done" command has
			 * been issued -- confirm against bmi.c */
};

/* WMI command-channel state. */
struct ath10k_wmi {
	enum ath10k_htc_ep_id eid;
	struct completion service_ready;
	struct completion unified_ready;
	atomic_t pending_tx_count;
	wait_queue_head_t wq;

	struct sk_buff_head wmi_event_list;
	struct work_struct wmi_event_work;
};
123
/* Per-peer statistics parsed from a WMI stats event. */
struct ath10k_peer_stat {
	u8 peer_macaddr[ETH_ALEN];
	u32 peer_rssi;
	u32 peer_tx_rate;
};

/* Host-side copy of firmware statistics; populated by
 * ath10k_debug_read_target_stats() and printed by the "fw_stats"
 * debugfs file. */
struct ath10k_target_stats {
	/* PDEV stats */
	s32 ch_noise_floor;
	u32 tx_frame_count;
	u32 rx_frame_count;
	u32 rx_clear_count;
	u32 cycle_count;
	u32 phy_err_count;
	u32 chan_tx_power;

	/* PDEV TX stats */
	s32 comp_queued;
	s32 comp_delivered;
	s32 msdu_enqued;
	s32 mpdu_enqued;
	s32 wmm_drop;
	s32 local_enqued;
	s32 local_freed;
	s32 hw_queued;
	s32 hw_reaped;
	s32 underrun;
	s32 tx_abort;
	s32 mpdus_requed;
	u32 tx_ko;
	u32 data_rc;
	u32 self_triggers;
	u32 sw_retry_failure;
	u32 illgl_rate_phy_err;
	u32 pdev_cont_xretry;
	u32 pdev_tx_timeout;
	u32 pdev_resets;
	u32 phy_underrun;
	u32 txop_ovf;

	/* PDEV RX stats */
	s32 mid_ppdu_route_change;
	s32 status_rcvd;
	s32 r0_frags;
	s32 r1_frags;
	s32 r2_frags;
	s32 r3_frags;
	s32 htt_msdus;
	s32 htt_mpdus;
	s32 loc_msdus;
	s32 loc_mpdus;
	s32 oversize_amsdu;
	s32 phy_errs;
	s32 phy_err_drop;
	s32 mpdu_errs;

	/* VDEV STATS */

	/* PEER STATS */
	u8 peers;
	struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];

	/* TODO: Beacon filter stats */

};
189
#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */

/* Host bookkeeping for a firmware peer; linked into ath10k::peers
 * (protected by data_lock per the comment in struct ath10k). */
struct ath10k_peer {
	struct list_head list;
	int vdev_id;
	u8 addr[ETH_ALEN];
	DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
	struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};
199
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)

/* Per-interface driver state attached to each ieee80211_vif. */
struct ath10k_vif {
	u32 vdev_id;
	enum wmi_vdev_type vdev_type;
	enum wmi_vdev_subtype vdev_subtype;
	u32 beacon_interval;
	u32 dtim_period;

	struct ath10k *ar;
	struct ieee80211_vif *vif;

	struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
	u8 def_wep_key_index;

	u16 tx_seq_no;

	/* mode-specific state; which member is valid follows vdev_type */
	union {
		struct {
			u8 bssid[ETH_ALEN];
			u32 uapsd;
		} sta;
		struct {
			/* 127 stations; wmi limit */
			u8 tim_bitmap[16];
			u8 tim_len;
			u32 ssid_len;
			u8 ssid[IEEE80211_MAX_SSID_LEN];
			bool hidden_ssid;
			/* P2P_IE with NoA attribute for P2P_GO case */
			u32 noa_len;
			u8 *noa_data;
		} ap;
		struct {
			u8 bssid[ETH_ALEN];
		} ibss;
	} u;
};

/* Search cursor used when iterating interfaces for a given vdev_id;
 * the matching vif (if any) ends up in arvif. */
struct ath10k_vif_iter {
	u32 vdev_id;
	struct ath10k_vif *arvif;
};
243
/* Debugfs state; only embedded in struct ath10k when
 * CONFIG_ATH10K_DEBUGFS is enabled. */
struct ath10k_debug {
	struct dentry *debugfs_phy;

	struct ath10k_target_stats target_stats;
	u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];

	/* completed when a WMI stats event has been parsed */
	struct completion event_stats_compl;
};
252
/* Main per-device state for the ath10k driver. */
struct ath10k {
	struct ath_common ath_common;
	struct ieee80211_hw *hw;
	struct device *dev;
	u8 mac_addr[ETH_ALEN];

	/* version/capability info reported by firmware */
	u32 target_version;
	u8 fw_version_major;
	u32 fw_version_minor;
	u16 fw_version_release;
	u16 fw_version_build;
	u32 phy_capability;
	u32 hw_min_tx_power;
	u32 hw_max_tx_power;
	u32 ht_cap_info;
	u32 vht_cap_info;

	struct targetdef *targetdef;
	struct hostdef *hostdef;

	bool p2p;

	/* bus abstraction; ops dispatch to the bus-specific HIF layer */
	struct {
		void *priv;
		enum ath10k_bus bus;
		const struct ath10k_hif_ops *ops;
	} hif;

	struct ath10k_wmi wmi;

	wait_queue_head_t event_queue;
	bool is_target_paused;

	struct ath10k_bmi bmi;

	struct ath10k_htc *htc;
	struct ath10k_htt *htt;

	/* hardware variant and firmware image names selected at probe */
	struct ath10k_hw_params {
		u32 id;
		const char *name;
		u32 patch_load_addr;

		struct ath10k_hw_params_fw {
			const char *dir;
			const char *fw;
			const char *otp;
			const char *board;
		} fw;
	} hw_params;

	/* scan / remain-on-channel state machine */
	struct {
		struct completion started;
		struct completion completed;
		struct completion on_channel;
		struct timer_list timeout;
		bool is_roc;
		bool in_progress;
		bool aborting;
		int vdev_id;
		int roc_freq;
	} scan;

	struct {
		struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
	} mac;

	/* should never be NULL; needed for regular htt rx */
	struct ieee80211_channel *rx_channel;

	/* valid during scan; needed for mgmt rx during scan */
	struct ieee80211_channel *scan_channel;

	int free_vdev_map;
	int monitor_vdev_id;
	bool monitor_enabled;
	bool monitor_present;
	unsigned int filter_flags;

	struct wmi_pdev_set_wmm_params_arg wmm_params;
	struct completion install_key_done;

	struct completion vdev_setup_done;

	struct workqueue_struct *workqueue;

	/* prevents concurrent FW reconfiguration */
	struct mutex conf_mutex;

	/* protects shared structure data */
	spinlock_t data_lock;

	struct list_head peers;
	wait_queue_head_t peer_mapping_wq;

	/* off-channel tx handled by offchan_tx_work */
	struct work_struct offchan_tx_work;
	struct sk_buff_head offchan_tx_queue;
	struct completion offchan_tx_completed;
	struct sk_buff *offchan_tx_skb;

#ifdef CONFIG_ATH10K_DEBUGFS
	struct ath10k_debug debug;
#endif
};
357
/* Core lifecycle API, called by the bus layer (e.g. pci.c). */
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
				  enum ath10k_bus bus,
				  const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);

int ath10k_core_register(struct ath10k *ar);
void ath10k_core_unregister(struct ath10k *ar);

int ath10k_core_target_suspend(struct ath10k *ar);
int ath10k_core_target_resume(struct ath10k *ar);
368
369#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
new file mode 100644
index 000000000000..499034b873d1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -0,0 +1,503 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/module.h>
19#include <linux/debugfs.h>
20
21#include "core.h"
22#include "debug.h"
23
24static int ath10k_printk(const char *level, const char *fmt, ...)
25{
26 struct va_format vaf;
27 va_list args;
28 int rtn;
29
30 va_start(args, fmt);
31
32 vaf.fmt = fmt;
33 vaf.va = &args;
34
35 rtn = printk("%sath10k: %pV", level, &vaf);
36
37 va_end(args);
38
39 return rtn;
40}
41
42int ath10k_info(const char *fmt, ...)
43{
44 struct va_format vaf = {
45 .fmt = fmt,
46 };
47 va_list args;
48 int ret;
49
50 va_start(args, fmt);
51 vaf.va = &args;
52 ret = ath10k_printk(KERN_INFO, "%pV", &vaf);
53 trace_ath10k_log_info(&vaf);
54 va_end(args);
55
56 return ret;
57}
58EXPORT_SYMBOL(ath10k_info);
59
60int ath10k_err(const char *fmt, ...)
61{
62 struct va_format vaf = {
63 .fmt = fmt,
64 };
65 va_list args;
66 int ret;
67
68 va_start(args, fmt);
69 vaf.va = &args;
70 ret = ath10k_printk(KERN_ERR, "%pV", &vaf);
71 trace_ath10k_log_err(&vaf);
72 va_end(args);
73
74 return ret;
75}
76EXPORT_SYMBOL(ath10k_err);
77
78int ath10k_warn(const char *fmt, ...)
79{
80 struct va_format vaf = {
81 .fmt = fmt,
82 };
83 va_list args;
84 int ret = 0;
85
86 va_start(args, fmt);
87 vaf.va = &args;
88
89 if (net_ratelimit())
90 ret = ath10k_printk(KERN_WARNING, "%pV", &vaf);
91
92 trace_ath10k_log_warn(&vaf);
93
94 va_end(args);
95
96 return ret;
97}
98EXPORT_SYMBOL(ath10k_warn);
99
100#ifdef CONFIG_ATH10K_DEBUGFS
101
102void ath10k_debug_read_service_map(struct ath10k *ar,
103 void *service_map,
104 size_t map_size)
105{
106 memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
107}
108
109static ssize_t ath10k_read_wmi_services(struct file *file,
110 char __user *user_buf,
111 size_t count, loff_t *ppos)
112{
113 struct ath10k *ar = file->private_data;
114 char *buf;
115 unsigned int len = 0, buf_len = 1500;
116 const char *status;
117 ssize_t ret_cnt;
118 int i;
119
120 buf = kzalloc(buf_len, GFP_KERNEL);
121 if (!buf)
122 return -ENOMEM;
123
124 mutex_lock(&ar->conf_mutex);
125
126 if (len > buf_len)
127 len = buf_len;
128
129 for (i = 0; i < WMI_SERVICE_LAST; i++) {
130 if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i))
131 status = "enabled";
132 else
133 status = "disabled";
134
135 len += scnprintf(buf + len, buf_len - len,
136 "0x%02x - %20s - %s\n",
137 i, wmi_service_name(i), status);
138 }
139
140 ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
141
142 mutex_unlock(&ar->conf_mutex);
143
144 kfree(buf);
145 return ret_cnt;
146}
147
/* File operations for the read-only "wmi_services" debugfs entry. */
static const struct file_operations fops_wmi_services = {
	.read = ath10k_read_wmi_services,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
154
155void ath10k_debug_read_target_stats(struct ath10k *ar,
156 struct wmi_stats_event *ev)
157{
158 u8 *tmp = ev->data;
159 struct ath10k_target_stats *stats;
160 int num_pdev_stats, num_vdev_stats, num_peer_stats;
161 struct wmi_pdev_stats *ps;
162 int i;
163
164 mutex_lock(&ar->conf_mutex);
165
166 stats = &ar->debug.target_stats;
167
168 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
169 num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
170 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
171
172 if (num_pdev_stats) {
173 ps = (struct wmi_pdev_stats *)tmp;
174
175 stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
176 stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
177 stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
178 stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
179 stats->cycle_count = __le32_to_cpu(ps->cycle_count);
180 stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
181 stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);
182
183 stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
184 stats->comp_delivered =
185 __le32_to_cpu(ps->wal.tx.comp_delivered);
186 stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
187 stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
188 stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
189 stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
190 stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
191 stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
192 stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
193 stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
194 stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
195 stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
196 stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
197 stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
198 stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
199 stats->sw_retry_failure =
200 __le32_to_cpu(ps->wal.tx.sw_retry_failure);
201 stats->illgl_rate_phy_err =
202 __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
203 stats->pdev_cont_xretry =
204 __le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
205 stats->pdev_tx_timeout =
206 __le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
207 stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
208 stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
209 stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);
210
211 stats->mid_ppdu_route_change =
212 __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
213 stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
214 stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
215 stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
216 stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
217 stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
218 stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
219 stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
220 stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
221 stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
222 stats->oversize_amsdu =
223 __le32_to_cpu(ps->wal.rx.oversize_amsdu);
224 stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
225 stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
226 stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
227
228 tmp += sizeof(struct wmi_pdev_stats);
229 }
230
231 /* 0 or max vdevs */
232 /* Currently firmware does not support VDEV stats */
233 if (num_vdev_stats) {
234 struct wmi_vdev_stats *vdev_stats;
235
236 for (i = 0; i < num_vdev_stats; i++) {
237 vdev_stats = (struct wmi_vdev_stats *)tmp;
238 tmp += sizeof(struct wmi_vdev_stats);
239 }
240 }
241
242 if (num_peer_stats) {
243 struct wmi_peer_stats *peer_stats;
244 struct ath10k_peer_stat *s;
245
246 stats->peers = num_peer_stats;
247
248 for (i = 0; i < num_peer_stats; i++) {
249 peer_stats = (struct wmi_peer_stats *)tmp;
250 s = &stats->peer_stat[i];
251
252 WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
253 s->peer_macaddr);
254 s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
255 s->peer_tx_rate =
256 __le32_to_cpu(peer_stats->peer_tx_rate);
257
258 tmp += sizeof(struct wmi_peer_stats);
259 }
260 }
261
262 mutex_unlock(&ar->conf_mutex);
263 complete(&ar->debug.event_stats_compl);
264}
265
/* debugfs read handler for "fw_stats": request fresh stats from
 * firmware over WMI, wait (up to 1s) for the event to be parsed by
 * ath10k_debug_read_target_stats(), then pretty-print the cached copy.
 * Returns bytes copied to user space, -EIO on WMI failure, or
 * -ETIMEDOUT if firmware never answered. */
static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	struct ath10k_target_stats *fw_stats;
	char *buf;
	unsigned int len = 0, buf_len = 2500;
	ssize_t ret_cnt;
	long left;
	int i;
	int ret;

	fw_stats = &ar->debug.target_stats;

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
	if (ret) {
		ath10k_warn("could not request stats (%d)\n", ret);
		kfree(buf);
		return -EIO;
	}

	/* woken by complete() in ath10k_debug_read_target_stats() */
	left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);

	if (left <= 0) {
		kfree(buf);
		return -ETIMEDOUT;
	}

	mutex_lock(&ar->conf_mutex);

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath10k PDEV stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Channel noise floor", fw_stats->ch_noise_floor);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Channel TX power", fw_stats->chan_tx_power);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX frame count", fw_stats->tx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX frame count", fw_stats->rx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX clear count", fw_stats->rx_clear_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Cycle count", fw_stats->cycle_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PHY error count", fw_stats->phy_err_count);

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath10k PDEV TX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies queued", fw_stats->comp_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies disp.", fw_stats->comp_delivered);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDU queued", fw_stats->msdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU queued", fw_stats->mpdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs dropped", fw_stats->wmm_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local enqued", fw_stats->local_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local freed", fw_stats->local_freed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HW queued", fw_stats->hw_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs reaped", fw_stats->hw_reaped);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Num underruns", fw_stats->underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs cleaned", fw_stats->tx_abort);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs requed", fw_stats->mpdus_requed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Excessive retries", fw_stats->tx_ko);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HW rate", fw_stats->data_rc);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Sched self tiggers", fw_stats->self_triggers);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Dropped due to SW retries",
			 fw_stats->sw_retry_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Illegal rate phy errors",
			 fw_stats->illgl_rate_phy_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Pdev continous xretry", fw_stats->pdev_cont_xretry);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "TX timeout", fw_stats->pdev_tx_timeout);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PDEV resets", fw_stats->pdev_resets);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY underrun", fw_stats->phy_underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU is more than txop limit", fw_stats->txop_ovf);

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath10k PDEV RX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Mid PPDU route change",
			 fw_stats->mid_ppdu_route_change);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Tot. number of statuses", fw_stats->status_rcvd);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 0", fw_stats->r0_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 1", fw_stats->r1_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 2", fw_stats->r2_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 3", fw_stats->r3_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to HTT", fw_stats->htt_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to HTT", fw_stats->htt_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to stack", fw_stats->loc_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to stack", fw_stats->loc_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Oversized AMSUs", fw_stats->oversize_amsdu);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors", fw_stats->phy_errs);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors drops", fw_stats->phy_err_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath10k PEER stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	for (i = 0; i < fw_stats->peers; i++) {
		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
				 "Peer MAC address",
				 fw_stats->peer_stat[i].peer_macaddr);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "Peer TX rate",
				 fw_stats->peer_stat[i].peer_tx_rate);
		len += scnprintf(buf + len, buf_len - len, "\n");
	}

	if (len > buf_len)
		len = buf_len;

	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);

	mutex_unlock(&ar->conf_mutex);

	kfree(buf);
	return ret_cnt;
}
438
/* File operations for the read-only "fw_stats" debugfs entry. */
static const struct file_operations fops_fw_stats = {
	.read = ath10k_read_fw_stats,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
445
/* Create the per-device "ath10k" debugfs directory and its entries.
 * Returns 0 on success, -ENOMEM if the directory could not be created. */
int ath10k_debug_create(struct ath10k *ar)
{
	ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
						   ar->hw->wiphy->debugfsdir);

	if (!ar->debug.debugfs_phy)
		return -ENOMEM;

	init_completion(&ar->debug.event_stats_compl);

	debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
			    &fops_fw_stats);

	debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
			    &fops_wmi_services);

	return 0;
}
464#endif /* CONFIG_ATH10K_DEBUGFS */
465
466#ifdef CONFIG_ATH10K_DEBUG
467void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
468{
469 struct va_format vaf;
470 va_list args;
471
472 va_start(args, fmt);
473
474 vaf.fmt = fmt;
475 vaf.va = &args;
476
477 if (ath10k_debug_mask & mask)
478 ath10k_printk(KERN_DEBUG, "%pV", &vaf);
479
480 trace_ath10k_log_dbg(mask, &vaf);
481
482 va_end(args);
483}
484EXPORT_SYMBOL(ath10k_dbg);
485
486void ath10k_dbg_dump(enum ath10k_debug_mask mask,
487 const char *msg, const char *prefix,
488 const void *buf, size_t len)
489{
490 if (ath10k_debug_mask & mask) {
491 if (msg)
492 ath10k_dbg(mask, "%s\n", msg);
493
494 print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
495 }
496
497 /* tracing code doesn't like null strings :/ */
498 trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
499 buf, len);
500}
501EXPORT_SYMBOL(ath10k_dbg_dump);
502
503#endif /* CONFIG_ATH10K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
new file mode 100644
index 000000000000..168140c54028
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -0,0 +1,90 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _DEBUG_H_
19#define _DEBUG_H_
20
21#include <linux/types.h>
22#include "trace.h"
23
/* Debug log categories; the module-wide ath10k_debug_mask selects which
 * categories ath10k_dbg()/ath10k_dbg_dump() actually print. */
enum ath10k_debug_mask {
	ATH10K_DBG_PCI = 0x00000001,
	ATH10K_DBG_WMI = 0x00000002,
	ATH10K_DBG_HTC = 0x00000004,
	ATH10K_DBG_HTT = 0x00000008,
	ATH10K_DBG_MAC = 0x00000010,
	ATH10K_DBG_CORE = 0x00000020,
	ATH10K_DBG_PCI_DUMP = 0x00000040,
	ATH10K_DBG_HTT_DUMP = 0x00000080,
	ATH10K_DBG_MGMT = 0x00000100,
	ATH10K_DBG_DATA = 0x00000200,
	ATH10K_DBG_ANY = 0xffffffff,
};

extern unsigned int ath10k_debug_mask;

/* Unconditional log helpers (warn is rate-limited; see debug.c). */
extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
43
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_debug_create(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
				   void *service_map,
				   size_t map_size);
void ath10k_debug_read_target_stats(struct ath10k *ar,
				    struct wmi_stats_event *ev);

#else
/* No-op stubs so callers need no #ifdefs when debugfs is disabled. */
static inline int ath10k_debug_create(struct ath10k *ar)
{
	return 0;
}

static inline void ath10k_debug_read_service_map(struct ath10k *ar,
						 void *service_map,
						 size_t map_size)
{
}

static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
						  struct wmi_stats_event *ev)
{
}
#endif /* CONFIG_ATH10K_DEBUGFS */
69
70#ifdef CONFIG_ATH10K_DEBUG
71extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
72 const char *fmt, ...);
73void ath10k_dbg_dump(enum ath10k_debug_mask mask,
74 const char *msg, const char *prefix,
75 const void *buf, size_t len);
76#else /* CONFIG_ATH10K_DEBUG */
77
78static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask,
79 const char *fmt, ...)
80{
81 return 0;
82}
83
84static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask,
85 const char *msg, const char *prefix,
86 const void *buf, size_t len)
87{
88}
89#endif /* CONFIG_ATH10K_DEBUG */
90#endif /* _DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
new file mode 100644
index 000000000000..73a24d44d1b4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -0,0 +1,137 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _HIF_H_
19#define _HIF_H_
20
21#include <linux/kernel.h>
22#include "core.h"
23
/* Callbacks the upper layer registers with HIF (via ops->init) to be
 * notified of tx completions and received buffers. */
struct ath10k_hif_cb {
	int (*tx_completion)(struct ath10k *ar,
			     struct sk_buff *wbuf,
			     unsigned transfer_id);
	int (*rx_completion)(struct ath10k *ar,
			     struct sk_buff *wbuf,
			     u8 pipe_id);
};
32
/* Bus-specific host interface operations; implemented by each bus
 * backend (PCI) and dispatched through the inline wrappers below. */
struct ath10k_hif_ops {
	/* Send the head of a buffer to HIF for transmission to the target. */
	int (*send_head)(struct ath10k *ar, u8 pipe_id,
			 unsigned int transfer_id,
			 unsigned int nbytes,
			 struct sk_buff *buf);

	/*
	 * API to handle HIF-specific BMI message exchanges, this API is
	 * synchronous and only allowed to be called from a context that
	 * can block (sleep)
	 */
	int (*exchange_bmi_msg)(struct ath10k *ar,
				void *request, u32 request_len,
				void *response, u32 *response_len);

	int (*start)(struct ath10k *ar);

	void (*stop)(struct ath10k *ar);

	int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
				   u8 *ul_pipe, u8 *dl_pipe,
				   int *ul_is_polled, int *dl_is_polled);

	void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);

	/*
	 * Check if prior sends have completed.
	 *
	 * Check whether the pipe in question has any completed
	 * sends that have not yet been processed.
	 * This function is only relevant for HIF pipes that are configured
	 * to be polled rather than interrupt-driven.
	 */
	void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);

	void (*init)(struct ath10k *ar,
		     struct ath10k_hif_cb *callbacks);

	u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
};
74
75
/* Thin inline wrappers dispatching to the bus-specific HIF ops. */

static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
				       unsigned int transfer_id,
				       unsigned int nbytes,
				       struct sk_buff *buf)
{
	return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
}

/* Synchronous BMI exchange; may sleep (see ops->exchange_bmi_msg). */
static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
					      void *request, u32 request_len,
					      void *response, u32 *response_len)
{
	return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
					     response, response_len);
}

static inline int ath10k_hif_start(struct ath10k *ar)
{
	return ar->hif.ops->start(ar);
}
96
97static inline void ath10k_hif_stop(struct ath10k *ar)
98{
99 return ar->hif.ops->stop(ar);
100}
101
static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
						 u16 service_id,
						 u8 *ul_pipe, u8 *dl_pipe,
						 int *ul_is_polled,
						 int *dl_is_polled)
{
	return ar->hif.ops->map_service_to_pipe(ar, service_id,
						ul_pipe, dl_pipe,
						ul_is_polled, dl_is_polled);
}

static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
}

/* Only meaningful for pipes configured for polling (see ops comment). */
static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
						  u8 pipe_id, int force)
{
	ar->hif.ops->send_complete_check(ar, pipe_id, force);
}

/* Register the upper layer's tx/rx completion callbacks with HIF. */
static inline void ath10k_hif_init(struct ath10k *ar,
				   struct ath10k_hif_cb *callbacks)
{
	ar->hif.ops->init(ar, callbacks);
}

static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
						   u8 pipe_id)
{
	return ar->hif.ops->get_free_queue_number(ar, pipe_id);
}
136
137#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
new file mode 100644
index 000000000000..74363c949392
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -0,0 +1,1000 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "core.h"
19#include "hif.h"
20#include "debug.h"
21
22/********/
23/* Send */
24/********/
25
/* Poll HIF for finished sends on the endpoint's uplink pipe. */
static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
						  int force)
{
	/*
	 * Check whether HIF has any prior sends that have finished but
	 * have not yet had their post-processing done.
	 */
	ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
}
35
/* TX completion for HTC control messages: nothing to post-process,
 * just release the skb. */
static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}
41
/* Allocate an skb suitable for an HTC control message and zero its
 * ath10k control block. Returns NULL on allocation failure. */
static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;

	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
	if (!skb) {
		ath10k_warn("Unable to allocate ctrl skb\n");
		return NULL;
	}

	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
	/* HTC requires 4-byte aligned streams; warn once if violated */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	skb_cb = ATH10K_SKB_CB(skb);
	memset(skb_cb, 0, sizeof(*skb_cb));

	ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
	return skb;
}
62
/* Undo the tx-side transformations: unmap the skb from DMA and strip
 * the HTC header pushed before transmission. */
static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
					     struct sk_buff *skb)
{
	ath10k_skb_unmap(htc->ar->dev, skb);
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
69
/* Restore the skb and hand it to the endpoint's tx-complete callback.
 * If no callback is registered the skb is dropped here. Ownership of
 * the skb passes to the callback (or is freed). */
static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
		   ep->eid, skb);

	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn("no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
86
87/* assumes tx_lock is held */
88static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
89{
90 if (!ep->tx_credit_flow_enabled)
91 return false;
92 if (ep->tx_credits >= ep->tx_credits_per_max_message)
93 return false;
94
95 ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
96 ep->eid);
97 return true;
98}
99
/* Fill in the HTC header at the front of @skb (eid, payload length,
 * sequence number, flags). The header space must already have been
 * reserved via skb_push(). */
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
				      struct sk_buff *skb)
{
	struct ath10k_htc_hdr *hdr;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	memset(hdr, 0, sizeof(*hdr));

	hdr->eid = ep->eid;
	/* hdr->len covers everything after the header itself */
	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));

	/* seq_no and the credit-update decision are protected by tx_lock */
	spin_lock_bh(&ep->htc->tx_lock);
	hdr->seq_no = ep->seq_no++;

	if (ath10k_htc_ep_need_credit_update(ep))
		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;

	spin_unlock_bh(&ep->htc->tx_lock);
}
119
/* Prepare, DMA-map and submit one skb to HIF on the endpoint's uplink
 * pipe. @credits is how many credits were reserved for this skb; on
 * failure they are returned to the endpoint. -ENOSR requeues the skb
 * at the head of the tx queue; any other error completes it as aborted. */
static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
				struct ath10k_htc_ep *ep,
				struct sk_buff *skb,
				u8 credits)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	int ret;

	ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
		   ep->eid, skb);

	ath10k_htc_prepare_tx_skb(ep, skb);

	ret = ath10k_skb_map(htc->ar->dev, skb);
	if (ret)
		goto err;

	ret = ath10k_hif_send_head(htc->ar,
				   ep->ul_pipe_id,
				   ep->eid,
				   skb->len,
				   skb);
	if (unlikely(ret))
		goto err;

	return 0;
err:
	ath10k_warn("HTC issue failed: %d\n", ret);

	/* give back the credits reserved for this skb */
	spin_lock_bh(&htc->tx_lock);
	ep->tx_credits += credits;
	spin_unlock_bh(&htc->tx_lock);

	/* this is the simplest way to handle out-of-resources for non-credit
	 * based endpoints. credit based endpoints can still get -ENOSR, but
	 * this is highly unlikely as credit reservation should prevent that */
	if (ret == -ENOSR) {
		spin_lock_bh(&htc->tx_lock);
		__skb_queue_head(&ep->tx_queue, skb);
		spin_unlock_bh(&htc->tx_lock);

		return ret;
	}

	skb_cb->is_aborted = true;
	ath10k_htc_notify_tx_completion(ep, skb);

	return ret;
}
169
/* Dequeue the next skb from the endpoint's tx queue if enough credits
 * are available. On success, the required credits are deducted and
 * written to *@credits. If credits are insufficient the skb is put
 * back and NULL is returned. Caller must hold htc->tx_lock. */
static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
						       struct ath10k_htc_ep *ep,
						       u8 *credits)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;
	int credits_required;
	int remainder;
	unsigned int transfer_len;

	lockdep_assert_held(&htc->tx_lock);

	skb = __skb_dequeue(&ep->tx_queue);
	if (!skb)
		return NULL;

	skb_cb = ATH10K_SKB_CB(skb);
	transfer_len = skb->len;

	if (likely(transfer_len <= htc->target_credit_size)) {
		credits_required = 1;
	} else {
		/* figure out how many credits this message requires:
		 * round transfer_len up to whole credit units */
		credits_required = transfer_len / htc->target_credit_size;
		remainder = transfer_len % htc->target_credit_size;

		if (remainder)
			credits_required++;
	}

	ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
		   credits_required, ep->tx_credits);

	if (ep->tx_credits < credits_required) {
		/* not enough credits yet; put the skb back for later */
		__skb_queue_head(&ep->tx_queue, skb);
		return NULL;
	}

	ep->tx_credits -= credits_required;
	*credits = credits_required;
	return skb;
}
212
/* Endpoint TX worker: drains the endpoint's tx queue, reserving credits
 * first when credit flow is enabled, until the queue is empty or HIF
 * reports out-of-resources (-ENOSR). */
static void ath10k_htc_send_work(struct work_struct *work)
{
	struct ath10k_htc_ep *ep = container_of(work,
						struct ath10k_htc_ep, send_work);
	struct ath10k_htc *htc = ep->htc;
	struct sk_buff *skb;
	u8 credits = 0;
	int ret;

	while (true) {
		/* on polled uplink pipes, reap finished sends first */
		if (ep->ul_is_polled)
			ath10k_htc_send_complete_check(ep, 0);

		spin_lock_bh(&htc->tx_lock);
		if (ep->tx_credit_flow_enabled)
			skb = ath10k_htc_get_skb_credit_based(htc, ep,
							      &credits);
		else
			skb = __skb_dequeue(&ep->tx_queue);
		spin_unlock_bh(&htc->tx_lock);

		if (!skb)
			break;

		/* -ENOSR requeued the skb inside issue_skb; try again
		 * on the next completion/credit event */
		ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
		if (ret == -ENOSR)
			break;
	}
}
242
243int ath10k_htc_send(struct ath10k_htc *htc,
244 enum ath10k_htc_ep_id eid,
245 struct sk_buff *skb)
246{
247 struct ath10k_htc_ep *ep = &htc->endpoint[eid];
248
249 if (eid >= ATH10K_HTC_EP_COUNT) {
250 ath10k_warn("Invalid endpoint id: %d\n", eid);
251 return -ENOENT;
252 }
253
254 skb_push(skb, sizeof(struct ath10k_htc_hdr));
255
256 spin_lock_bh(&htc->tx_lock);
257 __skb_queue_tail(&ep->tx_queue, skb);
258 spin_unlock_bh(&htc->tx_lock);
259
260 queue_work(htc->ar->workqueue, &ep->send_work);
261 return 0;
262}
263
/* HIF tx-completion callback: completes the skb back to the endpoint
 * owner and, for non-credit-flow endpoints, re-arms the send worker so
 * queued packets get another chance. */
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
					    struct sk_buff *skb,
					    unsigned int eid)
{
	struct ath10k_htc *htc = ar->htc;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	bool stopping;

	ath10k_htc_notify_tx_completion(ep, skb);
	/* the skb now belongs to the completion handler */

	spin_lock_bh(&htc->tx_lock);
	stopping = htc->stopping;
	spin_unlock_bh(&htc->tx_lock);

	if (!ep->tx_credit_flow_enabled && !stopping)
		/*
		 * note: when using TX credit flow, the re-checking of
		 * queues happens when credits flow back from the target.
		 * in the non-TX credit case, we recheck after the packet
		 * completes
		 */
		queue_work(ar->workqueue, &ep->send_work);

	return 0;
}
290
/* flush endpoint TX queue: complete every queued skb as aborted and
 * make sure the send worker is no longer running for this endpoint */
static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
					 struct ath10k_htc_ep *ep)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;

	spin_lock_bh(&htc->tx_lock);
	for (;;) {
		skb = __skb_dequeue(&ep->tx_queue);
		if (!skb)
			break;

		/* mark as aborted so the owner's completion handler can
		 * tell a flush from a real transmission */
		skb_cb = ATH10K_SKB_CB(skb);
		skb_cb->is_aborted = true;
		ath10k_htc_notify_tx_completion(ep, skb);
	}
	spin_unlock_bh(&htc->tx_lock);

	cancel_work_sync(&ep->send_work);
}
312
313/***********/
314/* Receive */
315/***********/
316
/* Apply a target credit report: add the returned credits to each
 * endpoint and re-arm the send worker for endpoints that now have both
 * credits and queued packets. @len is the report blob size in bytes. */
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	if (len % sizeof(*report))
		ath10k_warn("Uneven credit report len %d", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		/* stop at the first entry with a bogus endpoint id */
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
			   report->eid, report->credits);

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
			queue_work(htc->ar->workqueue, &ep->send_work);
	}
	spin_unlock_bh(&htc->tx_lock);
}
347
348static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
349 u8 *buffer,
350 int length,
351 enum ath10k_htc_ep_id src_eid)
352{
353 int status = 0;
354 struct ath10k_htc_record *record;
355 u8 *orig_buffer;
356 int orig_length;
357 size_t len;
358
359 orig_buffer = buffer;
360 orig_length = length;
361
362 while (length > 0) {
363 record = (struct ath10k_htc_record *)buffer;
364
365 if (length < sizeof(record->hdr)) {
366 status = -EINVAL;
367 break;
368 }
369
370 if (record->hdr.len > length) {
371 /* no room left in buffer for record */
372 ath10k_warn("Invalid record length: %d\n",
373 record->hdr.len);
374 status = -EINVAL;
375 break;
376 }
377
378 switch (record->hdr.id) {
379 case ATH10K_HTC_RECORD_CREDITS:
380 len = sizeof(struct ath10k_htc_credit_report);
381 if (record->hdr.len < len) {
382 ath10k_warn("Credit report too long\n");
383 status = -EINVAL;
384 break;
385 }
386 ath10k_htc_process_credit_report(htc,
387 record->credit_report,
388 record->hdr.len,
389 src_eid);
390 break;
391 default:
392 ath10k_warn("Unhandled record: id:%d length:%d\n",
393 record->hdr.id, record->hdr.len);
394 break;
395 }
396
397 if (status)
398 break;
399
400 /* multiple records may be present in a trailer */
401 buffer += sizeof(record->hdr) + record->hdr.len;
402 length -= sizeof(record->hdr) + record->hdr.len;
403 }
404
405 if (status)
406 ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "",
407 orig_buffer, orig_length);
408
409 return status;
410}
411
/* HIF rx-completion callback: validate the HTC header, process any
 * trailer (credit reports), then dispatch the payload either to the
 * HTC control machinery (endpoint 0) or to the endpoint's rx-complete
 * callback. The skb is consumed in all cases. */
static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
					    struct sk_buff *skb,
					    u8 pipe_id)
{
	int status = 0;
	struct ath10k_htc *htc = ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn("HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	ep = &htc->endpoint[eid];

	/*
	 * If this endpoint that received a message from the target has
	 * a to-target HIF pipe whose send completions are polled rather
	 * than interrupt-driven, this is a good point to ask HIF to check
	 * whether it has any completed sends to handle.
	 */
	if (ep->ul_is_polled)
		ath10k_htc_send_complete_check(ep, 1);

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn("HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn("Invalid trailer length: %d\n",
				    trailer_len);
			status = -EPROTO;
			goto out;
		}

		/* trailer occupies the last trailer_len bytes of payload */
		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	if (eid == ATH10K_HTC_EP_0) {
		struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

		switch (__le16_to_cpu(msg->hdr.message_id)) {
		default:
			/* handle HTC control message */
			if (completion_done(&htc->ctl_resp)) {
				/*
				 * this is a fatal error, target should not be
				 * sending unsolicited messages on the ep 0
				 */
				ath10k_warn("HTC rx ctrl still processing\n");
				status = -EINVAL;
				complete(&htc->ctl_resp);
				goto out;
			}

			/* stash the response for the waiter in
			 * wait_target/connect_service and wake it */
			htc->control_resp_len =
				min_t(int, skb->len,
				      ATH10K_HTC_MAX_CTRL_MSG_LEN);

			memcpy(htc->control_resp_buffer, skb->data,
			       htc->control_resp_len);

			complete(&htc->ctl_resp);
			break;
		case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			htc->htc_ops.target_send_suspend_complete(ar);
		}
		goto out;
	}

	ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);

	return status;
}
546
/* RX callback for the pseudo control endpoint. Firmware is not supposed
 * to deliver regular rx traffic here, so anything that arrives is
 * logged and dropped. */
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_warn("unexpected htc rx\n");
	kfree_skb(skb);
}
555
556/***************/
557/* Init/Deinit */
558/***************/
559
560static const char *htc_service_name(enum ath10k_htc_svc_id id)
561{
562 switch (id) {
563 case ATH10K_HTC_SVC_ID_RESERVED:
564 return "Reserved";
565 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
566 return "Control";
567 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
568 return "WMI";
569 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
570 return "DATA BE";
571 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
572 return "DATA BK";
573 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
574 return "DATA VI";
575 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
576 return "DATA VO";
577 case ATH10K_HTC_SVC_ID_NMI_CONTROL:
578 return "NMI Control";
579 case ATH10K_HTC_SVC_ID_NMI_DATA:
580 return "NMI Data";
581 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
582 return "HTT Data";
583 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
584 return "RAW";
585 }
586
587 return "Unknown";
588}
589
/* Reset every endpoint to its unused default state: no service bound,
 * empty tx queue, credit flow enabled, fresh send worker. */
static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
{
	struct ath10k_htc_ep *ep;
	int i;

	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
		ep = &htc->endpoint[i];
		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
		ep->max_ep_message_len = 0;
		ep->max_tx_queue_depth = 0;
		ep->eid = i;
		skb_queue_head_init(&ep->tx_queue);
		ep->htc = htc;
		ep->tx_credit_flow_enabled = true;
		INIT_WORK(&ep->send_work, ath10k_htc_send_work);
	}
}
607
/* Distribute the target's transmit credits among services; entry 0 of
 * service_tx_alloc is skipped and everything is given to WMI control. */
static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
{
	struct ath10k_htc_svc_tx_credits *entry;

	entry = &htc->service_tx_alloc[0];

	/*
	 * for PCIE allocate all credits/HTC buffers to WMI.
	 * no buffers are used/required for data. data always
	 * remains on host.
	 */
	entry++;
	entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
	entry->credit_allocation = htc->total_transmit_credits;
}
623
624static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
625 u16 service_id)
626{
627 u8 allocation = 0;
628 int i;
629
630 for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
631 if (htc->service_tx_alloc[i].service_id == service_id)
632 allocation =
633 htc->service_tx_alloc[i].credit_allocation;
634 }
635
636 return allocation;
637}
638
/* Start HIF, wait for the target's HTC READY message, record the
 * advertised credit pool, and connect the pseudo control service on
 * endpoint 0. Returns 0 on success; on failure HIF is stopped again. */
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	int status = 0;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;
	struct ath10k_htc_msg *msg;
	u16 message_id;
	u16 credit_count;
	u16 credit_size;

	INIT_COMPLETION(htc->ctl_resp);

	status = ath10k_hif_start(htc->ar);
	if (status) {
		ath10k_err("could not start HIF (%d)\n", status);
		goto err_start;
	}

	/* the READY message is delivered via the rx completion handler,
	 * which completes ctl_resp */
	status = wait_for_completion_timeout(&htc->ctl_resp,
					     ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (status <= 0) {
		if (status == 0)
			status = -ETIMEDOUT;

		ath10k_err("ctl_resp never came in (%d)\n", status);
		goto err_target;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err("Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);

		status = -ECOMM;
		goto err_target;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id   = __le16_to_cpu(msg->hdr.message_id);
	credit_count = __le16_to_cpu(msg->ready.credit_count);
	credit_size  = __le16_to_cpu(msg->ready.credit_size);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
		status = -ECOMM;
		goto err_target;
	}

	htc->total_transmit_credits = credit_count;
	htc->target_credit_size = credit_size;

	ath10k_dbg(ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		status = -ECOMM;
		ath10k_err("Invalid credit size received\n");
		goto err_target;
	}

	ath10k_htc_setup_target_buffer_assignments(htc);

	/* setup our pseudo HTC control endpoint connection */
	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));
	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;

	/* connect fake service */
	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_err("could not connect to htc service (%d)\n", status);
		goto err_target;
	}

	return 0;
err_target:
	ath10k_hif_stop(htc->ar);
err_start:
	return status;
}
724
725int ath10k_htc_connect_service(struct ath10k_htc *htc,
726 struct ath10k_htc_svc_conn_req *conn_req,
727 struct ath10k_htc_svc_conn_resp *conn_resp)
728{
729 struct ath10k_htc_msg *msg;
730 struct ath10k_htc_conn_svc *req_msg;
731 struct ath10k_htc_conn_svc_response resp_msg_dummy;
732 struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
733 enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
734 struct ath10k_htc_ep *ep;
735 struct sk_buff *skb;
736 unsigned int max_msg_size = 0;
737 int length, status;
738 bool disable_credit_flow_ctrl = false;
739 u16 message_id, service_id, flags = 0;
740 u8 tx_alloc = 0;
741
742 /* special case for HTC pseudo control service */
743 if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
744 disable_credit_flow_ctrl = true;
745 assigned_eid = ATH10K_HTC_EP_0;
746 max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
747 memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
748 goto setup;
749 }
750
751 tx_alloc = ath10k_htc_get_credit_allocation(htc,
752 conn_req->service_id);
753 if (!tx_alloc)
754 ath10k_warn("HTC Service %s does not allocate target credits\n",
755 htc_service_name(conn_req->service_id));
756
757 skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
758 if (!skb) {
759 ath10k_err("Failed to allocate HTC packet\n");
760 return -ENOMEM;
761 }
762
763 length = sizeof(msg->hdr) + sizeof(msg->connect_service);
764 skb_put(skb, length);
765 memset(skb->data, 0, length);
766
767 msg = (struct ath10k_htc_msg *)skb->data;
768 msg->hdr.message_id =
769 __cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
770
771 flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
772
773 req_msg = &msg->connect_service;
774 req_msg->flags = __cpu_to_le16(flags);
775 req_msg->service_id = __cpu_to_le16(conn_req->service_id);
776
777 /* Only enable credit flow control for WMI ctrl service */
778 if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
779 flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
780 disable_credit_flow_ctrl = true;
781 }
782
783 INIT_COMPLETION(htc->ctl_resp);
784
785 status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
786 if (status) {
787 kfree_skb(skb);
788 return status;
789 }
790
791 /* wait for response */
792 status = wait_for_completion_timeout(&htc->ctl_resp,
793 ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
794 if (status <= 0) {
795 if (status == 0)
796 status = -ETIMEDOUT;
797 ath10k_err("Service connect timeout: %d\n", status);
798 return status;
799 }
800
801 /* we controlled the buffer creation, it's aligned */
802 msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
803 resp_msg = &msg->connect_service_response;
804 message_id = __le16_to_cpu(msg->hdr.message_id);
805 service_id = __le16_to_cpu(resp_msg->service_id);
806
807 if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
808 (htc->control_resp_len < sizeof(msg->hdr) +
809 sizeof(msg->connect_service_response))) {
810 ath10k_err("Invalid resp message ID 0x%x", message_id);
811 return -EPROTO;
812 }
813
814 ath10k_dbg(ATH10K_DBG_HTC,
815 "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
816 htc_service_name(service_id),
817 resp_msg->status, resp_msg->eid);
818
819 conn_resp->connect_resp_code = resp_msg->status;
820
821 /* check response status */
822 if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
823 ath10k_err("HTC Service %s connect request failed: 0x%x)\n",
824 htc_service_name(service_id),
825 resp_msg->status);
826 return -EPROTO;
827 }
828
829 assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
830 max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
831
832setup:
833
834 if (assigned_eid >= ATH10K_HTC_EP_COUNT)
835 return -EPROTO;
836
837 if (max_msg_size == 0)
838 return -EPROTO;
839
840 ep = &htc->endpoint[assigned_eid];
841 ep->eid = assigned_eid;
842
843 if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
844 return -EPROTO;
845
846 /* return assigned endpoint to caller */
847 conn_resp->eid = assigned_eid;
848 conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
849
850 /* setup the endpoint */
851 ep->service_id = conn_req->service_id;
852 ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
853 ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
854 ep->tx_credits = tx_alloc;
855 ep->tx_credit_size = htc->target_credit_size;
856 ep->tx_credits_per_max_message = ep->max_ep_message_len /
857 htc->target_credit_size;
858
859 if (ep->max_ep_message_len % htc->target_credit_size)
860 ep->tx_credits_per_max_message++;
861
862 /* copy all the callbacks */
863 ep->ep_ops = conn_req->ep_ops;
864
865 status = ath10k_hif_map_service_to_pipe(htc->ar,
866 ep->service_id,
867 &ep->ul_pipe_id,
868 &ep->dl_pipe_id,
869 &ep->ul_is_polled,
870 &ep->dl_is_polled);
871 if (status)
872 return status;
873
874 ath10k_dbg(ATH10K_DBG_HTC,
875 "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
876 htc_service_name(ep->service_id), ep->ul_pipe_id,
877 ep->dl_pipe_id, ep->eid);
878
879 ath10k_dbg(ATH10K_DBG_HTC,
880 "EP %d UL polled: %d, DL polled: %d\n",
881 ep->eid, ep->ul_is_polled, ep->dl_is_polled);
882
883 if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
884 ep->tx_credit_flow_enabled = false;
885 ath10k_dbg(ATH10K_DBG_HTC,
886 "HTC service: %s eid: %d TX flow control disabled\n",
887 htc_service_name(ep->service_id), assigned_eid);
888 }
889
890 return status;
891}
892
893struct sk_buff *ath10k_htc_alloc_skb(int size)
894{
895 struct sk_buff *skb;
896
897 skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
898 if (!skb) {
899 ath10k_warn("could not allocate HTC tx skb\n");
900 return NULL;
901 }
902
903 skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
904
905 /* FW/HTC requires 4-byte aligned streams */
906 if (!IS_ALIGNED((unsigned long)skb->data, 4))
907 ath10k_warn("Unaligned HTC tx skb\n");
908
909 return skb;
910}
911
/* Tell the target that host-side HTC setup is complete by sending the
 * SETUP_COMPLETE_EX message on endpoint 0. */
int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		/* skb ownership stays with us on failure */
		kfree_skb(skb);
		return status;
	}

	return 0;
}
939
940/*
941 * stop HTC communications, i.e. stop interrupt reception, and flush all
942 * queued buffers
943 */
/* Stop HTC: mark the layer as stopping (prevents workers from being
 * re-armed by tx completions), flush every endpoint's tx queue, stop
 * HIF and reset all endpoints to their default state. */
void ath10k_htc_stop(struct ath10k_htc *htc)
{
	int i;
	struct ath10k_htc_ep *ep;

	spin_lock_bh(&htc->tx_lock);
	htc->stopping = true;
	spin_unlock_bh(&htc->tx_lock);

	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
		ep = &htc->endpoint[i];
		ath10k_htc_flush_endpoint_tx(htc, ep);
	}

	ath10k_hif_stop(htc->ar);
	ath10k_htc_reset_endpoint_states(htc);
}
961
/* registered target arrival callback from the HIF layer */
/* Allocate and initialize an HTC instance: reset endpoints, register
 * the HTC tx/rx completion callbacks with HIF, and fetch the default
 * control pipe ids for endpoint 0. Returns ERR_PTR(-ENOMEM) on
 * allocation failure; free with ath10k_htc_destroy(). */
struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
				     struct ath10k_htc_ops *htc_ops)
{
	struct ath10k_hif_cb htc_callbacks;
	struct ath10k_htc_ep *ep = NULL;
	struct ath10k_htc *htc = NULL;

	/* FIXME: use struct ath10k instead */
	htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL);
	if (!htc)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&htc->tx_lock);

	memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops));

	ath10k_htc_reset_endpoint_states(htc);

	/* setup HIF layer callbacks */
	htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
	htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
	htc->ar = ar;

	/* Get HIF default pipe for HTC message exchange */
	ep = &htc->endpoint[ATH10K_HTC_EP_0];

	ath10k_hif_init(ar, &htc_callbacks);
	ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);

	init_completion(&htc->ctl_resp);

	return htc;
}
996
/* Release an HTC instance previously obtained from ath10k_htc_create(). */
void ath10k_htc_destroy(struct ath10k_htc *htc)
{
	kfree(htc);
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
new file mode 100644
index 000000000000..fa45844b59fb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -0,0 +1,368 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _HTC_H_
19#define _HTC_H_
20
21#include <linux/kernel.h>
22#include <linux/list.h>
23#include <linux/bug.h>
24#include <linux/skbuff.h>
25#include <linux/semaphore.h>
26#include <linux/timer.h>
27
28struct ath10k;
29
30/****************/
31/* HTC protocol */
32/****************/
33
34/*
35 * HTC - host-target control protocol
36 *
37 * tx packets are generally <htc_hdr><payload>
38 * rx packets are more complex: <htc_hdr><payload><trailer>
39 *
40 * The payload + trailer length is stored in len.
 * To get the payload-only length, subtract trailer_len from len.
42 *
43 * Trailer contains (possibly) multiple <htc_record>.
44 * Each record is a id-len-value.
45 *
46 * HTC header flags, control_byte0, control_byte1
47 * have different meaning depending whether its tx
48 * or rx.
49 *
50 * Alignment: htc_hdr, payload and trailer are
51 * 4-byte aligned.
52 */
53
/* tx-direction values for ath10k_htc_hdr.flags */
enum ath10k_htc_tx_flags {
	ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
	ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02
};

/* rx-direction values for ath10k_htc_hdr.flags */
enum ath10k_htc_rx_flags {
	ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
	ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};

/*
 * On-wire HTC frame header (8 bytes, 4-byte aligned).  The anonymous
 * unions give direction-specific names to the same bytes.
 */
struct ath10k_htc_hdr {
	u8 eid; /* @enum ath10k_htc_ep_id */
	u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
	__le16 len;
	union {
		u8 trailer_len; /* for rx */
		u8 control_byte0;
	} __packed;
	union {
		u8 seq_no; /* for tx */
		u8 control_byte1;
	} __packed;
	u8 pad0;
	u8 pad1;
} __packed __aligned(4);

/* ids of the HTC control messages exchanged on endpoint 0 */
enum ath10k_ath10k_htc_msg_id {
	ATH10K_HTC_MSG_READY_ID = 1,
	ATH10K_HTC_MSG_CONNECT_SERVICE_ID = 2,
	ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
	ATH10K_HTC_MSG_SETUP_COMPLETE_ID = 4,
	ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
	ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
};

enum ath10k_htc_version {
	ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
	ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
};

/*
 * Connect-service request flags.  The embedded #defines describe
 * bitfields within the same 16-bit flags word.
 */
enum ath10k_htc_conn_flags {
	ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
	ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
	ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
	ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3
	ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
	ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00
#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
};

/* status codes carried in ath10k_htc_conn_svc_response.status */
enum ath10k_htc_conn_svc_status {
	ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
	ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
	ATH10K_HTC_CONN_SVC_STATUS_FAILED = 2,
	ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
	ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
};
113
/* common header of every HTC control message */
struct ath10k_ath10k_htc_msg_hdr {
	__le16 message_id; /* @enum htc_message_id */
} __packed;

struct ath10k_htc_unknown {
	u8 pad0;
	u8 pad1;
} __packed;

/* target -> host: advertised credit pool and endpoint count */
struct ath10k_htc_ready {
	__le16 credit_count;
	__le16 credit_size;
	u8 max_endpoints;
	u8 pad0;
} __packed;

struct ath10k_htc_ready_extended {
	struct ath10k_htc_ready base;
	u8 htc_version; /* @enum ath10k_htc_version */
	u8 max_msgs_per_htc_bundle;
	u8 pad0;
	u8 pad1;
} __packed;

/* host -> target: request connecting a service to an endpoint */
struct ath10k_htc_conn_svc {
	__le16 service_id;
	__le16 flags; /* @enum ath10k_htc_conn_flags */
	u8 pad0;
	u8 pad1;
} __packed;

struct ath10k_htc_conn_svc_response {
	__le16 service_id;
	u8 status; /* @enum ath10k_htc_conn_svc_status */
	u8 eid;
	__le16 max_msg_size;
} __packed;

struct ath10k_htc_setup_complete_extended {
	u8 pad0;
	u8 pad1;
	__le32 flags; /* @enum htc_setup_complete_flags */
	u8 max_msgs_per_bundled_recv;
	u8 pad2;
	u8 pad3;
	u8 pad4;
} __packed;

/* any HTC control message: common header plus per-id payload */
struct ath10k_htc_msg {
	struct ath10k_ath10k_htc_msg_hdr hdr;
	union {
		/* host-to-target */
		struct ath10k_htc_conn_svc connect_service;
		struct ath10k_htc_ready ready;
		struct ath10k_htc_ready_extended ready_ext;
		struct ath10k_htc_unknown unknown;
		struct ath10k_htc_setup_complete_extended setup_complete_ext;

		/* target-to-host */
		struct ath10k_htc_conn_svc_response connect_service_response;
	};
} __packed __aligned(4);
176
/* ids of the id-len-value records found in an rx trailer */
enum ath10k_ath10k_htc_record_id {
	ATH10K_HTC_RECORD_NULL = 0,
	ATH10K_HTC_RECORD_CREDITS = 1
};

/* header of a single trailer record */
struct ath10k_ath10k_htc_record_hdr {
	u8 id; /* @enum ath10k_ath10k_htc_record_id */
	u8 len;
	u8 pad0;
	u8 pad1;
} __packed;

/* credit replenishment report for one endpoint */
struct ath10k_htc_credit_report {
	u8 eid; /* @enum ath10k_htc_ep_id */
	u8 credits;
	u8 pad0;
	u8 pad1;
} __packed;
195
196struct ath10k_htc_record {
197 struct ath10k_ath10k_htc_record_hdr hdr;
198 union {
199 struct ath10k_htc_credit_report credit_report[0];
200 u8 pauload[0];
201 };
202} __packed __aligned(4);
203
/*
 * note: the trailer offset is dynamic depending
 * on payload length. this is only a struct layout draft
 */
struct ath10k_htc_frame {
	struct ath10k_htc_hdr hdr;
	union {
		struct ath10k_htc_msg msg;
		u8 payload[0];
	};
	struct ath10k_htc_record trailer[0];
} __packed __aligned(4);


/*******************/
/* Host-side stuff */
/*******************/

/* service group ids; combined with an index to form a service id */
enum ath10k_htc_svc_gid {
	ATH10K_HTC_SVC_GRP_RSVD = 0,
	ATH10K_HTC_SVC_GRP_WMI = 1,
	ATH10K_HTC_SVC_GRP_NMI = 2,
	ATH10K_HTC_SVC_GRP_HTT = 3,

	ATH10K_HTC_SVC_GRP_TEST = 254,
	ATH10K_HTC_SVC_GRP_LAST = 255,
};
231
/* build a 16-bit service id from a group and an index within it;
 * #undef'd again right after the enum below */
#define SVC(group, idx) \
	(int)(((int)(group) << 8) | (int)(idx))

enum ath10k_htc_svc_id {
	/* NOTE: service ID of 0x0000 is reserved and should never be used */
	ATH10K_HTC_SVC_ID_RESERVED = 0x0000,
	ATH10K_HTC_SVC_ID_UNUSED = ATH10K_HTC_SVC_ID_RESERVED,

	ATH10K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
	ATH10K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
	ATH10K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
	ATH10K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
	ATH10K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
	ATH10K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH10K_HTC_SVC_GRP_WMI, 4),

	ATH10K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
	ATH10K_HTC_SVC_ID_NMI_DATA = SVC(ATH10K_HTC_SVC_GRP_NMI, 1),

	ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),

	/* raw stream service (i.e. flash, tcmd, calibration apps) */
	ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
};

#undef SVC

/* logical endpoint numbers; EP 0 is used for HTC control messages */
enum ath10k_htc_ep_id {
	ATH10K_HTC_EP_UNUSED = -1,
	ATH10K_HTC_EP_0 = 0,
	ATH10K_HTC_EP_1 = 1,
	ATH10K_HTC_EP_2,
	ATH10K_HTC_EP_3,
	ATH10K_HTC_EP_4,
	ATH10K_HTC_EP_5,
	ATH10K_HTC_EP_6,
	ATH10K_HTC_EP_7,
	ATH10K_HTC_EP_8,
	ATH10K_HTC_EP_COUNT,
};
271
/* callbacks provided by the HTC user */
struct ath10k_htc_ops {
	void (*target_send_suspend_complete)(struct ath10k *ar);
};

/* per-endpoint completion callbacks */
struct ath10k_htc_ep_ops {
	void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
	void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
};

/* service connection information */
struct ath10k_htc_svc_conn_req {
	u16 service_id;
	struct ath10k_htc_ep_ops ep_ops;
	int max_send_queue_depth;
};

/* service connection response information */
struct ath10k_htc_svc_conn_resp {
	u8 buffer_len;
	u8 actual_len;
	enum ath10k_htc_ep_id eid;
	unsigned int max_msg_len;
	u8 connect_resp_code;
};

#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
#define ATH10K_HTC_MAX_LEN 4096
#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
					sizeof(struct ath10k_htc_hdr))
#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
304
/* host-side state of one HTC endpoint */
struct ath10k_htc_ep {
	struct ath10k_htc *htc;
	enum ath10k_htc_ep_id eid;
	enum ath10k_htc_svc_id service_id;
	struct ath10k_htc_ep_ops ep_ops;

	int max_tx_queue_depth;
	int max_ep_message_len;
	u8 ul_pipe_id;
	u8 dl_pipe_id;
	int ul_is_polled; /* call HIF to get tx completions */
	int dl_is_polled; /* call HIF to fetch rx (not implemented) */

	struct sk_buff_head tx_queue;

	u8 seq_no; /* for debugging */
	int tx_credits;
	int tx_credit_size;
	int tx_credits_per_max_message;
	bool tx_credit_flow_enabled;

	struct work_struct send_work;
};

/* per-service share of the target's tx credit pool */
struct ath10k_htc_svc_tx_credits {
	u16 service_id;
	u8 credit_allocation;
};

/* top-level host-side HTC state, one per device */
struct ath10k_htc {
	struct ath10k *ar;
	struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];

	/* protects endpoint and stopping fields */
	spinlock_t tx_lock;

	struct ath10k_htc_ops htc_ops;

	u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN];
	int control_resp_len;

	struct completion ctl_resp;

	int total_transmit_credits;
	struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
	int target_credit_size;

	bool stopping;
};
354
/* public HTC API; implemented in htc.c */
struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
				     struct ath10k_htc_ops *htc_ops);
int ath10k_htc_wait_target(struct ath10k_htc *htc);
int ath10k_htc_start(struct ath10k_htc *htc);
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp);
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
		    struct sk_buff *packet);
void ath10k_htc_stop(struct ath10k_htc *htc);
void ath10k_htc_destroy(struct ath10k_htc *htc);
struct sk_buff *ath10k_htc_alloc_skb(int size);
368#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
new file mode 100644
index 000000000000..185a5468a2f2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -0,0 +1,152 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/slab.h>
19
20#include "htt.h"
21#include "core.h"
22#include "debug.h"
23
24static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
25{
26 struct ath10k_htc_svc_conn_req conn_req;
27 struct ath10k_htc_svc_conn_resp conn_resp;
28 int status;
29
30 memset(&conn_req, 0, sizeof(conn_req));
31 memset(&conn_resp, 0, sizeof(conn_resp));
32
33 conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
34 conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
35
36 /* connect to control service */
37 conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
38
39 status = ath10k_htc_connect_service(htt->ar->htc, &conn_req,
40 &conn_resp);
41
42 if (status)
43 return status;
44
45 htt->eid = conn_resp.eid;
46
47 return 0;
48}
49
50struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
51{
52 struct ath10k_htt *htt;
53 int ret;
54
55 htt = kzalloc(sizeof(*htt), GFP_KERNEL);
56 if (!htt)
57 return NULL;
58
59 htt->ar = ar;
60 htt->max_throughput_mbps = 800;
61
62 /*
63 * Connect to HTC service.
64 * This has to be done before calling ath10k_htt_rx_attach,
65 * since ath10k_htt_rx_attach involves sending a rx ring configure
66 * message to the target.
67 */
68 if (ath10k_htt_htc_attach(htt))
69 goto err_htc_attach;
70
71 ret = ath10k_htt_tx_attach(htt);
72 if (ret) {
73 ath10k_err("could not attach htt tx (%d)\n", ret);
74 goto err_htc_attach;
75 }
76
77 if (ath10k_htt_rx_attach(htt))
78 goto err_rx_attach;
79
80 /*
81 * Prefetch enough data to satisfy target
82 * classification engine.
83 * This is for LL chips. HL chips will probably
84 * transfer all frame in the tx fragment.
85 */
86 htt->prefetch_len =
87 36 + /* 802.11 + qos + ht */
88 4 + /* 802.1q */
89 8 + /* llc snap */
90 2; /* ip4 dscp or ip6 priority */
91
92 return htt;
93
94err_rx_attach:
95 ath10k_htt_tx_detach(htt);
96err_htc_attach:
97 kfree(htt);
98 return NULL;
99}
100
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)

/*
 * Check the HTT version reported by the target against the host's.
 * A major-version mismatch is fatal (-ENOTSUPP); a minor-version
 * mismatch only logs a warning.
 */
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
	ath10k_dbg(ATH10K_DBG_HTT,
		   "htt target version %d.%d; host version %d.%d\n",
		   htt->target_version_major,
		   htt->target_version_minor,
		   HTT_CURRENT_VERSION_MAJOR,
		   HTT_CURRENT_VERSION_MINOR);

	if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
		ath10k_err("htt major versions are incompatible!\n");
		return -ENOTSUPP;
	}

	if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
		ath10k_warn("htt minor version differ but still compatible\n");

	return 0;
}
122
123int ath10k_htt_attach_target(struct ath10k_htt *htt)
124{
125 int status;
126
127 init_completion(&htt->target_version_received);
128
129 status = ath10k_htt_h2t_ver_req_msg(htt);
130 if (status)
131 return status;
132
133 status = wait_for_completion_timeout(&htt->target_version_received,
134 HTT_TARGET_VERSION_TIMEOUT_HZ);
135 if (status <= 0) {
136 ath10k_warn("htt version request timed out\n");
137 return -ETIMEDOUT;
138 }
139
140 status = ath10k_htt_verify_version(htt);
141 if (status)
142 return status;
143
144 return ath10k_htt_send_rx_ring_cfg_ll(htt);
145}
146
/* Tear down rx state, then tx state, then free the HTT context. */
void ath10k_htt_detach(struct ath10k_htt *htt)
{
	ath10k_htt_rx_detach(htt);
	ath10k_htt_tx_detach(htt);
	kfree(htt);
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
new file mode 100644
index 000000000000..a7a7aa040536
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -0,0 +1,1338 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _HTT_H_
19#define _HTT_H_
20
21#include <linux/bug.h>
22
23#include "core.h"
24#include "htc.h"
25#include "rx_desc.h"
26
27#define HTT_CURRENT_VERSION_MAJOR 2
28#define HTT_CURRENT_VERSION_MINOR 1
29
/* bitmask of statistics the host can request via htt_stats_req */
enum htt_dbg_stats_type {
	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
	HTT_DBG_STATS_RX_REORDER = 1 << 1,
	HTT_DBG_STATS_RX_RATE_INFO = 1 << 2,
	HTT_DBG_STATS_TX_PPDU_LOG = 1 << 3,
	HTT_DBG_STATS_TX_RATE_INFO = 1 << 4,
	/* bits 5-23 currently reserved */

	HTT_DBG_NUM_STATS /* keep this last */
};

enum htt_h2t_msg_type { /* host-to-target */
	HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
	HTT_H2T_MSG_TYPE_TX_FRM = 1,
	HTT_H2T_MSG_TYPE_RX_RING_CFG = 2,
	HTT_H2T_MSG_TYPE_STATS_REQ = 3,
	HTT_H2T_MSG_TYPE_SYNC = 4,
	HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
	HTT_H2T_MSG_TYPE_MGMT_TX = 7,

	HTT_H2T_NUM_MSGS /* keep this last */
};

/* first byte of every host->target HTT message */
struct htt_cmd_hdr {
	u8 msg_type;
} __packed;

/* the version request has no payload; pad the command to 4 bytes */
struct htt_ver_req {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
} __packed;
61
62/*
63 * HTT tx MSDU descriptor
64 *
65 * The HTT tx MSDU descriptor is created by the host HTT SW for each
66 * tx MSDU. The HTT tx MSDU descriptor contains the information that
67 * the target firmware needs for the FW's tx processing, particularly
68 * for creating the HW msdu descriptor.
69 * The same HTT tx descriptor is used for HL and LL systems, though
70 * a few fields within the tx descriptor are used only by LL or
71 * only by HL.
72 * The HTT tx descriptor is defined in two manners: by a struct with
73 * bitfields, and by a series of [dword offset, bit mask, bit shift]
74 * definitions.
75 * The target should use the struct def, for simplicitly and clarity,
76 * but the host shall use the bit-mast + bit-shift defs, to be endian-
77 * neutral. Specifically, the host shall use the get/set macros built
78 * around the mask + shift defs.
79 */
/* one scatter-gather fragment referenced by htt_data_tx_desc */
struct htt_data_tx_desc_frag {
	__le32 paddr;
	__le32 len;
} __packed;

enum htt_data_tx_desc_flags0 {
	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
	HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT = 1 << 2,
	HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY = 1 << 3,
	HTT_DATA_TX_DESC_FLAGS0_RSVD0 = 1 << 4
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
};

enum htt_data_tx_desc_flags1 {
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB 0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB 6
	HTT_DATA_TX_DESC_FLAGS1_POSTPONED = 1 << 11,
	HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
	HTT_DATA_TX_DESC_FLAGS1_RSVD1 = 1 << 15
};

/* out-of-band tid values carried in the ext_tid field */
enum htt_data_tx_ext_tid {
	HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
	HTT_DATA_TX_EXT_TID_MGMT = 17,
	HTT_DATA_TX_EXT_TID_INVALID = 31
};

#define HTT_INVALID_PEERID 0xFFFF

/*
 * htt_data_tx_desc - used for data tx path
 *
 * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
 * ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
 *          for special kinds of tids
 * postponed: only for HL hosts. indicates if this is a resend
 *            (HL hosts manage queues on the host )
 * more_in_batch: only for HL hosts. indicates if more packets are
 *                pending. this allows target to wait and aggregate
 */
struct htt_data_tx_desc {
	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
	__le16 len;
	__le16 id;
	__le32 frags_paddr;
	__le32 peerid;
	u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
137
/* selects which rx descriptor sections the target deposits in the ring */
enum htt_rx_ring_flags {
	HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
	HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
	HTT_RX_RING_FLAGS_PPDU_START = 1 << 2,
	HTT_RX_RING_FLAGS_PPDU_END = 1 << 3,
	HTT_RX_RING_FLAGS_MPDU_START = 1 << 4,
	HTT_RX_RING_FLAGS_MPDU_END = 1 << 5,
	HTT_RX_RING_FLAGS_MSDU_START = 1 << 6,
	HTT_RX_RING_FLAGS_MSDU_END = 1 << 7,
	HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
	HTT_RX_RING_FLAGS_FRAG_INFO = 1 << 9,
	HTT_RX_RING_FLAGS_UNICAST_RX = 1 << 10,
	HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
	HTT_RX_RING_FLAGS_CTRL_RX = 1 << 12,
	HTT_RX_RING_FLAGS_MGMT_RX = 1 << 13,
	HTT_RX_RING_FLAGS_NULL_RX = 1 << 14,
	HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
};

/* describes one host rx ring to the target (HTT_H2T_MSG_TYPE_RX_RING_CFG) */
struct htt_rx_ring_setup_ring {
	__le32 fw_idx_shadow_reg_paddr;
	__le32 rx_ring_base_paddr;
	__le16 rx_ring_len; /* in 4-byte words */
	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
	__le16 fw_idx_init_val;

	/* the following offsets are in 4-byte units */
	__le16 mac80211_hdr_offset;
	__le16 msdu_payload_offset;
	__le16 ppdu_start_offset;
	__le16 ppdu_end_offset;
	__le16 mpdu_start_offset;
	__le16 mpdu_end_offset;
	__le16 msdu_start_offset;
	__le16 msdu_end_offset;
	__le16 rx_attention_offset;
	__le16 frag_info_offset;
} __packed;

struct htt_rx_ring_setup_hdr {
	u8 num_rings; /* supported values: 1, 2 */
	__le16 rsvd0;
} __packed;

struct htt_rx_ring_setup {
	struct htt_rx_ring_setup_hdr hdr;
	struct htt_rx_ring_setup_ring rings[0];
} __packed;
187
188/*
189 * htt_stats_req - request target to send specified statistics
190 *
191 * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
192 * @upload_types: see %htt_dbg_stats_type. this is 24bit field actually
193 * so make sure its little-endian.
194 * @reset_types: see %htt_dbg_stats_type. this is 24bit field actually
195 * so make sure its little-endian.
196 * @cfg_val: stat_type specific configuration
197 * @stat_type: see %htt_dbg_stats_type
198 * @cookie_lsb: used for confirmation message from target->host
199 * @cookie_msb: ditto as %cookie
200 */
/* body of HTT_H2T_MSG_TYPE_STATS_REQ; fields documented in the
 * comment block above */
struct htt_stats_req {
	u8 upload_types[3];
	u8 rsvd0;
	u8 reset_types[3];
	struct {
		u8 mpdu_bytes;
		u8 mpdu_num_msdus;
		u8 msdu_bytes;
	} __packed;
	u8 stat_type;
	__le32 cookie_lsb;
	__le32 cookie_msb;
} __packed;

#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
216
217/*
218 * htt_oob_sync_req - request out-of-band sync
219 *
220 * The HTT SYNC tells the target to suspend processing of subsequent
221 * HTT host-to-target messages until some other target agent locally
222 * informs the target HTT FW that the current sync counter is equal to
223 * or greater than (in a modulo sense) the sync counter specified in
224 * the SYNC message.
225 *
226 * This allows other host-target components to synchronize their operation
227 * with HTT, e.g. to ensure that tx frames don't get transmitted until a
228 * security key has been downloaded to and activated by the target.
229 * In the absence of any explicit synchronization counter value
230 * specification, the target HTT FW will use zero as the default current
231 * sync value.
232 *
233 * The HTT target FW will suspend its host->target message processing as long
234 * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
235 */
/* body of HTT_H2T_MSG_TYPE_SYNC; semantics documented above */
struct htt_oob_sync_req {
	u8 sync_count;
	__le16 rsvd0;
} __packed;

#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_MASK 0x1F
#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_LSB 0

/* body of HTT_H2T_MSG_TYPE_AGGR_CFG */
struct htt_aggr_conf {
	u8 max_num_ampdu_subframes;
	union {
		/* dont use bitfields; undefined behaviour */
		u8 flags; /* see %HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_ */
		u8 max_num_amsdu_subframes:5;
	} __packed;
} __packed;

#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32

/* body of HTT_H2T_MSG_TYPE_MGMT_TX; carries the first
 * HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of the frame inline */
struct htt_mgmt_tx_desc {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
	__le32 msdu_paddr;
	__le32 desc_id;
	__le32 len;
	__le32 vdev_id;
	u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
} __packed;

enum htt_mgmt_tx_status {
	HTT_MGMT_TX_STATUS_OK = 0,
	HTT_MGMT_TX_STATUS_RETRY = 1,
	HTT_MGMT_TX_STATUS_DROP = 2
};
269
270/*=== target -> host messages ===============================================*/
271
272
/* message ids for target->host HTT messages */
enum htt_t2h_msg_type {
	HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0,
	HTT_T2H_MSG_TYPE_RX_IND = 0x1,
	HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2,
	HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
	HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
	HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
	HTT_T2H_MSG_TYPE_RX_DELBA = 0x6,
	HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
	HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
	HTT_T2H_MSG_TYPE_STATS_CONF = 0x9,
	HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
	HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
	HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
	HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
	HTT_T2H_MSG_TYPE_TEST,
	/* keep this last */
	HTT_T2H_NUM_MSGS
};
293
294/*
295 * htt_resp_hdr - header for target-to-host messages
296 *
297 * msg_type: see htt_t2h_msg_type
298 */
struct htt_resp_hdr {
	u8 msg_type;
} __packed;

#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
#define HTT_RESP_HDR_MSG_TYPE_MASK 0xff
#define HTT_RESP_HDR_MSG_TYPE_LSB 0

/* htt_ver_resp - response sent for htt_ver_req */
struct htt_ver_resp {
	u8 minor;
	u8 major;
	u8 rsvd0;
} __packed;

/* completion report for HTT_H2T_MSG_TYPE_MGMT_TX */
struct htt_mgmt_tx_completion {
	u8 rsvd0;
	u8 rsvd1;
	u8 rsvd2;
	__le32 desc_id;
	__le32 status; /* %htt_mgmt_tx_status */
} __packed;
321
/* the INFO0/INFO1 defines directly below apply to
 * struct htt_rx_indication_hdr */
#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x3F)
#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB   (0)
#define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 6)
#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7)

#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK   0x0000003F
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB    0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK     0x00000FC0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB      6
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB  12
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK   0x00FC0000
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB    18
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK     0xFF000000
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB      24

struct htt_rx_indication_hdr {
	u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
	__le16 peer_id;
	__le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
} __packed;

/* NOTE(review): the INFO0/1/2 defines below reuse the prefix above
 * but describe struct htt_rx_indication_ppdu, not the hdr */
#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID    (1 << 0)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB  (1)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK  (1 << 5)
#define HTT_RX_INDICATION_INFO0_END_VALID        (1 << 6)
#define HTT_RX_INDICATION_INFO0_START_VALID      (1 << 7)

#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK    0x00FFFFFF
#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB     0
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB  24

#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB  0
#define HTT_RX_INDICATION_INFO2_SERVICE_MASK    0xFF000000
#define HTT_RX_INDICATION_INFO2_SERVICE_LSB     24
360
/* Legacy rate codes; the OFDM and CCK value spaces overlap -- which
 * one applies is selected by %htt_rx_legacy_rate_type. */
enum htt_rx_legacy_rate {
	HTT_RX_OFDM_48 = 0,
	HTT_RX_OFDM_24 = 1,
	HTT_RX_OFDM_12,
	HTT_RX_OFDM_6,
	HTT_RX_OFDM_54,
	HTT_RX_OFDM_36,
	HTT_RX_OFDM_18,
	HTT_RX_OFDM_9,

	/* long preamble */
	HTT_RX_CCK_11_LP = 0,
	HTT_RX_CCK_5_5_LP = 1,
	HTT_RX_CCK_2_LP,
	HTT_RX_CCK_1_LP,
	/* short preamble */
	HTT_RX_CCK_11_SP,
	HTT_RX_CCK_5_5_SP,
	HTT_RX_CCK_2_SP
};

enum htt_rx_legacy_rate_type {
	HTT_RX_LEGACY_RATE_OFDM = 0,
	HTT_RX_LEGACY_RATE_CCK
};

enum htt_rx_preamble_type {
	HTT_RX_LEGACY = 0x4,
	HTT_RX_HT = 0x8,
	HTT_RX_HT_WITH_TXBF = 0x9,
	HTT_RX_VHT = 0xC,
	HTT_RX_VHT_WITH_TXBF = 0xD,
};
394
395/*
396 * Fields: phy_err_valid, phy_err_code, tsf,
397 * usec_timestamp, sub_usec_timestamp
398 * ..are valid only if end_valid == 1.
399 *
400 * Fields: rssi_chains, legacy_rate_type,
401 * legacy_rate_cck, preamble_type, service,
402 * vht_sig_*
403 * ..are valid only if start_valid == 1;
404 */
/* per-PPDU metadata in an rx indication; validity of the fields is
 * gated by start_valid/end_valid as described in the comment above */
struct htt_rx_indication_ppdu {
	u8 combined_rssi;
	u8 sub_usec_timestamp;
	u8 phy_err_code;
	u8 info0; /* HTT_RX_INDICATION_INFO0_ */
	struct {
		u8 pri20_db;
		u8 ext20_db;
		u8 ext40_db;
		u8 ext80_db;
	} __packed rssi_chains[4];
	__le32 tsf;
	__le32 usec_timestamp;
	__le32 info1; /* HTT_RX_INDICATION_INFO1_ */
	__le32 info2; /* HTT_RX_INDICATION_INFO2_ */
} __packed;
421
/* per-mpdu-range rx status reported by the target */
enum htt_rx_mpdu_status {
	HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
	HTT_RX_IND_MPDU_STATUS_OK,
	HTT_RX_IND_MPDU_STATUS_ERR_FCS,
	HTT_RX_IND_MPDU_STATUS_ERR_DUP,
	HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
	HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
	/* only accept EAPOL frames */
	HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
	HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
	/* Non-data in promiscous mode */
	HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
	HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
	HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
	HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
	HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
	HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,

	/*
	 * MISC: discard for unspecified reasons.
	 * Leave this enum value last.
	 */
	HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
};

struct htt_rx_indication_mpdu_range {
	u8 mpdu_count;
	u8 mpdu_range_status; /* %htt_rx_mpdu_status */
	u8 pad0;
	u8 pad1;
} __packed;

/* NOTE(review): not marked __packed unlike its neighbours; layout is
 * naturally padding-free, but confirm this is intentional */
struct htt_rx_indication_prefix {
	__le16 fw_rx_desc_bytes;
	u8 pad0;
	u8 pad1;
};

struct htt_rx_indication {
	struct htt_rx_indication_hdr hdr;
	struct htt_rx_indication_ppdu ppdu;
	struct htt_rx_indication_prefix prefix;

	/*
	 * the following fields are both dynamically sized, so
	 * take care addressing them
	 */

	/* the size of this is %fw_rx_desc_bytes */
	struct fw_rx_desc_base fw_desc;

	/*
	 * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
	 * and has %num_mpdu_ranges elements.
	 */
	struct htt_rx_indication_mpdu_range mpdu_ranges[0];
} __packed;
479
480static inline struct htt_rx_indication_mpdu_range *
481 htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
482{
483 void *ptr = rx_ind;
484
485 ptr += sizeof(rx_ind->hdr)
486 + sizeof(rx_ind->ppdu)
487 + sizeof(rx_ind->prefix)
488 + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
489 return ptr;
490}
491
enum htt_rx_flush_mpdu_status {
	HTT_RX_FLUSH_MPDU_DISCARD = 0,
	HTT_RX_FLUSH_MPDU_REORDER = 1,
};

/*
 * htt_rx_flush - discard or reorder given range of mpdus
 *
 * Note: host must check if all sequence numbers between
 * [seq_num_start, seq_num_end-1] are valid.
 */
/* NOTE(review): not marked __packed unlike neighbouring wire structs;
 * the layout is naturally padding-free, but confirm this is intended */
struct htt_rx_flush {
	__le16 peer_id;
	u8 tid;
	u8 rsvd0;
	u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
	u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
	u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
};

/* target -> host: a peer MAC address was mapped to a peer id */
struct htt_rx_peer_map {
	u8 vdev_id;
	__le16 peer_id;
	u8 addr[6];
	u8 rsvd0;
	u8 rsvd1;
} __packed;

struct htt_rx_peer_unmap {
	u8 rsvd0;
	__le16 peer_id;
} __packed;
524
525enum htt_security_types {
526 HTT_SECURITY_NONE,
527 HTT_SECURITY_WEP128,
528 HTT_SECURITY_WEP104,
529 HTT_SECURITY_WEP40,
530 HTT_SECURITY_TKIP,
531 HTT_SECURITY_TKIP_NOMIC,
532 HTT_SECURITY_AES_CCMP,
533 HTT_SECURITY_WAPI,
534
535 HTT_NUM_SECURITY_TYPES /* keep this last! */
536};
537
/* Bit layout of htt_security_indication.flags: bits 6:0 hold an
 * htt_security_types value, bit 7 marks a unicast (vs multicast) key. */
enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
#define HTT_SECURITY_TYPE_LSB  0
	HTT_SECURITY_IS_UNICAST = 1 << 7
};
543
/* Target notification that a security association was set up for a peer. */
struct htt_security_indication {
	union {
		/* dont use bitfields; undefined behaviour */
		u8 flags; /* %htt_security_flags */
		struct {
			u8 security_type:7, /* %htt_security_types */
			   is_unicast:1;
		} __packed;
	} __packed;
	__le16 peer_id;
	u8 michael_key[8];
	u8 wapi_rsc[16];
} __packed;
557
558#define HTT_RX_BA_INFO0_TID_MASK 0x000F
559#define HTT_RX_BA_INFO0_TID_LSB 0
560#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
561#define HTT_RX_BA_INFO0_PEER_ID_LSB 4
562
/* Target request to establish a rx block-ack reorder session. */
struct htt_rx_addba {
	u8 window_size; /* block-ack reorder window size */
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;
567
/* Target request to tear down a rx block-ack reorder session. */
struct htt_rx_delba {
	u8 rsvd0;
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;
572
/* Per-MSDU completion status carried in htt_data_tx_completion.flags. */
enum htt_data_tx_status {
	HTT_DATA_TX_STATUS_OK = 0,
	HTT_DATA_TX_STATUS_DISCARD = 1,
	HTT_DATA_TX_STATUS_NO_ACK = 2,
	HTT_DATA_TX_STATUS_POSTPONE = 3, /* HL only */
	HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
};
580
/* Bit layout of htt_data_tx_completion.flags: status in bits 2:0,
 * TID in bits 6:3, TID-invalid flag in bit 7. */
enum htt_data_tx_flags {
#define HTT_DATA_TX_STATUS_MASK 0x07
#define HTT_DATA_TX_STATUS_LSB 0
#define HTT_DATA_TX_TID_MASK 0x78
#define HTT_DATA_TX_TID_LSB 3
	HTT_DATA_TX_TID_INVALID = 1 << 7
};
588
589#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
590
/* Target event completing one or more previously queued data tx MSDUs. */
struct htt_data_tx_completion {
	union {
		u8 flags; /* %htt_data_tx_flags */
		struct {
			u8 status:3,
			   tid:4,
			   tid_invalid:1;
		} __packed;
	} __packed;
	u8 num_msdus;
	u8 rsvd0;
	__le16 msdus[0]; /* variable length based on %num_msdus */
} __packed;
604
/* Raw view of a tx completion indication: 4-byte header followed by
 * one or more 16-bit payload words. */
struct htt_tx_compl_ind_base {
	u32 hdr;
	u16 payload[1/*or more*/];
} __packed;
609
/* Per-(A-)MPDU rate-control result, carried in htt_rc_update.params[]. */
struct htt_rc_tx_done_params {
	u32 rate_code;
	u32 rate_code_flags;
	u32 flags;
	u32 num_enqued; /* 1 for non-AMPDU */
	u32 num_retries;
	u32 num_failed; /* for AMPDU */
	u32 ack_rssi;
	u32 time_stamp;
	u32 is_probe;
};
621
/* Target rate-control update for one peer. */
struct htt_rc_update {
	u8 vdev_id;
	__le16 peer_id;
	u8 addr[6]; /* peer MAC address */
	u8 num_elems;
	u8 rsvd0;
	struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */
} __packed;
630
/* see htt_rx_indication for similar fields and descriptions */
struct htt_rx_fragment_indication {
	union {
		u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
		struct {
			u8 ext_tid:5,
			   flush_valid:1;
		} __packed;
	} __packed;
	__le16 peer_id;
	__le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
	__le16 fw_rx_desc_bytes;
	__le16 rsvd0;

	/* firmware rx descriptor(s); %fw_rx_desc_bytes long */
	u8 fw_msdu_rx_desc[0];
} __packed;
647
648#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F
649#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0
650#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
651#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5
652
653#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
654#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0
655#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
656#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
657
658/*
659 * target -> host test message definition
660 *
661 * The following field definitions describe the format of the test
662 * message sent from the target to the host.
663 * The message consists of a 4-octet header, followed by a variable
664 * number of 32-bit integer values, followed by a variable number
665 * of 8-bit character values.
666 *
667 * |31 16|15 8|7 0|
668 * |-----------------------------------------------------------|
669 * | num chars | num ints | msg type |
670 * |-----------------------------------------------------------|
671 * | int 0 |
672 * |-----------------------------------------------------------|
673 * | int 1 |
674 * |-----------------------------------------------------------|
675 * | ... |
676 * |-----------------------------------------------------------|
677 * | char 3 | char 2 | char 1 | char 0 |
678 * |-----------------------------------------------------------|
679 * | | | ... | char 4 |
680 * |-----------------------------------------------------------|
681 * - MSG_TYPE
682 * Bits 7:0
683 * Purpose: identifies this as a test message
684 * Value: HTT_MSG_TYPE_TEST
685 * - NUM_INTS
686 * Bits 15:8
687 * Purpose: indicate how many 32-bit integers follow the message header
688 * - NUM_CHARS
689 * Bits 31:16
690 * Purpose: indicate how many 8-bit charaters follow the series of integers
691 */
/* Body of a target->host test message (see the diagram above); the
 * leading msg_type octet is carried by htt_resp_hdr. */
struct htt_rx_test {
	u8 num_ints;
	__le16 num_chars;

	/* payload consists of 2 lists:
	 *  a) num_ints * sizeof(__le32)
	 *  b) num_chars * sizeof(u8) aligned to 4bytes */
	u8 payload[0];
} __packed;
701
702static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
703{
704 return (__le32 *)rx_test->payload;
705}
706
707static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
708{
709 return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
710}
711
712/*
713 * target -> host packet log message
714 *
715 * The following field definitions describe the format of the packet log
716 * message sent from the target to the host.
717 * The message consists of a 4-octet header,followed by a variable number
718 * of 32-bit character values.
719 *
720 * |31 24|23 16|15 8|7 0|
721 * |-----------------------------------------------------------|
722 * | | | | msg type |
723 * |-----------------------------------------------------------|
724 * | payload |
725 * |-----------------------------------------------------------|
726 * - MSG_TYPE
727 * Bits 7:0
728 * Purpose: identifies this as a test message
729 * Value: HTT_MSG_TYPE_PACKETLOG
730 */
/* Packet log message body; 3 pad bytes follow the msg_type octet (held
 * in htt_resp_hdr) so the 32-bit payload words stay aligned. */
struct htt_pktlog_msg {
	u8 pad[3];
	__le32 payload[1 /* or more */];
} __packed;
735
/* Firmware rx-reorder debug counters, uploaded inside an
 * htt_stats_conf item. All fields are little-endian on the wire. */
struct htt_dbg_stats_rx_reorder_stats {
	/* Non QoS MPDUs received */
	__le32 deliver_non_qos;

	/* MPDUs received in-order */
	__le32 deliver_in_order;

	/* Flush due to reorder timer expired */
	__le32 deliver_flush_timeout;

	/* Flush due to move out of window */
	__le32 deliver_flush_oow;

	/* Flush due to DELBA */
	__le32 deliver_flush_delba;

	/* MPDUs dropped due to FCS error */
	__le32 fcs_error;

	/* MPDUs dropped due to monitor mode non-data packet */
	__le32 mgmt_ctrl;

	/* MPDUs dropped due to invalid peer */
	__le32 invalid_peer;

	/* MPDUs dropped due to duplication (non aggregation) */
	__le32 dup_non_aggr;

	/* MPDUs dropped due to processed before */
	__le32 dup_past;

	/* MPDUs dropped due to duplicate in reorder queue */
	__le32 dup_in_reorder;

	/* Reorder timeout happened */
	__le32 reorder_timeout;

	/* invalid bar ssn */
	__le32 invalid_bar_ssn;

	/* reorder reset due to bar ssn */
	__le32 ssn_reset;
};
779
/* Firmware WAL (wireless abstraction layer) tx debug counters,
 * uploaded inside an htt_stats_conf item. */
struct htt_dbg_stats_wal_tx_stats {
	/* Num HTT cookies queued to dispatch list */
	__le32 comp_queued;

	/* Num HTT cookies dispatched */
	__le32 comp_delivered;

	/* Num MSDU queued to WAL */
	__le32 msdu_enqued;

	/* Num MPDU queue to WAL */
	__le32 mpdu_enqued;

	/* Num MSDUs dropped by WMM limit */
	__le32 wmm_drop;

	/* Num Local frames queued */
	__le32 local_enqued;

	/* Num Local frames done */
	__le32 local_freed;

	/* Num queued to HW */
	__le32 hw_queued;

	/* Num PPDU reaped from HW */
	__le32 hw_reaped;

	/* Num underruns */
	__le32 underrun;

	/* Num PPDUs cleaned up in TX abort */
	__le32 tx_abort;

	/* Num MPDUs re-queued by SW */
	__le32 mpdus_requed;

	/* excessive retries */
	__le32 tx_ko;

	/* data hw rate code */
	__le32 data_rc;

	/* Scheduler self triggers */
	__le32 self_triggers;

	/* frames dropped due to excessive sw retries */
	__le32 sw_retry_failure;

	/* illegal rate phy errors */
	__le32 illgl_rate_phy_err;

	/* wal pdev continuous xretry */
	__le32 pdev_cont_xretry;

	/* wal pdev tx timeouts */
	__le32 pdev_tx_timeout;

	/* wal pdev resets */
	__le32 pdev_resets;

	/* phy underruns */
	__le32 phy_underrun;

	/* MPDU is more than txop limit */
	__le32 txop_ovf;
} __packed;
846
/* Firmware WAL rx debug counters, uploaded inside an htt_stats_conf item. */
struct htt_dbg_stats_wal_rx_stats {
	/* Cnts any change in ring routing mid-ppdu */
	__le32 mid_ppdu_route_change;

	/* Total number of statuses processed */
	__le32 status_rcvd;

	/* Extra frags on rings 0-3 */
	__le32 r0_frags;
	__le32 r1_frags;
	__le32 r2_frags;
	__le32 r3_frags;

	/* MSDUs / MPDUs delivered to HTT */
	__le32 htt_msdus;
	__le32 htt_mpdus;

	/* MSDUs / MPDUs delivered to local stack */
	__le32 loc_msdus;
	__le32 loc_mpdus;

	/* AMSDUs that have more MSDUs than the status ring size */
	__le32 oversize_amsdu;

	/* Number of PHY errors */
	__le32 phy_errs;

	/* Number of PHY errors drops */
	__le32 phy_err_drop;

	/* Number of mpdu errors - FCS, MIC, ENC etc. */
	__le32 mpdu_errs;
} __packed;
880
/* Placeholder for per-peer WAL stats; currently empty on the wire side. */
struct htt_dbg_stats_wal_peer_stats {
	__le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
} __packed;
884
/* Aggregate WAL pdev stats: tx, rx and (placeholder) peer counters. */
struct htt_dbg_stats_wal_pdev_txrx {
	struct htt_dbg_stats_wal_tx_stats tx_stats;
	struct htt_dbg_stats_wal_rx_stats rx_stats;
	struct htt_dbg_stats_wal_peer_stats peer_stats;
} __packed;
890
/* Rx rate histogram counters, uploaded inside an htt_stats_conf item.
 * Array indices are firmware-defined bins (MCS index, NSS, bandwidth,
 * preamble type) — exact bin meanings are defined by the firmware. */
struct htt_dbg_stats_rx_rate_info {
	__le32 mcs[10];
	__le32 sgi[10];
	__le32 nss[4];
	__le32 stbc[10];
	__le32 bw[3];
	__le32 pream[6];
	__le32 ldpc;
	__le32 txbf;
};
901
902/*
903 * htt_dbg_stats_status -
904 * present - The requested stats have been delivered in full.
905 * This indicates that either the stats information was contained
906 * in its entirety within this message, or else this message
907 * completes the delivery of the requested stats info that was
908 * partially delivered through earlier STATS_CONF messages.
909 * partial - The requested stats have been delivered in part.
910 * One or more subsequent STATS_CONF messages with the same
911 * cookie value will be sent to deliver the remainder of the
912 * information.
913 * error - The requested stats could not be delivered, for example due
914 * to a shortage of memory to construct a message holding the
915 * requested stats.
916 * invalid - The requested stat type is either not recognized, or the
917 * target is configured to not gather the stats type in question.
918 * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
919 * series_done - This special value indicates that no further stats info
920 * elements are present within a series of stats info elems
921 * (within a stats upload confirmation message).
922 */
/* Per-item status in a stats upload confirmation; see the block comment
 * above for the meaning of each value. */
enum htt_dbg_stats_status {
	HTT_DBG_STATS_STATUS_PRESENT = 0,
	HTT_DBG_STATS_STATUS_PARTIAL = 1,
	HTT_DBG_STATS_STATUS_ERROR = 2,
	HTT_DBG_STATS_STATUS_INVALID = 3,

	/* all 1's in the 3-bit status field: terminates the item series */
	HTT_DBG_STATS_STATUS_SERIES_DONE = 7
};
930
931/*
932 * target -> host statistics upload
933 *
934 * The following field definitions describe the format of the HTT target
935 * to host stats upload confirmation message.
936 * The message contains a cookie echoed from the HTT host->target stats
937 * upload request, which identifies which request the confirmation is
938 * for, and a series of tag-length-value stats information elements.
939 * The tag-length header for each stats info element also includes a
940 * status field, to indicate whether the request for the stat type in
941 * question was fully met, partially met, unable to be met, or invalid
942 * (if the stat type in question is disabled in the target).
943 * A special value of all 1's in this status field is used to indicate
944 * the end of the series of stats info elements.
945 *
946 *
947 * |31 16|15 8|7 5|4 0|
948 * |------------------------------------------------------------|
949 * | reserved | msg type |
950 * |------------------------------------------------------------|
951 * | cookie LSBs |
952 * |------------------------------------------------------------|
953 * | cookie MSBs |
954 * |------------------------------------------------------------|
955 * | stats entry length | reserved | S |stat type|
956 * |------------------------------------------------------------|
957 * | |
958 * | type-specific stats info |
959 * | |
960 * |------------------------------------------------------------|
961 * | stats entry length | reserved | S |stat type|
962 * |------------------------------------------------------------|
963 * | |
964 * | type-specific stats info |
965 * | |
966 * |------------------------------------------------------------|
967 * | n/a | reserved | 111 | n/a |
968 * |------------------------------------------------------------|
969 * Header fields:
970 * - MSG_TYPE
971 * Bits 7:0
972 * Purpose: identifies this is a statistics upload confirmation message
973 * Value: 0x9
974 * - COOKIE_LSBS
975 * Bits 31:0
976 * Purpose: Provide a mechanism to match a target->host stats confirmation
977 * message with its preceding host->target stats request message.
978 * Value: LSBs of the opaque cookie specified by the host-side requestor
979 * - COOKIE_MSBS
980 * Bits 31:0
981 * Purpose: Provide a mechanism to match a target->host stats confirmation
982 * message with its preceding host->target stats request message.
983 * Value: MSBs of the opaque cookie specified by the host-side requestor
984 *
985 * Stats Information Element tag-length header fields:
986 * - STAT_TYPE
987 * Bits 4:0
988 * Purpose: identifies the type of statistics info held in the
989 * following information element
990 * Value: htt_dbg_stats_type
991 * - STATUS
992 * Bits 7:5
993 * Purpose: indicate whether the requested stats are present
994 * Value: htt_dbg_stats_status, including a special value (0x7) to mark
995 * the completion of the stats entry series
996 * - LENGTH
997 * Bits 31:16
998 * Purpose: indicate the stats information size
999 * Value: This field specifies the number of bytes of stats information
1000 * that follows the element tag-length header.
1001 * It is expected but not required that this length is a multiple of
1002 * 4 bytes. Even if the length is not an integer multiple of 4, the
1003 * subsequent stats entry header will begin on a 4-byte aligned
1004 * boundary.
1005 */
1006
1007#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F
1008#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB 0
1009#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK 0xE0
1010#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB 5
1011
/* One tag-length-value stats element within a stats upload confirmation
 * (see the message diagram above). */
struct htt_stats_conf_item {
	union {
		u8 info;
		struct {
			u8 stat_type:5; /* %HTT_DBG_STATS_ */
			u8 status:3; /* %HTT_DBG_STATS_STATUS_ */
		} __packed;
	} __packed;
	u8 pad;
	__le16 length;
	u8 payload[0]; /* roundup(length, 4) long */
} __packed;
1024
/* Stats upload confirmation body: the echoed request cookie followed by
 * a series of variable-length items ending with a SERIES_DONE status. */
struct htt_stats_conf {
	u8 pad[3];
	__le32 cookie_lsb;
	__le32 cookie_msb;

	/* each item has variable length! */
	struct htt_stats_conf_item items[0];
} __packed;
1033
1034static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
1035 const struct htt_stats_conf_item *item)
1036{
1037 return (void *)item + sizeof(*item) + roundup(item->length, 4);
1038}
1039/*
1040 * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
1041 *
1042 * The following field definitions describe the format of the HTT host
1043 * to target frag_desc/msdu_ext bank configuration message.
1044 * The message contains the based address and the min and max id of the
1045 * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
1046 * MSDU_EXT/FRAG_DESC.
1047 * HTT will use id in HTT descriptor instead sending the frag_desc_ptr.
1048 * For QCA988X HW the firmware will use fragment_desc_ptr but in WIFI2.0
1049 * the hardware does the mapping/translation.
1050 *
1051 * Total banks that can be configured is configured to 16.
1052 *
1053 * This should be called before any TX has be initiated by the HTT
1054 *
1055 * |31 16|15 8|7 5|4 0|
1056 * |------------------------------------------------------------|
1057 * | DESC_SIZE | NUM_BANKS | RES |SWP|pdev| msg type |
1058 * |------------------------------------------------------------|
1059 * | BANK0_BASE_ADDRESS |
1060 * |------------------------------------------------------------|
1061 * | ... |
1062 * |------------------------------------------------------------|
1063 * | BANK15_BASE_ADDRESS |
1064 * |------------------------------------------------------------|
1065 * | BANK0_MAX_ID | BANK0_MIN_ID |
1066 * |------------------------------------------------------------|
1067 * | ... |
1068 * |------------------------------------------------------------|
1069 * | BANK15_MAX_ID | BANK15_MIN_ID |
1070 * |------------------------------------------------------------|
1071 * Header fields:
1072 * - MSG_TYPE
1073 * Bits 7:0
1074 * Value: 0x6
1075 * - BANKx_BASE_ADDRESS
1076 * Bits 31:0
1077 * Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
1078 * bank physical/bus address.
1079 * - BANKx_MIN_ID
1080 * Bits 15:0
1081 * Purpose: Provide a mechanism to specify the min index that needs to
1082 * mapped.
1083 * - BANKx_MAX_ID
1084 * Bits 31:16
1085 * Purpose: Provide a mechanism to specify the max index that needs to
1086 *
1087 */
/* Min/max MSDU id range mapped by one fragment descriptor bank. */
struct htt_frag_desc_bank_id {
	__le16 bank_min_id;
	__le16 bank_max_id;
} __packed;
1092
1093/* real is 16 but it wouldn't fit in the max htt message size
1094 * so we use a conservatively safe value for now */
1095#define HTT_FRAG_DESC_BANK_MAX 4
1096
1097#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
1098#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
1099#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2)
1100
/* host -> target frag desc / msdu ext bank configuration message body
 * (see the layout diagram above). */
struct htt_frag_desc_bank_cfg {
	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
	u8 num_banks;
	u8 desc_size;
	__le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
} __packed;
1108
1109union htt_rx_pn_t {
1110 /* WEP: 24-bit PN */
1111 u32 pn24;
1112
1113 /* TKIP or CCMP: 48-bit PN */
1114 u_int64_t pn48;
1115
1116 /* WAPI: 128-bit PN */
1117 u_int64_t pn128[2];
1118};
1119
/* host -> target HTT message: common header followed by the
 * type-specific body selected by hdr.msg_type. */
struct htt_cmd {
	struct htt_cmd_hdr hdr;
	union {
		struct htt_ver_req ver_req;
		struct htt_mgmt_tx_desc mgmt_tx;
		struct htt_data_tx_desc data_tx;
		struct htt_rx_ring_setup rx_setup;
		struct htt_stats_req stats_req;
		struct htt_oob_sync_req oob_sync_req;
		struct htt_aggr_conf aggr_conf;
		struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
	};
} __packed;
1133
/* target -> host HTT message: common header followed by the
 * type-specific body selected by hdr.msg_type. */
struct htt_resp {
	struct htt_resp_hdr hdr;
	union {
		struct htt_ver_resp ver_resp;
		struct htt_mgmt_tx_completion mgmt_tx_completion;
		struct htt_data_tx_completion data_tx_completion;
		struct htt_rx_indication rx_ind;
		struct htt_rx_fragment_indication rx_frag_ind;
		struct htt_rx_peer_map peer_map;
		struct htt_rx_peer_unmap peer_unmap;
		struct htt_rx_flush rx_flush;
		struct htt_rx_addba rx_addba;
		struct htt_rx_delba rx_delba;
		struct htt_security_indication security_indication;
		struct htt_rc_update rc_update;
		struct htt_rx_test rx_test;
		struct htt_pktlog_msg pktlog_msg;
		struct htt_stats_conf stats_conf;
	};
} __packed;
1154
1155
1156/*** host side structures follow ***/
1157
/* Host-side record of a completed tx MSDU. */
struct htt_tx_done {
	u32 msdu_id;
	bool discard;
	bool no_ack;
};
1163
/* Host-side (CPU-order) form of an htt_rx_peer_map wire event. */
struct htt_peer_map_event {
	u8 vdev_id;
	u16 peer_id;
	u8 addr[ETH_ALEN];
};
1169
/* Host-side (CPU-order) form of an htt_rx_peer_unmap wire event. */
struct htt_peer_unmap_event {
	u16 peer_id;
};
1173
/* Host-side per-frame rx metadata handed up alongside the skb. */
struct htt_rx_info {
	struct sk_buff *skb;
	enum htt_rx_mpdu_status status;
	enum htt_rx_mpdu_encrypt_type encrypt_type;
	s8 signal;
	/* raw rate info words from the rx descriptor */
	struct {
		u8 info0;
		u32 info1;
		u32 info2;
	} rate;
	bool fcs_err;
};
1186
/* Host-side HTT state for one device instance. */
struct ath10k_htt {
	struct ath10k *ar;
	/* HTC endpoint carrying HTT traffic */
	enum ath10k_htc_ep_id eid;

	/* used to size the rx ring and its fill level (see htt_rx.c) */
	int max_throughput_mbps;
	u8 target_version_major;
	u8 target_version_minor;
	/* completed once the target's HTT version has been received */
	struct completion target_version_received;

	struct {
		/*
		 * Ring of network buffer objects - This ring is
		 * used exclusively by the host SW. This ring
		 * mirrors the dev_addrs_ring that is shared
		 * between the host SW and the MAC HW. The host SW
		 * uses this netbufs ring to locate the network
		 * buffer objects whose data buffers the HW has
		 * filled.
		 */
		struct sk_buff **netbufs_ring;
		/*
		 * Ring of buffer addresses -
		 * This ring holds the "physical" device address of the
		 * rx buffers the host SW provides for the MAC HW to
		 * fill.
		 */
		__le32 *paddrs_ring;

		/*
		 * Base address of ring, as a "physical" device address
		 * rather than a CPU address.
		 */
		dma_addr_t base_paddr;

		/* how many elems in the ring (power of 2) */
		int size;

		/* size - 1 */
		unsigned size_mask;

		/* how many rx buffers to keep in the ring */
		int fill_level;

		/* how many rx buffers (full+empty) are in the ring */
		int fill_cnt;

		/*
		 * alloc_idx - where HTT SW has deposited empty buffers
		 * This is allocated in consistent mem, so that the FW can
		 * read this variable, and program the HW's FW_IDX reg with
		 * the value of this shadow register.
		 */
		struct {
			__le32 *vaddr;
			dma_addr_t paddr;
		} alloc_idx;

		/* where HTT SW has processed bufs filled by rx MAC DMA */
		struct {
			unsigned msdu_payld;
		} sw_rd_idx;

		/*
		 * refill_retry_timer - timer triggered when the ring is
		 * not refilled to the level expected
		 */
		struct timer_list refill_retry_timer;

		/* Protects access to all rx ring buffer state variables */
		spinlock_t lock;
	} rx_ring;

	unsigned int prefetch_len;

	/* Protects access to %pending_tx, %used_msdu_ids */
	spinlock_t tx_lock;
	int max_num_pending_tx;
	int num_pending_tx;
	/* msdu_id -> skb lookup for in-flight tx frames */
	struct sk_buff **pending_tx;
	unsigned long *used_msdu_ids; /* bitmap */
	/* woken when num_pending_tx drains (see tx path) */
	wait_queue_head_t empty_tx_wq;

	/* set if host-fw communication goes haywire
	 * used to avoid further failures */
	bool rx_confused;
};
1273
1274#define RX_HTT_HDR_STATUS_LEN 64
1275
/* This structure layout is programmed via rx ring setup
 * so that FW knows how to transfer the rx descriptor to the host.
 * Buffers like this are placed on the rx ring. */
struct htt_rx_desc {
	union {
		/* This field is filled on the host using the msdu buffer
		 * from htt_rx_indication */
		struct fw_rx_desc_base fw_desc;
		u32 pad;
	} __packed;
	/* hardware-filled rx status sections, in ring-setup order */
	struct {
		struct rx_attention attention;
		struct rx_frag_info frag_info;
		struct rx_mpdu_start mpdu_start;
		struct rx_msdu_start msdu_start;
		struct rx_msdu_end msdu_end;
		struct rx_mpdu_end mpdu_end;
		struct rx_ppdu_start ppdu_start;
		struct rx_ppdu_end ppdu_end;
	} __packed;
	/* raw 802.11 header snapshot captured by the hardware */
	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
	/* frame payload follows the descriptor in the same buffer */
	u8 msdu_payload[0];
};
1299
1300#define HTT_RX_DESC_ALIGN 8
1301
1302#define HTT_MAC_ADDR_LEN 6
1303
1304/*
1305 * FIX THIS
1306 * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
1307 * rounded up to a cache line size.
1308 */
1309#define HTT_RX_BUF_SIZE 1920
1310#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
1311
1312/*
1313 * DMA_MAP expects the buffer to be an integral number of cache lines.
1314 * Rather than checking the actual cache line size, this code makes a
1315 * conservative estimate of what the cache line size could be.
1316 */
1317#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
1318#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
1319
1320struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar);
1321int ath10k_htt_attach_target(struct ath10k_htt *htt);
1322void ath10k_htt_detach(struct ath10k_htt *htt);
1323
1324int ath10k_htt_tx_attach(struct ath10k_htt *htt);
1325void ath10k_htt_tx_detach(struct ath10k_htt *htt);
1326int ath10k_htt_rx_attach(struct ath10k_htt *htt);
1327void ath10k_htt_rx_detach(struct ath10k_htt *htt);
1328void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
1329void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
1330int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
1331int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
1332
1333void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
1334int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
1335void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
1336int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
1337int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
1338#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
new file mode 100644
index 000000000000..de058d7adca8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -0,0 +1,1167 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "htc.h"
19#include "htt.h"
20#include "txrx.h"
21#include "debug.h"
22
23#include <linux/log2.h>
24
25/* slightly larger than one large A-MPDU */
26#define HTT_RX_RING_SIZE_MIN 128
27
28/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
29#define HTT_RX_RING_SIZE_MAX 2048
30
31#define HTT_RX_AVG_FRM_BYTES 1000
32
33/* ms, very conservative */
34#define HTT_RX_HOST_LATENCY_MAX_MS 20
35
36/* ms, conservative */
37#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
38
39/* when under memory pressure rx ring refill may fail and needs a retry */
40#define HTT_RX_RING_REFILL_RETRY_MS 50
41
42static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
43{
44 int size;
45
46 /*
47 * It is expected that the host CPU will typically be able to
48 * service the rx indication from one A-MPDU before the rx
49 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
50 * later. However, the rx ring should be sized very conservatively,
51 * to accomodate the worst reasonable delay before the host CPU
52 * services a rx indication interrupt.
53 *
54 * The rx ring need not be kept full of empty buffers. In theory,
55 * the htt host SW can dynamically track the low-water mark in the
56 * rx ring, and dynamically adjust the level to which the rx ring
57 * is filled with empty buffers, to dynamically meet the desired
58 * low-water mark.
59 *
60 * In contrast, it's difficult to resize the rx ring itself, once
61 * it's in use. Thus, the ring itself should be sized very
62 * conservatively, while the degree to which the ring is filled
63 * with empty buffers should be sized moderately conservatively.
64 */
65
66 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
67 size =
68 htt->max_throughput_mbps +
69 1000 /
70 (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
71
72 if (size < HTT_RX_RING_SIZE_MIN)
73 size = HTT_RX_RING_SIZE_MIN;
74
75 if (size > HTT_RX_RING_SIZE_MAX)
76 size = HTT_RX_RING_SIZE_MAX;
77
78 size = roundup_pow_of_two(size);
79
80 return size;
81}
82
83static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
84{
85 int size;
86
87 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
88 size =
89 htt->max_throughput_mbps *
90 1000 /
91 (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
92
93 /*
94 * Make sure the fill level is at least 1 less than the ring size.
95 * Leaving 1 element empty allows the SW to easily distinguish
96 * between a full ring vs. an empty ring.
97 */
98 if (size >= htt->rx_ring.size)
99 size = htt->rx_ring.size - 1;
100
101 return size;
102}
103
/* DMA-unmap and free every rx buffer currently accounted in fill_cnt.
 *
 * NOTE(review): this walks netbufs_ring indices [0, fill_cnt), which only
 * matches the posted buffers if the ring was filled starting at index 0
 * without wrap-around — confirm callers only invoke this right after a
 * fresh/failed initial fill.
 */
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		/* undo the DMA_FROM_DEVICE mapping made when posting */
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}
121
/* Post up to @num fresh rx buffers to the ring, starting at the shared
 * firmware alloc index. Returns 0 on success or -ENOMEM if allocation
 * or DMA mapping fails part-way; buffers posted before the failure stay
 * on the ring and fill_cnt reflects them.
 */
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* resume writing at the index the firmware last saw */
	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
	while (num > 0) {
		/* over-allocate so the descriptor can be realigned below */
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		/* align skb->data to HTT_RX_DESC_ALIGN for the HW */
		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		/* record skb and its device address at the same ring slot */
		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/* publish the new alloc index so the FW can use the fresh buffers */
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}
170
/* Lock-checked wrapper around __ath10k_htt_rx_ring_fill_n(); callers
 * must hold rx_ring.lock. */
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
176
/* Top up the rx ring to its configured fill level; on allocation
 * failure, arm a retry timer instead of failing hard. */
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_to_fill;

	spin_lock_bh(&htt->rx_ring.lock);
	num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
196
/* Timer callback: retry the ring refill that previously hit -ENOMEM. */
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}
202
/* Number of filled ring entries the host has not yet consumed:
 * the ring-wrapped distance from the host sw read index to the
 * shared firmware alloc index. */
static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}
208
/* Tear down the rx path: stop the refill timer, free every still-posted
 * rx buffer, and release the ring and alloc-index DMA memory. */
void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	/* walk from the host read index up to the FW alloc index,
	 * unmapping and freeing each posted buffer */
	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
		struct sk_buff *skb =
				htt->rx_ring.netbufs_ring[sw_rd_idx];
		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
		sw_rd_idx++;
		sw_rd_idx &= htt->rx_ring.size_mask;
	}

	/* NOTE(review): sizeof(htt->rx_ring.paddrs_ring) is the size of the
	 * *pointer*, not of a __le32 ring element; this presumably mirrors
	 * the allocation in the attach path — confirm, and consider
	 * sizeof(*htt->rx_ring.paddrs_ring) on both sides (dma_free_coherent
	 * must be passed the same size as the matching alloc). */
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
241
/* Pop the next filled buffer off the rx ring in FIFO order and advance
 * the SW read index. Takes rx_ring.lock. If the ring is empty this only
 * warns and still returns whatever is at the read index - callers depend
 * on the FW not over-reporting completed buffers. */
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	spin_lock_bh(&htt->rx_ring.lock);

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	spin_unlock_bh(&htt->rx_ring.lock);
	return msdu;
}
263
264static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
265{
266 struct sk_buff *next;
267
268 while (skb) {
269 next = skb->next;
270 dev_kfree_skb_any(skb);
271 skb = next;
272 }
273}
274
/* Pop one (A-)MSDU off the rx ring as a chain of skbs linked via
 * skb->next.
 *
 * @fw_desc/@fw_desc_len: cursor over per-MSDU FW rx descriptor bytes from
 *	the rx indication message; advanced as descriptors are consumed.
 * @head_msdu/@tail_msdu: out parameters for the popped skb chain; head is
 *	set to NULL (and rx_confused latched) if the HW descriptor was not
 *	marked done.
 *
 * Returns non-zero iff ring2 continuation buffers were linked in
 * (msdu_chaining). Deliberately does NOT refill the ring - see the
 * comment before the return.
 */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	/* rx_confused is latched below on a fatal descriptor error;
	 * once set, all further rx is refused until re-attach. */
	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return 0;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized AMSDU happened, FW will lost
			 * some of MSDU status - in this case, the FW
			 * descriptors provided will be less than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that it will still deliver to
			 * upper stack, if no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs in the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			/* Continuation buffers carry the remainder of an
			 * MSDU larger than one rx buffer. */
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
					next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		if (msdu_len > 0) {
			/* This may suggest FW bug? */
			ath10k_warn("htt rx msdu len not consumed (%d)\n",
				    msdu_len);
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
442
443int ath10k_htt_rx_attach(struct ath10k_htt *htt)
444{
445 dma_addr_t paddr;
446 void *vaddr;
447 struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
448
449 htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
450 if (!is_power_of_2(htt->rx_ring.size)) {
451 ath10k_warn("htt rx ring size is not power of 2\n");
452 return -EINVAL;
453 }
454
455 htt->rx_ring.size_mask = htt->rx_ring.size - 1;
456
457 /*
458 * Set the initial value for the level to which the rx ring
459 * should be filled, based on the max throughput and the
460 * worst likely latency for the host to fill the rx ring
461 * with new buffers. In theory, this fill level can be
462 * dynamically adjusted from the initial value set here, to
463 * reflect the actual host latency rather than a
464 * conservative assumption about the host latency.
465 */
466 htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
467
468 htt->rx_ring.netbufs_ring =
469 kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
470 GFP_KERNEL);
471 if (!htt->rx_ring.netbufs_ring)
472 goto err_netbuf;
473
474 vaddr = dma_alloc_coherent(htt->ar->dev,
475 (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
476 &paddr, GFP_DMA);
477 if (!vaddr)
478 goto err_dma_ring;
479
480 htt->rx_ring.paddrs_ring = vaddr;
481 htt->rx_ring.base_paddr = paddr;
482
483 vaddr = dma_alloc_coherent(htt->ar->dev,
484 sizeof(*htt->rx_ring.alloc_idx.vaddr),
485 &paddr, GFP_DMA);
486 if (!vaddr)
487 goto err_dma_idx;
488
489 htt->rx_ring.alloc_idx.vaddr = vaddr;
490 htt->rx_ring.alloc_idx.paddr = paddr;
491 htt->rx_ring.sw_rd_idx.msdu_payld = 0;
492 *htt->rx_ring.alloc_idx.vaddr = 0;
493
494 /* Initialize the Rx refill retry timer */
495 setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
496
497 spin_lock_init(&htt->rx_ring.lock);
498
499 htt->rx_ring.fill_cnt = 0;
500 if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
501 goto err_fill_ring;
502
503 ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
504 htt->rx_ring.size, htt->rx_ring.fill_level);
505 return 0;
506
507err_fill_ring:
508 ath10k_htt_rx_ring_free(htt);
509 dma_free_coherent(htt->ar->dev,
510 sizeof(*htt->rx_ring.alloc_idx.vaddr),
511 htt->rx_ring.alloc_idx.vaddr,
512 htt->rx_ring.alloc_idx.paddr);
513err_dma_idx:
514 dma_free_coherent(htt->ar->dev,
515 (htt->rx_ring.size *
516 sizeof(htt->rx_ring.paddrs_ring)),
517 htt->rx_ring.paddrs_ring,
518 htt->rx_ring.base_paddr);
519err_dma_ring:
520 kfree(htt->rx_ring.netbufs_ring);
521err_netbuf:
522 return -ENOMEM;
523}
524
/* Length in bytes of the per-MPDU crypto header (IV/params) that follows
 * the 802.11 header for the given encryption type. Returns 0 (with a
 * warning) for values outside the enum. No default case: keeps the
 * compiler able to warn when new enum values are added. */
static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}
544
/* Length in bytes of the per-MPDU crypto trailer (ICV/MIC) at the end of
 * the frame for the given encryption type. Returns 0 (with a warning)
 * for values outside the enum. No default case on purpose - see
 * ath10k_htt_rx_crypto_param_len(). */
static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}
564
565/* Applies for first msdu in chain, before altering it. */
566static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
567{
568 struct htt_rx_desc *rxd;
569 enum rx_msdu_decap_format fmt;
570
571 rxd = (void *)skb->data - sizeof(*rxd);
572 fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
573 RX_MSDU_START_INFO1_DECAP_FORMAT);
574
575 if (fmt == RX_MSDU_DECAP_RAW)
576 return (void *)skb->data;
577 else
578 return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
579}
580
581/* This function only applies for first msdu in an msdu chain */
582static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
583{
584 if (ieee80211_is_data_qos(hdr->frame_control)) {
585 u8 *qc = ieee80211_get_qos_ctl(hdr);
586 if (qc[0] & 0x80)
587 return true;
588 }
589 return false;
590}
591
/* Flatten a chain of decapped A-MSDU subframe skbs (info->skb, linked via
 * skb->next) into one newly allocated skb containing the original 802.11
 * header followed by re-inserted subframe headers and payloads.
 *
 * On success replaces info->skb with the flattened frame, sets
 * info->encrypt_type, and frees the original chain. Returns -ENOTSUPP
 * for decap combinations that are not handled, -ENOMEM on allocation
 * failure (in both cases the chain past the head is freed).
 */
static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
			       struct htt_rx_info *info)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *amsdu;
	struct sk_buff *first;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = info->skb;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	unsigned int hdr_len;
	int crypto_len;

	/* decap format and encryption type come from the head msdu's
	 * rx descriptor, located just before skb->data */
	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* FIXME: No idea what assumptions are safe here. Need logs */
	if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
	    (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
		return -ENOTSUPP;
	}

	/* A-MSDU max is a little less than 8K */
	amsdu = dev_alloc_skb(8*1024);
	if (!amsdu) {
		ath10k_warn("A-MSDU allocation failed\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
		return -ENOMEM;
	}

	if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
		int hdrlen;

		/* copy the original 802.11 header from the rx status area */
		hdr = (void *)rxd->rx_hdr_status;
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
	}

	first = skb;
	while (skb) {
		void *decap_hdr;
		int decap_len = 0;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
				RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		if (skb == first) {
			/* We receive linked A-MSDU subframe skbuffs. The
			 * first one contains the original 802.11 header (and
			 * possible crypto param) in the RX descriptor. The
			 * A-MSDU subframe header follows that. Each part is
			 * aligned to 4 byte boundary. */

			hdr = (void *)amsdu->data;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			crypto_len = ath10k_htt_rx_crypto_param_len(enctype);

			decap_hdr += roundup(hdr_len, 4);
			decap_hdr += roundup(crypto_len, 4);
		}

		if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
			/* Ethernet2 decap inserts ethernet header in place of
			 * A-MSDU subframe header. */
			skb_pull(skb, 6 + 6 + 2);

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			/* Ethernet2 decap also strips the LLC/SNAP so we need
			 * to re-insert it. The LLC/SNAP follows A-MSDU
			 * subframe header. */
			/* FIXME: Not all LLCs are 8 bytes long */
			decap_len += 8;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
			/* Native Wifi decap inserts regular 802.11 header
			 * in place of A-MSDU subframe header. */
			hdr = (struct ieee80211_hdr *)skb->data;
			skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_RAW)
			skb_trim(skb, skb->len - 4); /* remove FCS */

		memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);

		/* A-MSDU subframes are padded to 4bytes
		 * but relative to first subframe, not the whole MPDU */
		if (skb->next && ((decap_len + skb->len) & 3)) {
			int padlen = 4 - ((decap_len + skb->len) & 3);
			memset(skb_put(amsdu, padlen), 0, padlen);
		}

		skb = skb->next;
	}

	info->skb = amsdu;
	info->encrypt_type = enctype;

	/* source chain fully copied into amsdu; free it */
	ath10k_htt_rx_free_msdu_chain(first);

	return 0;
}
712
/* Normalize a single (non-A-MSDU) decapped frame in info->skb back into
 * 802.11 form: strip FCS / ethernet headers per decap format, re-insert
 * LLC/SNAP and the original 802.11 header where decap removed them, and
 * record the encryption type. Always returns 0. */
static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
	struct sk_buff *skb = info->skb;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;

	/* This shouldn't happen. If it does than it may be a FW bug. */
	if (skb->next) {
		ath10k_warn("received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	/* rx descriptor sits immediately before the payload */
	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	/* original 802.11 header preserved in the rx status area */
	hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - 4);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* nothing to do here */
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* macaddr[6] + macaddr[6] + ethertype[2] */
		skb_pull(skb, 6 + 6 + 2);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* macaddr[6] + macaddr[6] + len[2] */
		/* we don't need this for non-A-MSDU */
		skb_pull(skb, 6 + 6 + 2);
		break;
	}

	if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
		void *llc;
		int llclen;

		/* re-insert the LLC/SNAP that ethernet decap stripped; it
		 * follows the (4-byte aligned) 802.11 header and crypto
		 * params in the rx status area */
		llclen = 8;
		llc  = hdr;
		llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
		llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_push(skb, llclen);
		memcpy(skb->data, llc, llclen);
	}

	if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
		/* re-insert the original 802.11 header */
		int len = ieee80211_hdrlen(hdr->frame_control);
		skb_push(skb, len);
		memcpy(skb->data, hdr, len);
	}

	info->skb = skb;
	info->encrypt_type = enctype;
	return 0;
}
777
778static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
779{
780 struct htt_rx_desc *rxd;
781 u32 flags;
782
783 rxd = (void *)skb->data - sizeof(*rxd);
784 flags = __le32_to_cpu(rxd->attention.flags);
785
786 if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
787 return true;
788
789 return false;
790}
791
792static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
793{
794 struct htt_rx_desc *rxd;
795 u32 flags;
796
797 rxd = (void *)skb->data - sizeof(*rxd);
798 flags = __le32_to_cpu(rxd->attention.flags);
799
800 if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
801 return true;
802
803 return false;
804}
805
/* Process an HTT rx indication: pop each reported MPDU off the rx ring,
 * filter out bad/unsupported frames (empty, decrypt error, mgmt handled
 * via WMI, bad status, chained), fill in rx status (FCS, signal, rate),
 * normalize via the amsdu/msdu paths and hand the result to
 * ath10k_process_rx(). Replenishes the rx ring once at the end. */
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct htt_rx_info info;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, j;
	int ret;

	memset(&info, 0, sizeof(info));

	/* per-MSDU FW descriptor cursor, consumed by amsdu_pop */
	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		info.status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;
			enum htt_rx_mpdu_status status;
			int msdu_chaining;

			msdu_head = NULL;
			msdu_tail = NULL;
			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
							 &fw_desc,
							 &fw_desc_len,
							 &msdu_head,
							 &msdu_tail);

			if (!msdu_head) {
				ath10k_warn("htt rx no data!\n");
				continue;
			}

			if (msdu_head->len == 0) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx dropping due to zero-len\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			status = info.status;

			/* Skip mgmt frames while we handle this in WMI */
			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* in monitor mode pass everything up regardless of
			 * status; TKIP MIC errors must reach mac80211 */
			if (status != HTT_RX_IND_MPDU_STATUS_OK &&
			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
			    !htt->ar->monitor_enabled) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx ignoring frame w/ status %d\n",
					   status);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* FIXME: we do not support chaining yet.
			 * this needs investigation */
			if (msdu_chaining) {
				ath10k_warn("msdu_chaining is true\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			info.skb = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			/* combined_rssi is relative to the noise floor */
			info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
			info.signal += rx->ppdu.combined_rssi;

			info.rate.info0 = rx->ppdu.info0;
			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ret = ath10k_htt_rx_amsdu(htt, &info);
			else
				ret = ath10k_htt_rx_msdu(htt, &info);

			if (ret && !info.fcs_err) {
				ath10k_warn("error processing msdus %d\n", ret);
				dev_kfree_skb_any(info.skb);
				continue;
			}

			if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
				ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
					info.skb->data, info.skb->len);
			ath10k_process_rx(htt->ar, &info);
		}
	}

	/* refill once for the whole indication - see note in amsdu_pop */
	ath10k_htt_rx_msdu_buff_replenish(htt);
}
924
/* Process an HTT rx fragment indication (one 802.11 fragment per call):
 * pop the fragment off the rx ring, reject aggregated or non-RAW or
 * decrypt-failed fragments, strip the crypto header (by sliding the
 * 802.11 header forward) and the FCS/crypto/MIC trailer, then deliver
 * via ath10k_process_rx(). */
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct htt_rx_info info = {};
	struct ieee80211_hdr *hdr;
	int msdu_chaining;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;
	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
						&msdu_head, &msdu_tail);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (!msdu_head) {
		ath10k_warn("htt rx frag no data\n");
		return;
	}

	/* a fragment must be exactly one unchained buffer */
	if (msdu_chaining || msdu_head != msdu_tail) {
		ath10k_warn("aggregation with fragmentation?!\n");
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we dont support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	info.skb = msdu_head;
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
				RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* TKIP MIC errors are still delivered (as a status) so mac80211
	 * can run countermeasures */
	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
	}

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

		/* It is more efficient to move the header than the payload */
		memmove((void *)info.skb->data + paramlen,
			(void *)info.skb->data,
			hdrlen);
		skb_pull(info.skb, paramlen);
		hdr = (struct ieee80211_hdr *)info.skb->data;
	}

	/* remove trailing FCS */
	trim  = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > info.skb->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	skb_trim(info.skb, info.skb->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
			info.skb->data, info.skb->len);
	ath10k_process_rx(htt->ar, &info);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}
1036
1037void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1038{
1039 struct ath10k_htt *htt = ar->htt;
1040 struct htt_resp *resp = (struct htt_resp *)skb->data;
1041
1042 /* confirm alignment */
1043 if (!IS_ALIGNED((unsigned long)skb->data, 4))
1044 ath10k_warn("unaligned htt message, expect trouble\n");
1045
1046 ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
1047 resp->hdr.msg_type);
1048 switch (resp->hdr.msg_type) {
1049 case HTT_T2H_MSG_TYPE_VERSION_CONF: {
1050 htt->target_version_major = resp->ver_resp.major;
1051 htt->target_version_minor = resp->ver_resp.minor;
1052 complete(&htt->target_version_received);
1053 break;
1054 }
1055 case HTT_T2H_MSG_TYPE_RX_IND: {
1056 ath10k_htt_rx_handler(htt, &resp->rx_ind);
1057 break;
1058 }
1059 case HTT_T2H_MSG_TYPE_PEER_MAP: {
1060 struct htt_peer_map_event ev = {
1061 .vdev_id = resp->peer_map.vdev_id,
1062 .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
1063 };
1064 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
1065 ath10k_peer_map_event(htt, &ev);
1066 break;
1067 }
1068 case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
1069 struct htt_peer_unmap_event ev = {
1070 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
1071 };
1072 ath10k_peer_unmap_event(htt, &ev);
1073 break;
1074 }
1075 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
1076 struct htt_tx_done tx_done = {};
1077 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
1078
1079 tx_done.msdu_id =
1080 __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
1081
1082 switch (status) {
1083 case HTT_MGMT_TX_STATUS_OK:
1084 break;
1085 case HTT_MGMT_TX_STATUS_RETRY:
1086 tx_done.no_ack = true;
1087 break;
1088 case HTT_MGMT_TX_STATUS_DROP:
1089 tx_done.discard = true;
1090 break;
1091 }
1092
1093 ath10k_txrx_tx_completed(htt, &tx_done);
1094 break;
1095 }
1096 case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
1097 struct htt_tx_done tx_done = {};
1098 int status = MS(resp->data_tx_completion.flags,
1099 HTT_DATA_TX_STATUS);
1100 __le16 msdu_id;
1101 int i;
1102
1103 switch (status) {
1104 case HTT_DATA_TX_STATUS_NO_ACK:
1105 tx_done.no_ack = true;
1106 break;
1107 case HTT_DATA_TX_STATUS_OK:
1108 break;
1109 case HTT_DATA_TX_STATUS_DISCARD:
1110 case HTT_DATA_TX_STATUS_POSTPONE:
1111 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1112 tx_done.discard = true;
1113 break;
1114 default:
1115 ath10k_warn("unhandled tx completion status %d\n",
1116 status);
1117 tx_done.discard = true;
1118 break;
1119 }
1120
1121 ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
1122 resp->data_tx_completion.num_msdus);
1123
1124 for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1125 msdu_id = resp->data_tx_completion.msdus[i];
1126 tx_done.msdu_id = __le16_to_cpu(msdu_id);
1127 ath10k_txrx_tx_completed(htt, &tx_done);
1128 }
1129 break;
1130 }
1131 case HTT_T2H_MSG_TYPE_SEC_IND: {
1132 struct ath10k *ar = htt->ar;
1133 struct htt_security_indication *ev = &resp->security_indication;
1134
1135 ath10k_dbg(ATH10K_DBG_HTT,
1136 "sec ind peer_id %d unicast %d type %d\n",
1137 __le16_to_cpu(ev->peer_id),
1138 !!(ev->flags & HTT_SECURITY_IS_UNICAST),
1139 MS(ev->flags, HTT_SECURITY_TYPE));
1140 complete(&ar->install_key_done);
1141 break;
1142 }
1143 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
1144 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
1145 skb->data, skb->len);
1146 ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
1147 break;
1148 }
1149 case HTT_T2H_MSG_TYPE_TEST:
1150 /* FIX THIS */
1151 break;
1152 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
1153 case HTT_T2H_MSG_TYPE_STATS_CONF:
1154 case HTT_T2H_MSG_TYPE_RX_ADDBA:
1155 case HTT_T2H_MSG_TYPE_RX_DELBA:
1156 case HTT_T2H_MSG_TYPE_RX_FLUSH:
1157 default:
1158 ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
1159 resp->hdr.msg_type);
1160 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
1161 skb->data, skb->len);
1162 break;
1163 };
1164
1165 /* Free the indication buffer */
1166 dev_kfree_skb_any(skb);
1167}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
new file mode 100644
index 000000000000..ef79106db247
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -0,0 +1,510 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/etherdevice.h>
19#include "htt.h"
20#include "mac.h"
21#include "hif.h"
22#include "txrx.h"
23#include "debug.h"
24
25void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
26{
27 htt->num_pending_tx--;
28 if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
29 ieee80211_wake_queues(htt->ar->hw);
30}
31
/* Locked wrapper around __ath10k_htt_tx_dec_pending() for callers that do
 * not already hold htt->tx_lock. */
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}
38
39static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
40{
41 int ret = 0;
42
43 spin_lock_bh(&htt->tx_lock);
44
45 if (htt->num_pending_tx >= htt->max_num_pending_tx) {
46 ret = -EBUSY;
47 goto exit;
48 }
49
50 htt->num_pending_tx++;
51 if (htt->num_pending_tx == htt->max_num_pending_tx)
52 ieee80211_stop_queues(htt->ar->hw);
53
54exit:
55 spin_unlock_bh(&htt->tx_lock);
56 return ret;
57}
58
59int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
60{
61 int msdu_id;
62
63 lockdep_assert_held(&htt->tx_lock);
64
65 msdu_id = find_first_zero_bit(htt->used_msdu_ids,
66 htt->max_num_pending_tx);
67 if (msdu_id == htt->max_num_pending_tx)
68 return -ENOBUFS;
69
70 ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
71 __set_bit(msdu_id, htt->used_msdu_ids);
72 return msdu_id;
73}
74
/* Return an msdu id obtained from ath10k_htt_tx_alloc_msdu_id() to the
 * bitmap. Caller must hold htt->tx_lock. Freeing an id that was never
 * allocated is only warned about; clearing an already-clear bit is a
 * harmless no-op. */
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!test_bit(msdu_id, htt->used_msdu_ids))
		ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
	__clear_bit(msdu_id, htt->used_msdu_ids);
}
85
86int ath10k_htt_tx_attach(struct ath10k_htt *htt)
87{
88 u8 pipe;
89
90 spin_lock_init(&htt->tx_lock);
91 init_waitqueue_head(&htt->empty_tx_wq);
92
93 /* At the beginning free queue number should hint us the maximum
94 * queue length */
95 pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
96 htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
97 pipe);
98
99 ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
100 htt->max_num_pending_tx);
101
102 htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
103 htt->max_num_pending_tx, GFP_KERNEL);
104 if (!htt->pending_tx)
105 return -ENOMEM;
106
107 htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
108 BITS_TO_LONGS(htt->max_num_pending_tx),
109 GFP_KERNEL);
110 if (!htt->used_msdu_ids) {
111 kfree(htt->pending_tx);
112 return -ENOMEM;
113 }
114
115 return 0;
116}
117
118static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
119{
120 struct sk_buff *txdesc;
121 int msdu_id;
122
123 /* No locks needed. Called after communication with the device has
124 * been stopped. */
125
126 for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
127 if (!test_bit(msdu_id, htt->used_msdu_ids))
128 continue;
129
130 txdesc = htt->pending_tx[msdu_id];
131 if (!txdesc)
132 continue;
133
134 ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
135 msdu_id);
136
137 if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
138 ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
139
140 ATH10K_SKB_CB(txdesc)->htt.discard = true;
141 ath10k_txrx_tx_unref(htt, txdesc);
142 }
143}
144
145void ath10k_htt_tx_detach(struct ath10k_htt *htt)
146{
147 ath10k_htt_tx_cleanup_pending(htt);
148 kfree(htt->pending_tx);
149 kfree(htt->used_msdu_ids);
150 return;
151}
152
153void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
154{
155 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
156 struct ath10k_htt *htt = ar->htt;
157
158 if (skb_cb->htt.is_conf) {
159 dev_kfree_skb_any(skb);
160 return;
161 }
162
163 if (skb_cb->is_aborted) {
164 skb_cb->htt.discard = true;
165
166 /* if the skbuff is aborted we need to make sure we'll free up
167 * the tx resources, we can't simply run tx_unref() 2 times
168 * because if htt tx completion came in earlier we'd access
169 * unallocated memory */
170 if (skb_cb->htt.refcount > 1)
171 skb_cb->htt.refcount = 1;
172 }
173
174 ath10k_txrx_tx_unref(htt, skb);
175}
176
177int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
178{
179 struct sk_buff *skb;
180 struct htt_cmd *cmd;
181 int len = 0;
182 int ret;
183
184 len += sizeof(cmd->hdr);
185 len += sizeof(cmd->ver_req);
186
187 skb = ath10k_htc_alloc_skb(len);
188 if (!skb)
189 return -ENOMEM;
190
191 skb_put(skb, len);
192 cmd = (struct htt_cmd *)skb->data;
193 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
194
195 ATH10K_SKB_CB(skb)->htt.is_conf = true;
196
197 ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
198 if (ret) {
199 dev_kfree_skb_any(skb);
200 return ret;
201 }
202
203 return 0;
204}
205
/* Push the host RX ring configuration to the target (low-latency, i.e.
 * PCIe, variant). Describes the ring's DMA addresses, sizes and the
 * per-field offsets inside struct htt_rx_desc so firmware can populate
 * received buffers. Returns 0 or a negative errno. */
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	/* Request every descriptor section the firmware can provide. */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	/* seed the firmware index with the host's current shadow value */
	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	/* offsets are communicated to firmware in 4-byte words */
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	/* no tx bookkeeping for config messages */
	ATH10K_SKB_CB(skb)->htt.is_conf = true;

	ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
292
293int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
294{
295 struct device *dev = htt->ar->dev;
296 struct ath10k_skb_cb *skb_cb;
297 struct sk_buff *txdesc = NULL;
298 struct htt_cmd *cmd;
299 u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
300 int len = 0;
301 int msdu_id = -1;
302 int res;
303
304
305 res = ath10k_htt_tx_inc_pending(htt);
306 if (res)
307 return res;
308
309 len += sizeof(cmd->hdr);
310 len += sizeof(cmd->mgmt_tx);
311
312 txdesc = ath10k_htc_alloc_skb(len);
313 if (!txdesc) {
314 res = -ENOMEM;
315 goto err;
316 }
317
318 spin_lock_bh(&htt->tx_lock);
319 msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
320 if (msdu_id < 0) {
321 spin_unlock_bh(&htt->tx_lock);
322 res = msdu_id;
323 goto err;
324 }
325 htt->pending_tx[msdu_id] = txdesc;
326 spin_unlock_bh(&htt->tx_lock);
327
328 res = ath10k_skb_map(dev, msdu);
329 if (res)
330 goto err;
331
332 skb_put(txdesc, len);
333 cmd = (struct htt_cmd *)txdesc->data;
334 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
335 cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
336 cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
337 cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
338 cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
339 memcpy(cmd->mgmt_tx.hdr, msdu->data,
340 min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
341
342 /* refcount is decremented by HTC and HTT completions until it reaches
343 * zero and is freed */
344 skb_cb = ATH10K_SKB_CB(txdesc);
345 skb_cb->htt.msdu_id = msdu_id;
346 skb_cb->htt.refcount = 2;
347 skb_cb->htt.msdu = msdu;
348
349 res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
350 if (res)
351 goto err;
352
353 return 0;
354
355err:
356 ath10k_skb_unmap(dev, msdu);
357
358 if (txdesc)
359 dev_kfree_skb_any(txdesc);
360 if (msdu_id >= 0) {
361 spin_lock_bh(&htt->tx_lock);
362 htt->pending_tx[msdu_id] = NULL;
363 ath10k_htt_tx_free_msdu_id(htt, msdu_id);
364 spin_unlock_bh(&htt->tx_lock);
365 }
366 ath10k_htt_tx_dec_pending(htt);
367 return res;
368}
369
370int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
371{
372 struct device *dev = htt->ar->dev;
373 struct htt_cmd *cmd;
374 struct htt_data_tx_desc_frag *tx_frags;
375 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
376 struct ath10k_skb_cb *skb_cb;
377 struct sk_buff *txdesc = NULL;
378 struct sk_buff *txfrag = NULL;
379 u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
380 u8 tid;
381 int prefetch_len, desc_len, frag_len;
382 dma_addr_t frags_paddr;
383 int msdu_id = -1;
384 int res;
385 u8 flags0;
386 u16 flags1;
387
388 res = ath10k_htt_tx_inc_pending(htt);
389 if (res)
390 return res;
391
392 prefetch_len = min(htt->prefetch_len, msdu->len);
393 prefetch_len = roundup(prefetch_len, 4);
394
395 desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
396 frag_len = sizeof(*tx_frags) * 2;
397
398 txdesc = ath10k_htc_alloc_skb(desc_len);
399 if (!txdesc) {
400 res = -ENOMEM;
401 goto err;
402 }
403
404 txfrag = dev_alloc_skb(frag_len);
405 if (!txfrag) {
406 res = -ENOMEM;
407 goto err;
408 }
409
410 if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
411 ath10k_warn("htt alignment check failed. dropping packet.\n");
412 res = -EIO;
413 goto err;
414 }
415
416 spin_lock_bh(&htt->tx_lock);
417 msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
418 if (msdu_id < 0) {
419 spin_unlock_bh(&htt->tx_lock);
420 res = msdu_id;
421 goto err;
422 }
423 htt->pending_tx[msdu_id] = txdesc;
424 spin_unlock_bh(&htt->tx_lock);
425
426 res = ath10k_skb_map(dev, msdu);
427 if (res)
428 goto err;
429
430 /* tx fragment list must be terminated with zero-entry */
431 skb_put(txfrag, frag_len);
432 tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
433 tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
434 tx_frags[0].len = __cpu_to_le32(msdu->len);
435 tx_frags[1].paddr = __cpu_to_le32(0);
436 tx_frags[1].len = __cpu_to_le32(0);
437
438 res = ath10k_skb_map(dev, txfrag);
439 if (res)
440 goto err;
441
442 ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
443 (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
444 (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
445 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
446 txfrag->data, frag_len);
447 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
448 msdu->data, msdu->len);
449
450 skb_put(txdesc, desc_len);
451 cmd = (struct htt_cmd *)txdesc->data;
452 memset(cmd, 0, desc_len);
453
454 tid = ATH10K_SKB_CB(msdu)->htt.tid;
455
456 ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);
457
458 flags0 = 0;
459 if (!ieee80211_has_protected(hdr->frame_control))
460 flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
461 flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
462 flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
463 HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
464
465 flags1 = 0;
466 flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
467 flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
468
469 frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
470
471 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
472 cmd->data_tx.flags0 = flags0;
473 cmd->data_tx.flags1 = __cpu_to_le16(flags1);
474 cmd->data_tx.len = __cpu_to_le16(msdu->len);
475 cmd->data_tx.id = __cpu_to_le16(msdu_id);
476 cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
477 cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
478
479 memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
480
481 /* refcount is decremented by HTC and HTT completions until it reaches
482 * zero and is freed */
483 skb_cb = ATH10K_SKB_CB(txdesc);
484 skb_cb->htt.msdu_id = msdu_id;
485 skb_cb->htt.refcount = 2;
486 skb_cb->htt.txfrag = txfrag;
487 skb_cb->htt.msdu = msdu;
488
489 res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
490 if (res)
491 goto err;
492
493 return 0;
494err:
495 if (txfrag)
496 ath10k_skb_unmap(dev, txfrag);
497 if (txdesc)
498 dev_kfree_skb_any(txdesc);
499 if (txfrag)
500 dev_kfree_skb_any(txfrag);
501 if (msdu_id >= 0) {
502 spin_lock_bh(&htt->tx_lock);
503 htt->pending_tx[msdu_id] = NULL;
504 ath10k_htt_tx_free_msdu_id(htt, msdu_id);
505 spin_unlock_bh(&htt->tx_lock);
506 }
507 ath10k_htt_tx_dec_pending(htt);
508 ath10k_skb_unmap(dev, msdu);
509 return res;
510}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
new file mode 100644
index 000000000000..44ed5af0a204
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -0,0 +1,304 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _HW_H_
19#define _HW_H_
20
21#include "targaddrs.h"
22
23/* Supported FW version */
24#define SUPPORTED_FW_MAJOR 1
25#define SUPPORTED_FW_MINOR 0
26#define SUPPORTED_FW_RELEASE 0
27#define SUPPORTED_FW_BUILD 629
28
29/* QCA988X 1.0 definitions */
30#define QCA988X_HW_1_0_VERSION 0x4000002c
31#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0"
32#define QCA988X_HW_1_0_FW_FILE "firmware.bin"
33#define QCA988X_HW_1_0_OTP_FILE "otp.bin"
34#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
35#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
36
37/* QCA988X 2.0 definitions */
38#define QCA988X_HW_2_0_VERSION 0x4100016c
39#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
40#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
41#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
42#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
43#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
44
45/* Known pecularities:
46 * - current FW doesn't support raw rx mode (last tested v599)
47 * - current FW dumps upon raw tx mode (last tested v599)
48 * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
49 * - raw have FCS, nwifi doesn't
50 * - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
51 * param, llc/snap) are aligned to 4byte boundaries each */
/* Frame encapsulation modes the target can use on the tx/rx path
 * (see the pecularities noted above). */
enum ath10k_hw_txrx_mode {
	ATH10K_HW_TXRX_RAW = 0,
	ATH10K_HW_TXRX_NATIVE_WIFI = 1,
	ATH10K_HW_TXRX_ETHERNET = 2,
};

/* Whether the target converts multicast frames to per-peer unicast. */
enum ath10k_mcast2ucast_mode {
	ATH10K_MCAST2UCAST_DISABLED = 0,
	ATH10K_MCAST2UCAST_ENABLED = 1,
};
62
63#define TARGET_NUM_VDEVS 8
64#define TARGET_NUM_PEER_AST 2
65#define TARGET_NUM_WDS_ENTRIES 32
66#define TARGET_DMA_BURST_SIZE 0
67#define TARGET_MAC_AGGR_DELIM 0
68#define TARGET_AST_SKID_LIMIT 16
69#define TARGET_NUM_PEERS 16
70#define TARGET_NUM_OFFLOAD_PEERS 0
71#define TARGET_NUM_OFFLOAD_REORDER_BUFS 0
72#define TARGET_NUM_PEER_KEYS 2
73#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS)))
74#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
75#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
76#define TARGET_RX_TIMEOUT_LO_PRI 100
77#define TARGET_RX_TIMEOUT_HI_PRI 40
78#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET
79#define TARGET_SCAN_MAX_PENDING_REQS 4
80#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
81#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
82#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
83#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
84#define TARGET_NUM_MCAST_GROUPS 0
85#define TARGET_NUM_MCAST_TABLE_ELEMS 0
86#define TARGET_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
87#define TARGET_TX_DBG_LOG_SIZE 1024
88#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
89#define TARGET_VOW_CONFIG 0
90#define TARGET_NUM_MSDU_DESC (1024 + 400)
91#define TARGET_MAX_FRAG_ENTRIES 0
92
93
94/* Number of Copy Engines supported */
95#define CE_COUNT 8
96
97/*
98 * Total number of PCIe MSI interrupts requested for all interrupt sources.
99 * PCIe standard forces this to be a power of 2.
100 * Some Host OS's limit MSI requests that can be granted to 8
101 * so for now we abide by this limit and avoid requesting more
102 * than that.
103 */
104#define MSI_NUM_REQUEST_LOG2 3
105#define MSI_NUM_REQUEST (1<<MSI_NUM_REQUEST_LOG2)
106
107/*
108 * Granted MSIs are assigned as follows:
109 * Firmware uses the first
110 * Remaining MSIs, if any, are used by Copy Engines
111 * This mapping is known to both Target firmware and Host software.
112 * It may be changed as long as Host and Target are kept in sync.
113 */
114/* MSI for firmware (errors, etc.) */
115#define MSI_ASSIGN_FW 0
116
117/* MSIs for Copy Engines */
118#define MSI_ASSIGN_CE_INITIAL 1
119#define MSI_ASSIGN_CE_MAX 7
120
121/* as of IP3.7.1 */
122#define RTC_STATE_V_ON 3
123
124#define RTC_STATE_COLD_RESET_MASK 0x00000400
125#define RTC_STATE_V_LSB 0
126#define RTC_STATE_V_MASK 0x00000007
127#define RTC_STATE_ADDRESS 0x0000
128#define PCIE_SOC_WAKE_V_MASK 0x00000001
129#define PCIE_SOC_WAKE_ADDRESS 0x0004
130#define PCIE_SOC_WAKE_RESET 0x00000000
131#define SOC_GLOBAL_RESET_ADDRESS 0x0008
132
133#define RTC_SOC_BASE_ADDRESS 0x00004000
134#define RTC_WMAC_BASE_ADDRESS 0x00005000
135#define MAC_COEX_BASE_ADDRESS 0x00006000
136#define BT_COEX_BASE_ADDRESS 0x00007000
137#define SOC_PCIE_BASE_ADDRESS 0x00008000
138#define SOC_CORE_BASE_ADDRESS 0x00009000
139#define WLAN_UART_BASE_ADDRESS 0x0000c000
140#define WLAN_SI_BASE_ADDRESS 0x00010000
141#define WLAN_GPIO_BASE_ADDRESS 0x00014000
142#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
143#define WLAN_MAC_BASE_ADDRESS 0x00020000
144#define EFUSE_BASE_ADDRESS 0x00030000
145#define FPGA_REG_BASE_ADDRESS 0x00039000
146#define WLAN_UART2_BASE_ADDRESS 0x00054c00
147#define CE_WRAPPER_BASE_ADDRESS 0x00057000
148#define CE0_BASE_ADDRESS 0x00057400
149#define CE1_BASE_ADDRESS 0x00057800
150#define CE2_BASE_ADDRESS 0x00057c00
151#define CE3_BASE_ADDRESS 0x00058000
152#define CE4_BASE_ADDRESS 0x00058400
153#define CE5_BASE_ADDRESS 0x00058800
154#define CE6_BASE_ADDRESS 0x00058c00
155#define CE7_BASE_ADDRESS 0x00059000
156#define DBI_BASE_ADDRESS 0x00060000
157#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
158#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
159
160#define SOC_RESET_CONTROL_OFFSET 0x00000000
161#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
162#define SOC_CPU_CLOCK_OFFSET 0x00000020
163#define SOC_CPU_CLOCK_STANDARD_LSB 0
164#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
165#define SOC_CLOCK_CONTROL_OFFSET 0x00000028
166#define SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
167#define SOC_SYSTEM_SLEEP_OFFSET 0x000000c4
168#define SOC_LPO_CAL_OFFSET 0x000000e0
169#define SOC_LPO_CAL_ENABLE_LSB 20
170#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
171
172#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
173#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
174#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
175#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
176
177#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
178#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
179#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
180#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
181#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
182#define WLAN_GPIO_PIN11_ADDRESS 0x00000054
183#define WLAN_GPIO_PIN12_ADDRESS 0x00000058
184#define WLAN_GPIO_PIN13_ADDRESS 0x0000005c
185
186#define CLOCK_GPIO_OFFSET 0xffffffff
187#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
188#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
189
190#define SI_CONFIG_OFFSET 0x00000000
191#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
192#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
193#define SI_CONFIG_I2C_LSB 16
194#define SI_CONFIG_I2C_MASK 0x00010000
195#define SI_CONFIG_POS_SAMPLE_LSB 7
196#define SI_CONFIG_POS_SAMPLE_MASK 0x00000080
197#define SI_CONFIG_INACTIVE_DATA_LSB 5
198#define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
199#define SI_CONFIG_INACTIVE_CLK_LSB 4
200#define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
201#define SI_CONFIG_DIVIDER_LSB 0
202#define SI_CONFIG_DIVIDER_MASK 0x0000000f
203#define SI_CS_OFFSET 0x00000004
204#define SI_CS_DONE_ERR_MASK 0x00000400
205#define SI_CS_DONE_INT_MASK 0x00000200
206#define SI_CS_START_LSB 8
207#define SI_CS_START_MASK 0x00000100
208#define SI_CS_RX_CNT_LSB 4
209#define SI_CS_RX_CNT_MASK 0x000000f0
210#define SI_CS_TX_CNT_LSB 0
211#define SI_CS_TX_CNT_MASK 0x0000000f
212
213#define SI_TX_DATA0_OFFSET 0x00000008
214#define SI_TX_DATA1_OFFSET 0x0000000c
215#define SI_RX_DATA0_OFFSET 0x00000010
216#define SI_RX_DATA1_OFFSET 0x00000014
217
218#define CORE_CTRL_CPU_INTR_MASK 0x00002000
219#define CORE_CTRL_ADDRESS 0x0000
220#define PCIE_INTR_ENABLE_ADDRESS 0x0008
221#define PCIE_INTR_CLR_ADDRESS 0x0014
222#define SCRATCH_3_ADDRESS 0x0030
223
224/* Firmware indications to the Host via SCRATCH_3 register. */
225#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
226#define FW_IND_EVENT_PENDING 1
227#define FW_IND_INITIALIZED 2
228
229/* HOST_REG interrupt from firmware */
230#define PCIE_INTR_FIRMWARE_MASK 0x00000400
231#define PCIE_INTR_CE_MASK_ALL 0x0007f800
232
233#define DRAM_BASE_ADDRESS 0x00400000
234
235#define MISSING 0
236
237#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
238#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
239#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET
240#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET
241#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK
242#define RESET_CONTROL_MBOX_RST_MASK MISSING
243#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK
244#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
245#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
246#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
247#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
248#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
249#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
250#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
251#define LOCAL_SCRATCH_OFFSET 0x18
252#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET
253#define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET
254#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS
255#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS
256#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS
257#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS
258#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB
259#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK
260#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB
261#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK
262#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS
263#define MBOX_BASE_ADDRESS MISSING
264#define INT_STATUS_ENABLE_ERROR_LSB MISSING
265#define INT_STATUS_ENABLE_ERROR_MASK MISSING
266#define INT_STATUS_ENABLE_CPU_LSB MISSING
267#define INT_STATUS_ENABLE_CPU_MASK MISSING
268#define INT_STATUS_ENABLE_COUNTER_LSB MISSING
269#define INT_STATUS_ENABLE_COUNTER_MASK MISSING
270#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING
271#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING
272#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING
273#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING
274#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING
275#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING
276#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING
277#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING
278#define INT_STATUS_ENABLE_ADDRESS MISSING
279#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING
280#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING
281#define HOST_INT_STATUS_ADDRESS MISSING
282#define CPU_INT_STATUS_ADDRESS MISSING
283#define ERROR_INT_STATUS_ADDRESS MISSING
284#define ERROR_INT_STATUS_WAKEUP_MASK MISSING
285#define ERROR_INT_STATUS_WAKEUP_LSB MISSING
286#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING
287#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING
288#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING
289#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING
290#define COUNT_DEC_ADDRESS MISSING
291#define HOST_INT_STATUS_CPU_MASK MISSING
292#define HOST_INT_STATUS_CPU_LSB MISSING
293#define HOST_INT_STATUS_ERROR_MASK MISSING
294#define HOST_INT_STATUS_ERROR_LSB MISSING
295#define HOST_INT_STATUS_COUNTER_MASK MISSING
296#define HOST_INT_STATUS_COUNTER_LSB MISSING
297#define RX_LOOKAHEAD_VALID_ADDRESS MISSING
298#define WINDOW_DATA_ADDRESS MISSING
299#define WINDOW_READ_ADDR_ADDRESS MISSING
300#define WINDOW_WRITE_ADDR_ADDRESS MISSING
301
302#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
303
304#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
new file mode 100644
index 000000000000..da5c333d0d4b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -0,0 +1,3069 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "mac.h"
19
20#include <net/mac80211.h>
21#include <linux/etherdevice.h>
22
23#include "core.h"
24#include "debug.h"
25#include "wmi.h"
26#include "htt.h"
27#include "txrx.h"
28
29/**********/
30/* Crypto */
31/**********/
32
33static int ath10k_send_key(struct ath10k_vif *arvif,
34 struct ieee80211_key_conf *key,
35 enum set_key_cmd cmd,
36 const u8 *macaddr)
37{
38 struct wmi_vdev_install_key_arg arg = {
39 .vdev_id = arvif->vdev_id,
40 .key_idx = key->keyidx,
41 .key_len = key->keylen,
42 .key_data = key->key,
43 .macaddr = macaddr,
44 };
45
46 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
47 arg.key_flags = WMI_KEY_PAIRWISE;
48 else
49 arg.key_flags = WMI_KEY_GROUP;
50
51 switch (key->cipher) {
52 case WLAN_CIPHER_SUITE_CCMP:
53 arg.key_cipher = WMI_CIPHER_AES_CCM;
54 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
55 break;
56 case WLAN_CIPHER_SUITE_TKIP:
57 arg.key_cipher = WMI_CIPHER_TKIP;
58 arg.key_txmic_len = 8;
59 arg.key_rxmic_len = 8;
60 break;
61 case WLAN_CIPHER_SUITE_WEP40:
62 case WLAN_CIPHER_SUITE_WEP104:
63 arg.key_cipher = WMI_CIPHER_WEP;
64 /* AP/IBSS mode requires self-key to be groupwise
65 * Otherwise pairwise key must be set */
66 if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
67 arg.key_flags = WMI_KEY_PAIRWISE;
68 break;
69 default:
70 ath10k_warn("cipher %d is not supported\n", key->cipher);
71 return -EOPNOTSUPP;
72 }
73
74 if (cmd == DISABLE_KEY) {
75 arg.key_cipher = WMI_CIPHER_NONE;
76 arg.key_data = NULL;
77 }
78
79 return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
80}
81
82static int ath10k_install_key(struct ath10k_vif *arvif,
83 struct ieee80211_key_conf *key,
84 enum set_key_cmd cmd,
85 const u8 *macaddr)
86{
87 struct ath10k *ar = arvif->ar;
88 int ret;
89
90 INIT_COMPLETION(ar->install_key_done);
91
92 ret = ath10k_send_key(arvif, key, cmd, macaddr);
93 if (ret)
94 return ret;
95
96 ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ);
97 if (ret == 0)
98 return -ETIMEDOUT;
99
100 return 0;
101}
102
103static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
104 const u8 *addr)
105{
106 struct ath10k *ar = arvif->ar;
107 struct ath10k_peer *peer;
108 int ret;
109 int i;
110
111 lockdep_assert_held(&ar->conf_mutex);
112
113 spin_lock_bh(&ar->data_lock);
114 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
115 spin_unlock_bh(&ar->data_lock);
116
117 if (!peer)
118 return -ENOENT;
119
120 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
121 if (arvif->wep_keys[i] == NULL)
122 continue;
123
124 ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
125 addr);
126 if (ret)
127 return ret;
128
129 peer->keys[i] = arvif->wep_keys[i];
130 }
131
132 return 0;
133}
134
135static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
136 const u8 *addr)
137{
138 struct ath10k *ar = arvif->ar;
139 struct ath10k_peer *peer;
140 int first_errno = 0;
141 int ret;
142 int i;
143
144 lockdep_assert_held(&ar->conf_mutex);
145
146 spin_lock_bh(&ar->data_lock);
147 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
148 spin_unlock_bh(&ar->data_lock);
149
150 if (!peer)
151 return -ENOENT;
152
153 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
154 if (peer->keys[i] == NULL)
155 continue;
156
157 ret = ath10k_install_key(arvif, peer->keys[i],
158 DISABLE_KEY, addr);
159 if (ret && first_errno == 0)
160 first_errno = ret;
161
162 if (ret)
163 ath10k_warn("could not remove peer wep key %d (%d)\n",
164 i, ret);
165
166 peer->keys[i] = NULL;
167 }
168
169 return first_errno;
170}
171
172static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
173 struct ieee80211_key_conf *key)
174{
175 struct ath10k *ar = arvif->ar;
176 struct ath10k_peer *peer;
177 u8 addr[ETH_ALEN];
178 int first_errno = 0;
179 int ret;
180 int i;
181
182 lockdep_assert_held(&ar->conf_mutex);
183
184 for (;;) {
185 /* since ath10k_install_key we can't hold data_lock all the
186 * time, so we try to remove the keys incrementally */
187 spin_lock_bh(&ar->data_lock);
188 i = 0;
189 list_for_each_entry(peer, &ar->peers, list) {
190 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
191 if (peer->keys[i] == key) {
192 memcpy(addr, peer->addr, ETH_ALEN);
193 peer->keys[i] = NULL;
194 break;
195 }
196 }
197
198 if (i < ARRAY_SIZE(peer->keys))
199 break;
200 }
201 spin_unlock_bh(&ar->data_lock);
202
203 if (i == ARRAY_SIZE(peer->keys))
204 break;
205
206 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr);
207 if (ret && first_errno == 0)
208 first_errno = ret;
209
210 if (ret)
211 ath10k_warn("could not remove key for %pM\n", addr);
212 }
213
214 return first_errno;
215}
216
217
218/*********************/
219/* General utilities */
220/*********************/
221
222static inline enum wmi_phy_mode
223chan_to_phymode(const struct cfg80211_chan_def *chandef)
224{
225 enum wmi_phy_mode phymode = MODE_UNKNOWN;
226
227 switch (chandef->chan->band) {
228 case IEEE80211_BAND_2GHZ:
229 switch (chandef->width) {
230 case NL80211_CHAN_WIDTH_20_NOHT:
231 phymode = MODE_11G;
232 break;
233 case NL80211_CHAN_WIDTH_20:
234 phymode = MODE_11NG_HT20;
235 break;
236 case NL80211_CHAN_WIDTH_40:
237 phymode = MODE_11NG_HT40;
238 break;
239 case NL80211_CHAN_WIDTH_5:
240 case NL80211_CHAN_WIDTH_10:
241 case NL80211_CHAN_WIDTH_80:
242 case NL80211_CHAN_WIDTH_80P80:
243 case NL80211_CHAN_WIDTH_160:
244 phymode = MODE_UNKNOWN;
245 break;
246 }
247 break;
248 case IEEE80211_BAND_5GHZ:
249 switch (chandef->width) {
250 case NL80211_CHAN_WIDTH_20_NOHT:
251 phymode = MODE_11A;
252 break;
253 case NL80211_CHAN_WIDTH_20:
254 phymode = MODE_11NA_HT20;
255 break;
256 case NL80211_CHAN_WIDTH_40:
257 phymode = MODE_11NA_HT40;
258 break;
259 case NL80211_CHAN_WIDTH_80:
260 phymode = MODE_11AC_VHT80;
261 break;
262 case NL80211_CHAN_WIDTH_5:
263 case NL80211_CHAN_WIDTH_10:
264 case NL80211_CHAN_WIDTH_80P80:
265 case NL80211_CHAN_WIDTH_160:
266 phymode = MODE_UNKNOWN;
267 break;
268 }
269 break;
270 default:
271 break;
272 }
273
274 WARN_ON(phymode == MODE_UNKNOWN);
275 return phymode;
276}
277
278static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
279{
280/*
281 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
282 * 0 for no restriction
283 * 1 for 1/4 us
284 * 2 for 1/2 us
285 * 3 for 1 us
286 * 4 for 2 us
287 * 5 for 4 us
288 * 6 for 8 us
289 * 7 for 16 us
290 */
291 switch (mpdudensity) {
292 case 0:
293 return 0;
294 case 1:
295 case 2:
296 case 3:
297 /* Our lower layer calculations limit our precision to
298 1 microsecond */
299 return 1;
300 case 4:
301 return 2;
302 case 5:
303 return 4;
304 case 6:
305 return 8;
306 case 7:
307 return 16;
308 default:
309 return 0;
310 }
311}
312
313static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
314{
315 int ret;
316
317 lockdep_assert_held(&ar->conf_mutex);
318
319 ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
320 if (ret)
321 return ret;
322
323 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
324 if (ret)
325 return ret;
326
327 return 0;
328}
329
330static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
331{
332 int ret;
333
334 lockdep_assert_held(&ar->conf_mutex);
335
336 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
337 if (ret)
338 return ret;
339
340 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
341 if (ret)
342 return ret;
343
344 return 0;
345}
346
347static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
348{
349 struct ath10k_peer *peer, *tmp;
350
351 lockdep_assert_held(&ar->conf_mutex);
352
353 spin_lock_bh(&ar->data_lock);
354 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
355 if (peer->vdev_id != vdev_id)
356 continue;
357
358 ath10k_warn("removing stale peer %pM from vdev_id %d\n",
359 peer->addr, vdev_id);
360
361 list_del(&peer->list);
362 kfree(peer);
363 }
364 spin_unlock_bh(&ar->data_lock);
365}
366
367/************************/
368/* Interface management */
369/************************/
370
371static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
372{
373 int ret;
374
375 ret = wait_for_completion_timeout(&ar->vdev_setup_done,
376 ATH10K_VDEV_SETUP_TIMEOUT_HZ);
377 if (ret == 0)
378 return -ETIMEDOUT;
379
380 return 0;
381}
382
/* Start @arvif's vdev on the currently configured channel.
 *
 * Builds a wmi_vdev_start_request_arg from the current hw config and
 * interface state, issues WMI_VDEV_START and synchronously waits for
 * the firmware acknowledgement.  Returns 0 or a negative errno.
 * Must be called with conf_mutex held.
 */
static int ath10k_vdev_start(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_conf *conf = &ar->hw->conf;
	struct ieee80211_channel *channel = conf->chandef.chan;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	/* Re-arm before sending so a stale completion from an earlier
	 * start/stop cannot satisfy the wait in ath10k_vdev_setup_sync(). */
	INIT_COMPLETION(ar->vdev_setup_done);

	arg.vdev_id = arvif->vdev_id;
	arg.dtim_period = arvif->dtim_period;
	arg.bcn_intval = arvif->beacon_interval;

	arg.channel.freq = channel->center_freq;

	arg.channel.band_center_freq1 = conf->chandef.center_freq1;

	arg.channel.mode = chan_to_phymode(&conf->chandef);

	/* NOTE(review): powers are scaled by 3x/4x — presumably the
	 * firmware expects quarter-dBm (and min = 3/4 of max); confirm
	 * against the WMI channel definition. */
	arg.channel.min_power = channel->max_power * 3;
	arg.channel.max_power = channel->max_power * 4;
	arg.channel.max_reg_power = channel->max_reg_power * 4;
	arg.channel.max_antenna_gain = channel->max_antenna_gain;

	/* AP advertises its own SSID; IBSS takes it from the bss config. */
	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
		arg.ssid = arvif->u.ap.ssid;
		arg.ssid_len = arvif->u.ap.ssid_len;
		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
		arg.ssid = arvif->vif->bss_conf.ssid;
		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
	}

	ret = ath10k_wmi_vdev_start(ar, &arg);
	if (ret) {
		ath10k_warn("WMI vdev start failed: ret %d\n", ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn("vdev setup failed %d\n", ret);
		return ret;
	}

	return ret;
}
433
434static int ath10k_vdev_stop(struct ath10k_vif *arvif)
435{
436 struct ath10k *ar = arvif->ar;
437 int ret;
438
439 lockdep_assert_held(&ar->conf_mutex);
440
441 INIT_COMPLETION(ar->vdev_setup_done);
442
443 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
444 if (ret) {
445 ath10k_warn("WMI vdev stop failed: ret %d\n", ret);
446 return ret;
447 }
448
449 ret = ath10k_vdev_setup_sync(ar);
450 if (ret) {
451 ath10k_warn("vdev setup failed %d\n", ret);
452 return ret;
453 }
454
455 return ret;
456}
457
/* Start the monitor-mode vdev @vdev_id on the current channel.
 *
 * Issues WMI vdev start, waits for completion, then brings the vdev up.
 * On vdev-up failure the vdev is stopped again.  On success records
 * @vdev_id as the monitor vdev and sets monitor_enabled.
 * Returns 0 or a negative errno.  Must be called with conf_mutex held.
 */
static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
{
	struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
	struct wmi_vdev_start_request_arg arg = {};
	enum nl80211_channel_type type;
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	/* NOTE(review): `type` is computed but never used below. */
	type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);

	arg.vdev_id = vdev_id;
	arg.channel.freq = channel->center_freq;
	arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;

	/* TODO setup this dynamically, what in case we
	   don't have any vifs? */
	arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef);

	/* Same 3x/4x power scaling as ath10k_vdev_start() — presumably
	 * quarter-dBm units expected by firmware; confirm. */
	arg.channel.min_power = channel->max_power * 3;
	arg.channel.max_power = channel->max_power * 4;
	arg.channel.max_reg_power = channel->max_reg_power * 4;
	arg.channel.max_antenna_gain = channel->max_antenna_gain;

	ret = ath10k_wmi_vdev_start(ar, &arg);
	if (ret) {
		ath10k_warn("Monitor vdev start failed: ret %d\n", ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn("Monitor vdev setup failed %d\n", ret);
		return ret;
	}

	/* Monitor vdevs have no BSS: aid 0 and our own MAC address. */
	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
	if (ret) {
		ath10k_warn("Monitor vdev up failed: %d\n", ret);
		goto vdev_stop;
	}

	ar->monitor_vdev_id = vdev_id;
	ar->monitor_enabled = true;

	return 0;

vdev_stop:
	/* Best-effort rollback of the successful vdev start above. */
	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn("Monitor vdev stop failed: %d\n", ret);

	return ret;
}
512
/* Stop the monitor vdev and clear monitor_enabled.
 *
 * Stop/sync failures are only warned about; monitor_enabled is cleared
 * regardless.  Returns the last operation's status.  Must be called
 * with conf_mutex held.
 */
static int ath10k_monitor_stop(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	/* For some reason calling ath10k_wmi_vdev_down() here often causes
	 * the subsequent ath10k_wmi_vdev_stop() to fail, after which the
	 * monitor vdev cannot be restarted without a driver reload.  No
	 * problems were observed without the vdev-down step, so it is
	 * deliberately skipped.
	 */

	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn("Monitor vdev stop failed: %d\n", ret);

	ret = ath10k_vdev_setup_sync(ar);
	if (ret)
		ath10k_warn("Monitor_down sync failed: %d\n", ret);

	ar->monitor_enabled = false;
	return ret;
}
537
/* Allocate a vdev id and create the firmware monitor vdev.
 *
 * Picks the lowest free bit of free_vdev_map, claims it, and asks the
 * firmware to create a WMI_VDEV_TYPE_MONITOR vdev with it.  On failure
 * the id is returned to the map.  Returns 0 (also when a monitor vdev
 * already exists), -ENOMEM when no vdev slot is free, or a WMI error.
 * Must be called with conf_mutex held.
 */
static int ath10k_monitor_create(struct ath10k *ar)
{
	int bit, ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->monitor_present) {
		ath10k_warn("Monitor mode already enabled\n");
		return 0;
	}

	/* ffs() is 1-based; 0 means no bit set, i.e. no free slot. */
	bit = ffs(ar->free_vdev_map);
	if (bit == 0) {
		ath10k_warn("No free VDEV slots\n");
		return -ENOMEM;
	}

	ar->monitor_vdev_id = bit - 1;
	ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id);

	ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
				     WMI_VDEV_TYPE_MONITOR,
				     0, ar->mac_addr);
	if (ret) {
		ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret);
		goto vdev_fail;
	}

	ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
		   ar->monitor_vdev_id);

	ar->monitor_present = true;
	return 0;

vdev_fail:
	/*
	 * Restore the ID to the global map.
	 */
	ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
	return ret;
}
579
580static int ath10k_monitor_destroy(struct ath10k *ar)
581{
582 int ret = 0;
583
584 lockdep_assert_held(&ar->conf_mutex);
585
586 if (!ar->monitor_present)
587 return 0;
588
589 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
590 if (ret) {
591 ath10k_warn("WMI vdev monitor delete failed: %d\n", ret);
592 return ret;
593 }
594
595 ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
596 ar->monitor_present = false;
597
598 ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
599 ar->monitor_vdev_id);
600 return ret;
601}
602
603static void ath10k_control_beaconing(struct ath10k_vif *arvif,
604 struct ieee80211_bss_conf *info)
605{
606 int ret = 0;
607
608 if (!info->enable_beacon) {
609 ath10k_vdev_stop(arvif);
610 return;
611 }
612
613 arvif->tx_seq_no = 0x1000;
614
615 ret = ath10k_vdev_start(arvif);
616 if (ret)
617 return;
618
619 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid);
620 if (ret) {
621 ath10k_warn("Failed to bring up VDEV: %d\n",
622 arvif->vdev_id);
623 return;
624 }
625 ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
626}
627
/* Track IBSS join/leave for @arvif.
 *
 * On leave: deletes the self peer and, if a BSSID peer was recorded,
 * deletes it too and clears the stored BSSID.  On join: creates the
 * self peer and programs the default ATIM window.  Errors are logged
 * but not propagated.
 */
static void ath10k_control_ibss(struct ath10k_vif *arvif,
				struct ieee80211_bss_conf *info,
				const u8 self_peer[ETH_ALEN])
{
	int ret = 0;

	if (!info->ibss_joined) {
		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
		if (ret)
			ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
				    self_peer, arvif->vdev_id, ret);

		/* All-zero BSSID means no BSSID peer was ever created. */
		if (is_zero_ether_addr(arvif->u.ibss.bssid))
			return;

		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
					 arvif->u.ibss.bssid);
		if (ret) {
			ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
				    arvif->u.ibss.bssid, arvif->vdev_id, ret);
			return;
		}

		memset(arvif->u.ibss.bssid, 0, ETH_ALEN);

		return;
	}

	ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
	if (ret) {
		ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n",
			    self_peer, arvif->vdev_id, ret);
		return;
	}

	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
					WMI_VDEV_PARAM_ATIM_WINDOW,
					ATH10K_DEFAULT_ATIM);
	if (ret)
		ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
			    arvif->vdev_id, ret);
}
670
671/*
672 * Review this when mac80211 gains per-interface powersave support.
673 */
674static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
675{
676 struct ath10k_generic_iter *ar_iter = data;
677 struct ieee80211_conf *conf = &ar_iter->ar->hw->conf;
678 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
679 enum wmi_sta_powersave_param param;
680 enum wmi_sta_ps_mode psmode;
681 int ret;
682
683 if (vif->type != NL80211_IFTYPE_STATION)
684 return;
685
686 if (conf->flags & IEEE80211_CONF_PS) {
687 psmode = WMI_STA_PS_MODE_ENABLED;
688 param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
689
690 ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar,
691 arvif->vdev_id,
692 param,
693 conf->dynamic_ps_timeout);
694 if (ret) {
695 ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
696 arvif->vdev_id);
697 return;
698 }
699
700 ar_iter->ret = ret;
701 } else {
702 psmode = WMI_STA_PS_MODE_DISABLED;
703 }
704
705 ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
706 psmode);
707 if (ar_iter->ret)
708 ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
709 psmode, arvif->vdev_id);
710 else
711 ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
712 psmode, arvif->vdev_id);
713}
714
715/**********************/
716/* Station management */
717/**********************/
718
/* Fill the basic (band/mode independent) fields of a peer-assoc arg:
 * address, vdev, aid, auth flag, listen interval and assoc caps.
 */
static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
				      struct ath10k_vif *arvif,
				      struct ieee80211_sta *sta,
				      struct ieee80211_bss_conf *bss_conf,
				      struct wmi_peer_assoc_complete_arg *arg)
{
	memcpy(arg->addr, sta->addr, ETH_ALEN);
	arg->vdev_id = arvif->vdev_id;
	arg->peer_aid = sta->aid;
	arg->peer_flags |= WMI_PEER_AUTH;

	if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
		/*
		 * The firmware appears to have power-save problems in STA
		 * mode when this parameter is set high (e.g. 5): it often
		 * does not send a NULL frame (with clean PS flags) even
		 * when beacons indicate buffered frames, sometimes taking
		 * more than 10 seconds to wake up, and pings from the AP
		 * fail more than half the time.
		 *
		 * Setting it to 1 makes the firmware check every beacon
		 * and wake up immediately after detecting buffered data.
		 */
		arg->peer_listen_intval = 1;
	else
		arg->peer_listen_intval = ar->hw->conf.listen_interval;

	arg->peer_num_spatial_streams = 1;

	/*
	 * The assoc capabilities are available only in managed mode.
	 */
	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf)
		arg->peer_caps = bss_conf->assoc_capability;
}
756
/* Derive the peer's key-handshake needs from the BSS's RSN/WPA IEs.
 *
 * Looks up the current BSS in the cfg80211 scan cache and, if it
 * advertises RSN or vendor WPA, flags the peer as needing the 4-way
 * PTK handshake (and 2-way GTK for WPA).
 */
static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
				       struct ath10k_vif *arvif,
				       struct wmi_peer_assoc_complete_arg *arg)
{
	struct ieee80211_vif *vif = arvif->vif;
	struct ieee80211_bss_conf *info = &vif->bss_conf;
	struct cfg80211_bss *bss;
	const u8 *rsnie = NULL;
	const u8 *wpaie = NULL;

	bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
			       info->bssid, NULL, 0, 0, 0);
	if (bss) {
		const struct cfg80211_bss_ies *ies;

		/* IE pointers are only valid inside the RCU section. */
		rcu_read_lock();
		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);

		/* NOTE(review): ies is dereferenced without a NULL check;
		 * presumably a cached bss always has ies here — confirm. */
		ies = rcu_dereference(bss->ies);

		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
						WLAN_OUI_TYPE_MICROSOFT_WPA,
						ies->data,
						ies->len);
		rcu_read_unlock();
		cfg80211_put_bss(ar->hw->wiphy, bss);
	}

	/* FIXME: base on RSN IE/WPA IE is a correct idea? */
	if (rsnie || wpaie) {
		ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
		arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
	}

	if (wpaie) {
		ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
		arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
	}
}
796
797static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
798 struct ieee80211_sta *sta,
799 struct wmi_peer_assoc_complete_arg *arg)
800{
801 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
802 const struct ieee80211_supported_band *sband;
803 const struct ieee80211_rate *rates;
804 u32 ratemask;
805 int i;
806
807 sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
808 ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
809 rates = sband->bitrates;
810
811 rateset->num_rates = 0;
812
813 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
814 if (!(ratemask & 1))
815 continue;
816
817 rateset->rates[rateset->num_rates] = rates->hw_value;
818 rateset->num_rates++;
819 }
820}
821
/* Fill the HT portion of a peer-assoc arg from the station's HT caps:
 * A-MPDU limits, LDPC/STBC/SGI/40MHz flags, SMPS mode, the MCS rate
 * list and the derived spatial-stream count.  No-op for non-HT peers.
 */
static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
				   struct ieee80211_sta *sta,
				   struct wmi_peer_assoc_complete_arg *arg)
{
	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
	int smps;
	int i, n;

	if (!ht_cap->ht_supported)
		return;

	arg->peer_flags |= WMI_PEER_HT;
	/* Max A-MPDU length: 2^(13 + ampdu_factor) - 1 octets. */
	arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				    ht_cap->ampdu_factor)) - 1;

	arg->peer_mpdu_density =
		ath10k_parse_mpdudensity(ht_cap->ampdu_density);

	arg->peer_ht_caps = ht_cap->cap;
	arg->peer_rate_caps |= WMI_RC_HT_FLAG;

	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
		arg->peer_flags |= WMI_PEER_LDPC;

	if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
		arg->peer_flags |= WMI_PEER_40MHZ;
		arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
	}

	/* WMI has a single SGI flag for both 20 and 40 MHz short GI. */
	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
		arg->peer_rate_caps |= WMI_RC_SGI_FLAG;

	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
		arg->peer_rate_caps |= WMI_RC_SGI_FLAG;

	if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
		arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
		arg->peer_flags |= WMI_PEER_STBC;
	}

	if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
		/* Move the 2-bit RX STBC stream count from its HT-cap
		 * position into the WMI rate-caps field position. */
		u32 stbc;
		stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
		stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
		stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
		arg->peer_rate_caps |= stbc;
		arg->peer_flags |= WMI_PEER_STBC;
	}

	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;

	if (smps == WLAN_HT_CAP_SM_PS_STATIC) {
		arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
		arg->peer_flags |= WMI_PEER_STATIC_MIMOPS;
	} else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) {
		arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
		arg->peer_flags |= WMI_PEER_DYN_MIMOPS;
	}

	/* rx_mask[1]/[2] set => 2 resp. 3 spatial streams advertised. */
	if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
		arg->peer_rate_caps |= WMI_RC_TS_FLAG;
	else if (ht_cap->mcs.rx_mask[1])
		arg->peer_rate_caps |= WMI_RC_DS_FLAG;

	/* Collect every supported MCS index from the rx bitmap. */
	for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++)
		if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
			arg->peer_ht_rates.rates[n++] = i;

	arg->peer_ht_rates.num_rates = n;
	/* 8 MCS indices per spatial stream; at least one stream. */
	arg->peer_num_spatial_streams = max((n+7) / 8, 1);

	ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
		   arg->peer_ht_rates.num_rates,
		   arg->peer_num_spatial_streams);
}
898
899static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
900 struct ath10k_vif *arvif,
901 struct ieee80211_sta *sta,
902 struct ieee80211_bss_conf *bss_conf,
903 struct wmi_peer_assoc_complete_arg *arg)
904{
905 u32 uapsd = 0;
906 u32 max_sp = 0;
907
908 if (sta->wme)
909 arg->peer_flags |= WMI_PEER_QOS;
910
911 if (sta->wme && sta->uapsd_queues) {
912 ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
913 sta->uapsd_queues, sta->max_sp);
914
915 arg->peer_flags |= WMI_PEER_APSD;
916 arg->peer_flags |= WMI_RC_UAPSD_FLAG;
917
918 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
919 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
920 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
921 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
922 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
923 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
924 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
925 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
926 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
927 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
928 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
929 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
930
931
932 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
933 max_sp = sta->max_sp;
934
935 ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
936 sta->addr,
937 WMI_AP_PS_PEER_PARAM_UAPSD,
938 uapsd);
939
940 ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
941 sta->addr,
942 WMI_AP_PS_PEER_PARAM_MAX_SP,
943 max_sp);
944
945 /* TODO setup this based on STA listen interval and
946 beacon interval. Currently we don't know
947 sta->listen_interval - mac80211 patch required.
948 Currently use 10 seconds */
949 ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
950 sta->addr,
951 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
952 10);
953 }
954}
955
956static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar,
957 struct ath10k_vif *arvif,
958 struct ieee80211_sta *sta,
959 struct ieee80211_bss_conf *bss_conf,
960 struct wmi_peer_assoc_complete_arg *arg)
961{
962 if (bss_conf->qos)
963 arg->peer_flags |= WMI_PEER_QOS;
964}
965
/* Fill the VHT portion of a peer-assoc arg from the station's VHT caps:
 * capability word, 80 MHz flag, and the rx/tx highest-rate and MCS-map
 * fields (converted from little endian).  No-op for non-VHT peers.
 */
static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
				    struct ieee80211_sta *sta,
				    struct wmi_peer_assoc_complete_arg *arg)
{
	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;

	if (!vht_cap->vht_supported)
		return;

	arg->peer_flags |= WMI_PEER_VHT;

	arg->peer_vht_caps = vht_cap->cap;

	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
		arg->peer_flags |= WMI_PEER_80MHZ;

	/* ieee80211_vht_mcs_info fields are __le16 on the wire. */
	arg->peer_vht_rates.rx_max_rate =
		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
	arg->peer_vht_rates.rx_mcs_set =
		__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
	arg->peer_vht_rates.tx_max_rate =
		__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
	arg->peer_vht_rates.tx_mcs_set =
		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);

	ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
}
993
994static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
995 struct ath10k_vif *arvif,
996 struct ieee80211_sta *sta,
997 struct ieee80211_bss_conf *bss_conf,
998 struct wmi_peer_assoc_complete_arg *arg)
999{
1000 switch (arvif->vdev_type) {
1001 case WMI_VDEV_TYPE_AP:
1002 ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg);
1003 break;
1004 case WMI_VDEV_TYPE_STA:
1005 ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg);
1006 break;
1007 default:
1008 break;
1009 }
1010}
1011
1012static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
1013 struct ath10k_vif *arvif,
1014 struct ieee80211_sta *sta,
1015 struct wmi_peer_assoc_complete_arg *arg)
1016{
1017 enum wmi_phy_mode phymode = MODE_UNKNOWN;
1018
1019 /* FIXME: add VHT */
1020
1021 switch (ar->hw->conf.chandef.chan->band) {
1022 case IEEE80211_BAND_2GHZ:
1023 if (sta->ht_cap.ht_supported) {
1024 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
1025 phymode = MODE_11NG_HT40;
1026 else
1027 phymode = MODE_11NG_HT20;
1028 } else {
1029 phymode = MODE_11G;
1030 }
1031
1032 break;
1033 case IEEE80211_BAND_5GHZ:
1034 if (sta->ht_cap.ht_supported) {
1035 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
1036 phymode = MODE_11NA_HT40;
1037 else
1038 phymode = MODE_11NA_HT20;
1039 } else {
1040 phymode = MODE_11A;
1041 }
1042
1043 break;
1044 default:
1045 break;
1046 }
1047
1048 arg->peer_phymode = phymode;
1049 WARN_ON(phymode == MODE_UNKNOWN);
1050}
1051
1052static int ath10k_peer_assoc(struct ath10k *ar,
1053 struct ath10k_vif *arvif,
1054 struct ieee80211_sta *sta,
1055 struct ieee80211_bss_conf *bss_conf)
1056{
1057 struct wmi_peer_assoc_complete_arg arg;
1058
1059 memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
1060
1061 ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
1062 ath10k_peer_assoc_h_crypto(ar, arvif, &arg);
1063 ath10k_peer_assoc_h_rates(ar, sta, &arg);
1064 ath10k_peer_assoc_h_ht(ar, sta, &arg);
1065 ath10k_peer_assoc_h_vht(ar, sta, &arg);
1066 ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg);
1067 ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg);
1068
1069 return ath10k_wmi_peer_assoc(ar, &arg);
1070}
1071
/* can be called only in mac80211 callbacks due to `key_count` usage */
/* Complete association with the BSS in @bss_conf: runs peer-assoc for
 * the AP station entry and brings the vdev up with the BSSID and AID.
 * Errors are logged but not propagated (void mac80211 callback path).
 */
static void ath10k_bss_assoc(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *bss_conf)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
	struct ieee80211_sta *ap_sta;
	int ret;

	/* ieee80211_find_sta() results are only valid under RCU; the
	 * peer-assoc call below therefore stays inside the section. */
	rcu_read_lock();

	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
	if (!ap_sta) {
		ath10k_warn("Failed to find station entry for %pM\n",
			    bss_conf->bssid);
		rcu_read_unlock();
		return;
	}

	ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf);
	if (ret) {
		ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid);
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
				 bss_conf->bssid);
	if (ret)
		ath10k_warn("VDEV: %d up failed: ret %d\n",
			    arvif->vdev_id, ret);
	else
		ath10k_dbg(ATH10K_DBG_MAC,
			   "VDEV: %d associated, BSSID: %pM, AID: %d\n",
			   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
}
1111
/*
 * FIXME: flush TIDs
 */
/* Tear down the association on @vif: stop the vdev, bring it down,
 * flush pending WMI TX and reset the default WEP key index.
 */
static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
	int ret;

	/*
	 * For some reason, calling VDEV-DOWN before VDEV-STOP
	 * makes the FW send frames via HTT after disassociation.
	 * No idea why this happens, even though VDEV-DOWN is supposed
	 * to be analogous to link down, so just stop the VDEV first.
	 */
	ret = ath10k_vdev_stop(arvif);
	if (!ret)
		ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
			   arvif->vdev_id);

	/*
	 * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
	 * report beacons from previously associated network through HTT.
	 * This in turn would spam mac80211 WARN_ON if we bring down all
	 * interfaces as it expects there is no rx when no interface is
	 * running.
	 */
	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
	if (ret)
		ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
			   arvif->vdev_id, ret);

	ath10k_wmi_flush_tx(ar);

	/* Back to the default WEP TX key for the next association. */
	arvif->def_wep_key_index = 0;
}
1149
1150static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
1151 struct ieee80211_sta *sta)
1152{
1153 int ret = 0;
1154
1155 ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
1156 if (ret) {
1157 ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
1158 return ret;
1159 }
1160
1161 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
1162 if (ret) {
1163 ath10k_warn("could not install peer wep keys (%d)\n", ret);
1164 return ret;
1165 }
1166
1167 return ret;
1168}
1169
1170static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
1171 struct ieee80211_sta *sta)
1172{
1173 int ret = 0;
1174
1175 ret = ath10k_clear_peer_keys(arvif, sta->addr);
1176 if (ret) {
1177 ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
1178 return ret;
1179 }
1180
1181 return ret;
1182}
1183
1184/**************/
1185/* Regulatory */
1186/**************/
1187
/* Push the current (non-disabled) channel list of all bands to the
 * firmware via WMI scan-chan-list.
 *
 * Two passes over the wiphy bands: first counts enabled channels to
 * size the allocation, second fills in one wmi_channel_arg each.
 * Returns 0, -ENOMEM, or the WMI command status.
 */
static int ath10k_update_channel_list(struct ath10k *ar)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_supported_band **bands;
	enum ieee80211_band band;
	struct ieee80211_channel *channel;
	struct wmi_scan_chan_list_arg arg = {0};
	struct wmi_channel_arg *ch;
	bool passive;
	int len;
	int ret;
	int i;

	/* Pass 1: count channels that are not regulatory-disabled. */
	bands = hw->wiphy->bands;
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		if (!bands[band])
			continue;

		for (i = 0; i < bands[band]->n_channels; i++) {
			if (bands[band]->channels[i].flags &
			    IEEE80211_CHAN_DISABLED)
				continue;

			arg.n_channels++;
		}
	}

	len = sizeof(struct wmi_channel_arg) * arg.n_channels;
	arg.channels = kzalloc(len, GFP_KERNEL);
	if (!arg.channels)
		return -ENOMEM;

	/* Pass 2: fill one entry per enabled channel. */
	ch = arg.channels;
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		if (!bands[band])
			continue;

		for (i = 0; i < bands[band]->n_channels; i++) {
			channel = &bands[band]->channels[i];

			if (channel->flags & IEEE80211_CHAN_DISABLED)
				continue;

			ch->allow_ht   = true;

			/* FIXME: when should we really allow VHT? */
			ch->allow_vht = true;

			ch->allow_ibss =
				!(channel->flags & IEEE80211_CHAN_NO_IBSS);

			ch->ht40plus =
				!(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);

			passive = channel->flags & IEEE80211_CHAN_PASSIVE_SCAN;
			ch->passive = passive;

			ch->freq = channel->center_freq;
			/* Same 3x/4x power scaling as ath10k_vdev_start();
			 * presumably quarter-dBm units — confirm. */
			ch->min_power = channel->max_power * 3;
			ch->max_power = channel->max_power * 4;
			ch->max_reg_power = channel->max_reg_power * 4;
			ch->max_antenna_gain = channel->max_antenna_gain;
			ch->reg_class_id = 0; /* FIXME */

			/* FIXME: why use only legacy modes, why not any
			 * HT/VHT modes? Would that even make any
			 * difference? */
			if (channel->band == IEEE80211_BAND_2GHZ)
				ch->mode = MODE_11G;
			else
				ch->mode = MODE_11A;

			if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
				continue;

			ath10k_dbg(ATH10K_DBG_WMI,
				   "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
				   __func__, ch - arg.channels, arg.n_channels,
				   ch->freq, ch->max_power, ch->max_reg_power,
				   ch->max_antenna_gain, ch->mode);

			ch++;
		}
	}

	ret = ath10k_wmi_scan_chan_list(ar, &arg);
	kfree(arg.channels);

	return ret;
}
1278
/* cfg80211 regulatory notifier: applies the new regulatory request,
 * re-sends the (possibly changed) channel list to the firmware and
 * programs the combined regdomain.  Errors are logged only.
 */
static void ath10k_reg_notifier(struct wiphy *wiphy,
				struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct reg_dmn_pair_mapping *regpair;
	struct ath10k *ar = hw->priv;
	int ret;

	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);

	ret = ath10k_update_channel_list(ar);
	if (ret)
		ath10k_warn("could not update channel list (%d)\n", ret);

	/* NOTE(review): regpair is dereferenced without a NULL check;
	 * presumably ath_reg_notifier_apply() always leaves it set —
	 * confirm. */
	regpair = ar->ath_common.regulatory.regpair;
	/* Target allows setting up per-band regdomain but ath_common provides
	 * a combined one only */
	ret = ath10k_wmi_pdev_set_regdomain(ar,
					    regpair->regDmnEnum,
					    regpair->regDmnEnum, /* 2ghz */
					    regpair->regDmnEnum, /* 5ghz */
					    regpair->reg_2ghz_ctl,
					    regpair->reg_5ghz_ctl);
	if (ret)
		ath10k_warn("could not set pdev regdomain (%d)\n", ret);
}
1305
1306/***************/
1307/* TX handlers */
1308/***************/
1309
/*
 * Frames sent to the FW have to be in "Native Wifi" format.
 * Strip the QoS field from the 802.11 header.
 */
static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
				       struct ieee80211_tx_control *control,
				       struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u8 *qos_ctl;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	/* The QoS control field is the last 2 bytes of the header, so
	 * shifting everything after it down by IEEE80211_QOS_CTL_LEN
	 * (the payload length is skb->len - hdrlen, hdrlen including
	 * the QoS field) removes it in place; then trim the tail. */
	qos_ctl = ieee80211_get_qos_ctl(hdr);
	memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN,
		skb->len - ieee80211_hdrlen(hdr->frame_control));
	skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN);
}
1329
1330static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
1331{
1332 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1333 struct ieee80211_vif *vif = info->control.vif;
1334 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1335 struct ath10k *ar = arvif->ar;
1336 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1337 struct ieee80211_key_conf *key = info->control.hw_key;
1338 int ret;
1339
1340 /* TODO AP mode should be implemented */
1341 if (vif->type != NL80211_IFTYPE_STATION)
1342 return;
1343
1344 if (!ieee80211_has_protected(hdr->frame_control))
1345 return;
1346
1347 if (!key)
1348 return;
1349
1350 if (key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
1351 key->cipher != WLAN_CIPHER_SUITE_WEP104)
1352 return;
1353
1354 if (key->keyidx == arvif->def_wep_key_index)
1355 return;
1356
1357 ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
1358
1359 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
1360 WMI_VDEV_PARAM_DEF_KEYID,
1361 key->keyidx);
1362 if (ret) {
1363 ath10k_warn("could not update wep keyidx (%d)\n", ret);
1364 return;
1365 }
1366
1367 arvif->def_wep_key_index = key->keyidx;
1368}
1369
1370static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
1371{
1372 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1373 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1374 struct ieee80211_vif *vif = info->control.vif;
1375 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1376
1377 /* This is case only for P2P_GO */
1378 if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
1379 arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
1380 return;
1381
1382 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
1383 spin_lock_bh(&ar->data_lock);
1384 if (arvif->u.ap.noa_data)
1385 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
1386 GFP_ATOMIC))
1387 memcpy(skb_put(skb, arvif->u.ap.noa_len),
1388 arvif->u.ap.noa_data,
1389 arvif->u.ap.noa_len);
1390 spin_unlock_bh(&ar->data_lock);
1391 }
1392}
1393
1394static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
1395{
1396 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1397 int ret;
1398
1399 if (ieee80211_is_mgmt(hdr->frame_control))
1400 ret = ath10k_htt_mgmt_tx(ar->htt, skb);
1401 else if (ieee80211_is_nullfunc(hdr->frame_control))
1402 /* FW does not report tx status properly for NullFunc frames
1403 * unless they are sent through mgmt tx path. mac80211 sends
1404 * those frames when it detects link/beacon loss and depends on
1405 * the tx status to be correct. */
1406 ret = ath10k_htt_mgmt_tx(ar->htt, skb);
1407 else
1408 ret = ath10k_htt_tx(ar->htt, skb);
1409
1410 if (ret) {
1411 ath10k_warn("tx failed (%d). dropping packet.\n", ret);
1412 ieee80211_free_txskb(ar->hw, skb);
1413 }
1414}
1415
1416void ath10k_offchan_tx_purge(struct ath10k *ar)
1417{
1418 struct sk_buff *skb;
1419
1420 for (;;) {
1421 skb = skb_dequeue(&ar->offchan_tx_queue);
1422 if (!skb)
1423 break;
1424
1425 ieee80211_free_txskb(ar->hw, skb);
1426 }
1427}
1428
/* Worker that drains ar->offchan_tx_queue: for each frame it creates a
 * temporary peer if needed, submits the frame, waits for tx completion
 * and then deletes any peer it created. Runs with conf_mutex held per
 * frame. */
void ath10k_offchan_tx_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
	struct ath10k_peer *peer;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	const u8 *peer_addr;
	int vdev_id;
	int ret;

	/* FW requirement: We must create a peer before FW will send out
	 * an offchannel frame. Otherwise the frame will be stuck and
	 * never transmitted. We delete the peer upon tx completion.
	 * It is unlikely that a peer for offchannel tx will already be
	 * present. However it may be in some rare cases so account for that.
	 * Otherwise we might remove a legitimate peer and break stuff. */

	for (;;) {
		skb = skb_dequeue(&ar->offchan_tx_queue);
		if (!skb)
			break;

		mutex_lock(&ar->conf_mutex);

		ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
			   skb);

		hdr = (struct ieee80211_hdr *)skb->data;
		peer_addr = ieee80211_get_DA(hdr);
		/* vdev_id was stashed in the skb cb by ath10k_tx(). */
		vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id;

		spin_lock_bh(&ar->data_lock);
		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
		spin_unlock_bh(&ar->data_lock);

		if (peer)
			ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
				   peer_addr, vdev_id);

		/* Only create (and later delete) a peer if one did not
		 * already exist - see FW requirement note above. */
		if (!peer) {
			ret = ath10k_peer_create(ar, vdev_id, peer_addr);
			if (ret)
				ath10k_warn("peer %pM on vdev %d not created (%d)\n",
					    peer_addr, vdev_id, ret);
		}

		spin_lock_bh(&ar->data_lock);
		INIT_COMPLETION(ar->offchan_tx_completed);
		ar->offchan_tx_skb = skb;
		spin_unlock_bh(&ar->data_lock);

		ath10k_tx_htt(ar, skb);

		/* Tx completion handler fires offchan_tx_completed; give
		 * the FW up to 3 seconds. */
		ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
						  3 * HZ);
		if (ret <= 0)
			ath10k_warn("timed out waiting for offchannel skb %p\n",
				    skb);

		if (!peer) {
			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
			if (ret)
				ath10k_warn("peer %pM on vdev %d not deleted (%d)\n",
					    peer_addr, vdev_id, ret);
		}

		mutex_unlock(&ar->conf_mutex);
	}
}
1498
1499/************/
1500/* Scanning */
1501/************/
1502
/*
 * This gets called if we don't get a heart-beat during scan.
 * This may indicate the FW has hung and we need to abort the
 * scan manually to prevent cancel_hw_scan() from deadlocking.
 */
/* Scan watchdog timer callback (ptr is the struct ath10k *). */
void ath10k_reset_scan(unsigned long ptr)
{
	struct ath10k *ar = (struct ath10k *)ptr;

	spin_lock_bh(&ar->data_lock);
	/* Scan finished normally before the timer fired - nothing to do. */
	if (!ar->scan.in_progress) {
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_warn("scan timeout. resetting. fw issue?\n");

	/* Tell mac80211 the operation ended so userspace isn't stuck. */
	if (ar->scan.is_roc)
		ieee80211_remain_on_channel_expired(ar->hw);
	else
		ieee80211_scan_completed(ar->hw, 1 /* aborted */);

	ar->scan.in_progress = false;
	/* Wake anyone blocked in ath10k_abort_scan(). */
	complete_all(&ar->scan.completed);
	spin_unlock_bh(&ar->data_lock);
}
1529
/* Synchronously stop an in-flight scan/RoC. Returns 0 once the scan is
 * known to be stopped, -EIO if the WMI stop command could not be sent,
 * -ETIMEDOUT if FW never confirmed. Caller must hold conf_mutex. */
static int ath10k_abort_scan(struct ath10k *ar)
{
	struct wmi_stop_scan_arg arg = {
		.req_id = 1, /* FIXME */
		.req_type = WMI_SCAN_STOP_ONE,
		.u.scan_id = ATH10K_SCAN_ID,
	};
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	/* Stop the watchdog first so it cannot fire mid-abort. */
	del_timer_sync(&ar->scan.timeout);

	spin_lock_bh(&ar->data_lock);
	if (!ar->scan.in_progress) {
		spin_unlock_bh(&ar->data_lock);
		return 0;
	}

	ar->scan.aborting = true;
	spin_unlock_bh(&ar->data_lock);

	ret = ath10k_wmi_stop_scan(ar, &arg);
	if (ret) {
		ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
		return -EIO;
	}

	/* Push the command out so the completion timeout is meaningful. */
	ath10k_wmi_flush_tx(ar);

	ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
	if (ret == 0)
		ath10k_warn("timed out while waiting for scan to stop\n");

	/* scan completion may be done right after we timeout here, so let's
	 * check the in_progress and tell mac80211 scan is completed. if we
	 * don't do that and FW fails to send us scan completion indication
	 * then userspace won't be able to scan anymore */
	ret = 0;

	spin_lock_bh(&ar->data_lock);
	if (ar->scan.in_progress) {
		ath10k_warn("could not stop scan. its still in progress\n");
		ar->scan.in_progress = false;
		ath10k_offchan_tx_purge(ar);
		ret = -ETIMEDOUT;
	}
	spin_unlock_bh(&ar->data_lock);

	return ret;
}
1581
1582static int ath10k_start_scan(struct ath10k *ar,
1583 const struct wmi_start_scan_arg *arg)
1584{
1585 int ret;
1586
1587 lockdep_assert_held(&ar->conf_mutex);
1588
1589 ret = ath10k_wmi_start_scan(ar, arg);
1590 if (ret)
1591 return ret;
1592
1593 /* make sure we submit the command so the completion
1594 * timeout makes sense */
1595 ath10k_wmi_flush_tx(ar);
1596
1597 ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
1598 if (ret == 0) {
1599 ath10k_abort_scan(ar);
1600 return ret;
1601 }
1602
1603 /* the scan can complete earlier, before we even
1604 * start the timer. in that case the timer handler
1605 * checks ar->scan.in_progress and bails out if its
1606 * false. Add a 200ms margin to account event/command
1607 * processing. */
1608 mod_timer(&ar->scan.timeout, jiffies +
1609 msecs_to_jiffies(arg->max_scan_time+200));
1610 return 0;
1611}
1612
1613/**********************/
1614/* mac80211 callbacks */
1615/**********************/
1616
/* mac80211 .tx callback: massage the frame into the format the FW
 * expects, tag it with vdev id and tid, then either queue it for the
 * offchannel worker or hand it to HTT directly. */
static void ath10k_tx(struct ieee80211_hw *hw,
		      struct ieee80211_tx_control *control,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif = NULL;
	u32 vdev_id = 0;
	u8 tid;

	/* Pick the vdev: the frame's vif if present, otherwise fall back
	 * to the monitor vdev. */
	if (info->control.vif) {
		arvif = ath10k_vif_to_arvif(info->control.vif);
		vdev_id = arvif->vdev_id;
	} else if (ar->monitor_enabled) {
		vdev_id = ar->monitor_vdev_id;
	}

	/* We should disable CCK RATE due to P2P */
	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
		ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");

	/* we must calculate tid before we apply qos workaround
	 * as we'd lose the qos control field */
	tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
	if (ieee80211_is_data_qos(hdr->frame_control) &&
	    is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
	}

	/* Convert to "Native Wifi", sync the WEP default key, and add
	 * P2P NoA / sequence number as needed. */
	ath10k_tx_h_qos_workaround(hw, control, skb);
	ath10k_tx_h_update_wep_key(skb);
	ath10k_tx_h_add_p2p_noa_ie(ar, skb);
	ath10k_tx_h_seq_no(skb);

	memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
	ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
	ATH10K_SKB_CB(skb)->htt.tid = tid;

	/* Offchannel frames are deferred to a worker that first creates
	 * a temporary peer (FW requirement). */
	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		spin_lock_bh(&ar->data_lock);
		ATH10K_SKB_CB(skb)->htt.is_offchan = true;
		/* Use the scan vdev - the frame goes out on the RoC channel. */
		ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id;
		spin_unlock_bh(&ar->data_lock);

		ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);

		skb_queue_tail(&ar->offchan_tx_queue, skb);
		ieee80211_queue_work(hw, &ar->offchan_tx_work);
		return;
	}

	ath10k_tx_htt(ar, skb);
}
1672
/*
 * Initialize various parameters with default values.
 */
1676static int ath10k_start(struct ieee80211_hw *hw)
1677{
1678 struct ath10k *ar = hw->priv;
1679 int ret;
1680
1681 ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
1682 if (ret)
1683 ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
1684 ret);
1685
1686 ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0);
1687 if (ret)
1688 ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
1689 ret);
1690
1691 return 0;
1692}
1693
/* mac80211 .stop callback. Cancel the offchannel worker first so it
 * cannot requeue work, then drop anything still queued. */
static void ath10k_stop(struct ieee80211_hw *hw)
{
	struct ath10k *ar = hw->priv;

	/* avoid leaks in case FW never confirms scan for offchannel */
	cancel_work_sync(&ar->offchan_tx_work);
	ath10k_offchan_tx_purge(ar);
}
1702
/* mac80211 .config callback: track the current channel, propagate
 * powersave changes to all interfaces, and create/destroy the monitor
 * vdev on monitor-flag changes. */
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ath10k_generic_iter ar_iter;
	struct ath10k *ar = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;
	u32 flags;

	mutex_lock(&ar->conf_mutex);

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
			   conf->chandef.chan->center_freq);
		/* rx_channel is read from the rx path, hence data_lock. */
		spin_lock_bh(&ar->data_lock);
		ar->rx_channel = conf->chandef.chan;
		spin_unlock_bh(&ar->data_lock);
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
		ar_iter.ar = ar;
		flags = IEEE80211_IFACE_ITER_RESUME_ALL;

		/* Apply the PS setting per-vif; ath10k_ps_iter (defined
		 * elsewhere) accumulates its status in ar_iter.ret --
		 * presumably the last/first failure; verify in its def. */
		ieee80211_iterate_active_interfaces_atomic(hw,
							   flags,
							   ath10k_ps_iter,
							   &ar_iter);

		ret = ar_iter.ret;
	}

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		if (conf->flags & IEEE80211_CONF_MONITOR)
			ret = ath10k_monitor_create(ar);
		else
			ret = ath10k_monitor_destroy(ar);
	}

	mutex_unlock(&ar->conf_mutex);
	return ret;
}
1744
1745/*
1746 * TODO:
1747 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
1748 * because we will send mgmt frames without CCK. This requirement
1749 * for P2P_FIND/GO_NEG should be handled by checking CCK flag
1750 * in the TX packet.
1751 */
1752static int ath10k_add_interface(struct ieee80211_hw *hw,
1753 struct ieee80211_vif *vif)
1754{
1755 struct ath10k *ar = hw->priv;
1756 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1757 enum wmi_sta_powersave_param param;
1758 int ret = 0;
1759 u32 value;
1760 int bit;
1761
1762 mutex_lock(&ar->conf_mutex);
1763
1764 arvif->ar = ar;
1765 arvif->vif = vif;
1766
1767 if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
1768 ath10k_warn("Only one monitor interface allowed\n");
1769 ret = -EBUSY;
1770 goto exit;
1771 }
1772
1773 bit = ffs(ar->free_vdev_map);
1774 if (bit == 0) {
1775 ret = -EBUSY;
1776 goto exit;
1777 }
1778
1779 arvif->vdev_id = bit - 1;
1780 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
1781 ar->free_vdev_map &= ~(1 << arvif->vdev_id);
1782
1783 if (ar->p2p)
1784 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
1785
1786 switch (vif->type) {
1787 case NL80211_IFTYPE_UNSPECIFIED:
1788 case NL80211_IFTYPE_STATION:
1789 arvif->vdev_type = WMI_VDEV_TYPE_STA;
1790 if (vif->p2p)
1791 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
1792 break;
1793 case NL80211_IFTYPE_ADHOC:
1794 arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
1795 break;
1796 case NL80211_IFTYPE_AP:
1797 arvif->vdev_type = WMI_VDEV_TYPE_AP;
1798
1799 if (vif->p2p)
1800 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
1801 break;
1802 case NL80211_IFTYPE_MONITOR:
1803 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
1804 break;
1805 default:
1806 WARN_ON(1);
1807 break;
1808 }
1809
1810 ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
1811 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
1812
1813 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
1814 arvif->vdev_subtype, vif->addr);
1815 if (ret) {
1816 ath10k_warn("WMI vdev create failed: ret %d\n", ret);
1817 goto exit;
1818 }
1819
1820 ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID,
1821 arvif->def_wep_key_index);
1822 if (ret)
1823 ath10k_warn("Failed to set default keyid: %d\n", ret);
1824
1825 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
1826 WMI_VDEV_PARAM_TX_ENCAP_TYPE,
1827 ATH10K_HW_TXRX_NATIVE_WIFI);
1828 if (ret)
1829 ath10k_warn("Failed to set TX encap: %d\n", ret);
1830
1831 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1832 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
1833 if (ret) {
1834 ath10k_warn("Failed to create peer for AP: %d\n", ret);
1835 goto exit;
1836 }
1837 }
1838
1839 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
1840 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
1841 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
1842 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1843 param, value);
1844 if (ret)
1845 ath10k_warn("Failed to set RX wake policy: %d\n", ret);
1846
1847 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1848 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1849 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1850 param, value);
1851 if (ret)
1852 ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
1853
1854 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1855 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1856 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1857 param, value);
1858 if (ret)
1859 ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
1860 }
1861
1862 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
1863 ar->monitor_present = true;
1864
1865exit:
1866 mutex_unlock(&ar->conf_mutex);
1867 return ret;
1868}
1869
/* mac80211 .remove_interface callback: tear down the WMI vdev, release
 * its id back to the bitmap and clean up associated peers. */
static void ath10k_remove_interface(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
	int ret;

	mutex_lock(&ar->conf_mutex);

	ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);

	/* Return the vdev id to the free bitmap. */
	ar->free_vdev_map |= 1 << (arvif->vdev_id);

	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
		if (ret)
			ath10k_warn("Failed to remove peer for AP: %d\n", ret);

		/* NOTE(review): noa_data is freed but not NULLed and the
		 * free is not under data_lock -- confirm no concurrent
		 * reader (ath10k_tx_h_add_p2p_noa_ie) can race here. */
		kfree(arvif->u.ap.noa_data);
	}

	ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
	if (ret)
		ath10k_warn("WMI vdev delete failed: %d\n", ret);

	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
		ar->monitor_present = false;

	/* Drop any remaining peers mapped to this vdev. */
	ath10k_peer_cleanup(ar, arvif->vdev_id);

	mutex_unlock(&ar->conf_mutex);
}
1902
1903/*
1904 * FIXME: Has to be verified.
1905 */
/* Filter flags honoured by ath10k_configure_filter(); everything else
 * requested by mac80211 is masked off. */
#define SUPPORTED_FILTERS			\
	(FIF_PROMISC_IN_BSS |			\
	FIF_ALLMULTI |				\
	FIF_CONTROL |				\
	FIF_PSPOLL |				\
	FIF_OTHER_BSS |				\
	FIF_BCN_PRBRESP_PROMISC |		\
	FIF_PROBE_REQ |				\
	FIF_FCSFAIL)
1915
1916static void ath10k_configure_filter(struct ieee80211_hw *hw,
1917 unsigned int changed_flags,
1918 unsigned int *total_flags,
1919 u64 multicast)
1920{
1921 struct ath10k *ar = hw->priv;
1922 int ret;
1923
1924 mutex_lock(&ar->conf_mutex);
1925
1926 changed_flags &= SUPPORTED_FILTERS;
1927 *total_flags &= SUPPORTED_FILTERS;
1928 ar->filter_flags = *total_flags;
1929
1930 if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
1931 !ar->monitor_enabled) {
1932 ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
1933 if (ret)
1934 ath10k_warn("Unable to start monitor mode\n");
1935 else
1936 ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
1937 } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
1938 ar->monitor_enabled) {
1939 ret = ath10k_monitor_stop(ar);
1940 if (ret)
1941 ath10k_warn("Unable to stop monitor mode\n");
1942 else
1943 ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
1944 }
1945
1946 mutex_unlock(&ar->conf_mutex);
1947}
1948
/* mac80211 .bss_info_changed callback: push each changed BSS attribute
 * to the FW via the matching WMI vdev/pdev parameter. */
static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_bss_conf *info,
				    u32 changed)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
	int ret = 0;

	mutex_lock(&ar->conf_mutex);

	if (changed & BSS_CHANGED_IBSS)
		ath10k_control_ibss(arvif, info, vif->addr);

	if (changed & BSS_CHANGED_BEACON_INT) {
		arvif->beacon_interval = info->beacon_int;
		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
						WMI_VDEV_PARAM_BEACON_INTERVAL,
						arvif->beacon_interval);
		if (ret)
			ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
				    arvif->vdev_id);
		else
			ath10k_dbg(ATH10K_DBG_MAC,
				   "Beacon interval: %d set for VDEV: %d\n",
				   arvif->beacon_interval, arvif->vdev_id);
	}

	if (changed & BSS_CHANGED_BEACON) {
		/* Beacon tx mode is a pdev-wide (not per-vdev) setting. */
		ret = ath10k_wmi_pdev_set_param(ar,
						WMI_PDEV_PARAM_BEACON_TX_MODE,
						WMI_BEACON_STAGGERED_MODE);
		if (ret)
			ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
				    arvif->vdev_id);
		else
			ath10k_dbg(ATH10K_DBG_MAC,
				   "Set staggered beacon mode for VDEV: %d\n",
				   arvif->vdev_id);
	}

	if (changed & BSS_CHANGED_BEACON_INFO) {
		arvif->dtim_period = info->dtim_period;

		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
						WMI_VDEV_PARAM_DTIM_PERIOD,
						arvif->dtim_period);
		if (ret)
			ath10k_warn("Failed to set dtim period for VDEV: %d\n",
				    arvif->vdev_id);
		else
			ath10k_dbg(ATH10K_DBG_MAC,
				   "Set dtim period: %d for VDEV: %d\n",
				   arvif->dtim_period, arvif->vdev_id);
	}

	if (changed & BSS_CHANGED_SSID &&
	    vif->type == NL80211_IFTYPE_AP) {
		/* Cache SSID/hidden flag for later beacon control. */
		arvif->u.ap.ssid_len = info->ssid_len;
		if (info->ssid_len)
			memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
		arvif->u.ap.hidden_ssid = info->hidden_ssid;
	}

	if (changed & BSS_CHANGED_BSSID) {
		if (!is_zero_ether_addr(info->bssid)) {
			ret = ath10k_peer_create(ar, arvif->vdev_id,
						 info->bssid);
			if (ret)
				ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
					    info->bssid, arvif->vdev_id);
			else
				ath10k_dbg(ATH10K_DBG_MAC,
					   "Added peer: %pM for VDEV: %d\n",
					   info->bssid, arvif->vdev_id);


			if (vif->type == NL80211_IFTYPE_STATION) {
				/*
				 * Never erased: the FW requires the BSSID
				 * for crypto key clearing later on.
				 */
				memcpy(arvif->u.sta.bssid, info->bssid,
				       ETH_ALEN);

				ret = ath10k_vdev_start(arvif);
				if (!ret)
					ath10k_dbg(ATH10K_DBG_MAC,
						   "VDEV: %d started with BSSID: %pM\n",
						   arvif->vdev_id, info->bssid);
			}

			/*
			 * Mac80211 does not keep IBSS bssid when leaving IBSS,
			 * so driver need to store it. It is needed when leaving
			 * IBSS in order to remove BSSID peer.
			 */
			if (vif->type == NL80211_IFTYPE_ADHOC)
				memcpy(arvif->u.ibss.bssid, info->bssid,
				       ETH_ALEN);
		}
	}

	if (changed & BSS_CHANGED_BEACON_ENABLED)
		ath10k_control_beaconing(arvif, info);

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		u32 cts_prot;
		if (info->use_cts_prot)
			cts_prot = 1;
		else
			cts_prot = 0;

		/* NOTE(review): CTS protection is programmed through
		 * WMI_VDEV_PARAM_ENABLE_RTSCTS -- confirm this param
		 * actually controls protection mode in this FW. */
		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
						WMI_VDEV_PARAM_ENABLE_RTSCTS,
						cts_prot);
		if (ret)
			ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
				    arvif->vdev_id);
		else
			ath10k_dbg(ATH10K_DBG_MAC,
				   "Set CTS prot: %d for VDEV: %d\n",
				   cts_prot, arvif->vdev_id);
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		u32 slottime;
		if (info->use_short_slot)
			slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */

		else
			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */

		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
						WMI_VDEV_PARAM_SLOT_TIME,
						slottime);
		if (ret)
			ath10k_warn("Failed to set erp slot for VDEV: %d\n",
				    arvif->vdev_id);
		else
			ath10k_dbg(ATH10K_DBG_MAC,
				   "Set slottime: %d for VDEV: %d\n",
				   slottime, arvif->vdev_id);
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		u32 preamble;
		if (info->use_short_preamble)
			preamble = WMI_VDEV_PREAMBLE_SHORT;
		else
			preamble = WMI_VDEV_PREAMBLE_LONG;

		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
						WMI_VDEV_PARAM_PREAMBLE,
						preamble);
		if (ret)
			ath10k_warn("Failed to set preamble for VDEV: %d\n",
				    arvif->vdev_id);
		else
			ath10k_dbg(ATH10K_DBG_MAC,
				   "Set preamble: %d for VDEV: %d\n",
				   preamble, arvif->vdev_id);
	}

	/* Only the assoc==true transition is handled here; disassoc is
	 * driven from ath10k_sta_state(). */
	if (changed & BSS_CHANGED_ASSOC) {
		if (info->assoc)
			ath10k_bss_assoc(hw, vif, info);
	}

	mutex_unlock(&ar->conf_mutex);
}
2120
/* mac80211 .hw_scan callback: claim the single scan slot, translate the
 * cfg80211 request into WMI scan arguments and kick off the scan. */
static int ath10k_hw_scan(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif,
			  struct cfg80211_scan_request *req)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
	struct wmi_start_scan_arg arg;
	int ret = 0;
	int i;

	mutex_lock(&ar->conf_mutex);

	/* Only one scan/RoC may run at a time. */
	spin_lock_bh(&ar->data_lock);
	if (ar->scan.in_progress) {
		spin_unlock_bh(&ar->data_lock);
		ret = -EBUSY;
		goto exit;
	}

	INIT_COMPLETION(ar->scan.started);
	INIT_COMPLETION(ar->scan.completed);
	ar->scan.in_progress = true;
	ar->scan.aborting = false;
	ar->scan.is_roc = false;
	ar->scan.vdev_id = arvif->vdev_id;
	spin_unlock_bh(&ar->data_lock);

	memset(&arg, 0, sizeof(arg));
	ath10k_wmi_start_scan_init(ar, &arg);
	arg.vdev_id = arvif->vdev_id;
	arg.scan_id = ATH10K_SCAN_ID;

	if (!req->no_cck)
		arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;

	if (req->ie_len) {
		arg.ie_len = req->ie_len;
		/* NOTE(review): assumes req->ie_len fits into arg.ie --
		 * confirm the bound is enforced by cfg80211/WMI limits. */
		memcpy(arg.ie, req->ie, arg.ie_len);
	}

	if (req->n_ssids) {
		arg.n_ssids = req->n_ssids;
		for (i = 0; i < arg.n_ssids; i++) {
			arg.ssids[i].len = req->ssids[i].ssid_len;
			arg.ssids[i].ssid = req->ssids[i].ssid;
		}
	}

	if (req->n_channels) {
		arg.n_channels = req->n_channels;
		for (i = 0; i < arg.n_channels; i++)
			arg.channels[i] = req->channels[i]->center_freq;
	}

	ret = ath10k_start_scan(ar, &arg);
	if (ret) {
		ath10k_warn("could not start hw scan (%d)\n", ret);
		/* Roll back the scan-slot claim on failure. */
		spin_lock_bh(&ar->data_lock);
		ar->scan.in_progress = false;
		spin_unlock_bh(&ar->data_lock);
	}

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}
2187
/* mac80211 .cancel_hw_scan callback: abort the running scan; if the FW
 * cannot be convinced, force a scan-completed event so mac80211 and
 * userspace are not left waiting forever. */
static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif)
{
	struct ath10k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);
	ret = ath10k_abort_scan(ar);
	if (ret) {
		ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n",
			    ret);
		ieee80211_scan_completed(hw, 1 /* aborted */);
	}
	mutex_unlock(&ar->conf_mutex);
}
2203
/* mac80211 .set_key callback: install or remove a crypto key for the
 * resolved peer and mirror the key in local bookkeeping. */
static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
	struct ath10k_peer *peer;
	const u8 *peer_addr;
	bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
		      key->cipher == WLAN_CIPHER_SUITE_WEP104;
	int ret = 0;

	if (key->keyidx > WMI_MAX_KEY_INDEX)
		return -ENOSPC;

	mutex_lock(&ar->conf_mutex);

	/* Resolve which peer the key belongs to: the station itself,
	 * our AP (STA vdev), or the vif's own address (group keys). */
	if (sta)
		peer_addr = sta->addr;
	else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
		peer_addr = vif->bss_conf.bssid;
	else
		peer_addr = vif->addr;

	key->hw_key_idx = key->keyidx;

	/* the peer should not disappear in mid-way (unless FW goes awry) since
	 * we already hold conf_mutex. we just make sure its there now. */
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer) {
		if (cmd == SET_KEY) {
			ath10k_warn("cannot install key for non-existent peer %pM\n",
				    peer_addr);
			ret = -EOPNOTSUPP;
			goto exit;
		} else {
			/* if the peer doesn't exist there is no key to disable
			 * anymore */
			goto exit;
		}
	}

	/* Track WEP keys per-vdev so the default key index logic in the
	 * tx path (ath10k_tx_h_update_wep_key) can work. */
	if (is_wep) {
		if (cmd == SET_KEY)
			arvif->wep_keys[key->keyidx] = key;
		else
			arvif->wep_keys[key->keyidx] = NULL;

		if (cmd == DISABLE_KEY)
			ath10k_clear_vdev_key(arvif, key);
	}

	ret = ath10k_install_key(arvif, key, cmd, peer_addr);
	if (ret) {
		ath10k_warn("ath10k_install_key failed (%d)\n", ret);
		goto exit;
	}

	/* Re-lookup the peer and record the key against it. */
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
	if (peer && cmd == SET_KEY)
		peer->keys[key->keyidx] = key;
	else if (peer && cmd == DISABLE_KEY)
		peer->keys[key->keyidx] = NULL;
	else if (peer == NULL)
		/* impossible unless FW goes crazy */
		ath10k_warn("peer %pM disappeared!\n", peer_addr);
	spin_unlock_bh(&ar->data_lock);

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}
2280
/* mac80211 .sta_state callback: create/delete FW peers and drive
 * (dis)association on the relevant station state transitions. */
static int ath10k_sta_state(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta,
			    enum ieee80211_sta_state old_state,
			    enum ieee80211_sta_state new_state)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
	int ret = 0;

	mutex_lock(&ar->conf_mutex);

	/* NOTEXIST -> NONE on non-STA vifs: create the FW peer.
	 * (On STA vifs the BSS peer is created in bss_info_changed.) */
	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE &&
	    vif->type != NL80211_IFTYPE_STATION) {
		/*
		 * New station addition.
		 */
		ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
		if (ret)
			ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
				    sta->addr, arvif->vdev_id);
		else
			ath10k_dbg(ATH10K_DBG_MAC,
				   "Added peer: %pM for VDEV: %d\n",
				   sta->addr, arvif->vdev_id);
	} else if ((old_state == IEEE80211_STA_NONE &&
		    new_state == IEEE80211_STA_NOTEXIST)) {
		/*
		 * Existing station deletion.
		 */
		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
		if (ret)
			ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
				    sta->addr, arvif->vdev_id);
		else
			ath10k_dbg(ATH10K_DBG_MAC,
				   "Removed peer: %pM for VDEV: %d\n",
				   sta->addr, arvif->vdev_id);

		/* On a STA vif the disappearing peer is our AP. */
		if (vif->type == NL80211_IFTYPE_STATION)
			ath10k_bss_disassoc(hw, vif);
	} else if (old_state == IEEE80211_STA_AUTH &&
		   new_state == IEEE80211_STA_ASSOC &&
		   (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)) {
		/*
		 * New association.
		 */
		ret = ath10k_station_assoc(ar, arvif, sta);
		if (ret)
			ath10k_warn("Failed to associate station: %pM\n",
				    sta->addr);
		else
			ath10k_dbg(ATH10K_DBG_MAC,
				   "Station %pM moved to assoc state\n",
				   sta->addr);
	} else if (old_state == IEEE80211_STA_ASSOC &&
		   new_state == IEEE80211_STA_AUTH &&
		   (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)) {
		/*
		 * Disassociation.
		 */
		ret = ath10k_station_disassoc(ar, arvif, sta);
		if (ret)
			ath10k_warn("Failed to disassociate station: %pM\n",
				    sta->addr);
		else
			ath10k_dbg(ATH10K_DBG_MAC,
				   "Station %pM moved to disassociated state\n",
				   sta->addr);
	}

	mutex_unlock(&ar->conf_mutex);
	return ret;
}
2358
2359static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
2360 u16 ac, bool enable)
2361{
2362 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2363 u32 value = 0;
2364 int ret = 0;
2365
2366 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
2367 return 0;
2368
2369 switch (ac) {
2370 case IEEE80211_AC_VO:
2371 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
2372 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
2373 break;
2374 case IEEE80211_AC_VI:
2375 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
2376 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
2377 break;
2378 case IEEE80211_AC_BE:
2379 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
2380 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
2381 break;
2382 case IEEE80211_AC_BK:
2383 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
2384 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
2385 break;
2386 }
2387
2388 if (enable)
2389 arvif->u.sta.uapsd |= value;
2390 else
2391 arvif->u.sta.uapsd &= ~value;
2392
2393 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2394 WMI_STA_PS_PARAM_UAPSD,
2395 arvif->u.sta.uapsd);
2396 if (ret) {
2397 ath10k_warn("could not set uapsd params %d\n", ret);
2398 goto exit;
2399 }
2400
2401 if (arvif->u.sta.uapsd)
2402 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
2403 else
2404 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
2405
2406 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2407 WMI_STA_PS_PARAM_RX_WAKE_POLICY,
2408 value);
2409 if (ret)
2410 ath10k_warn("could not set rx wake param %d\n", ret);
2411
2412exit:
2413 return ret;
2414}
2415
/* mac80211 .conf_tx callback: translate per-AC queue parameters into
 * the cached WMM params, push them to FW, then apply U-APSD. */
static int ath10k_conf_tx(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif, u16 ac,
			  const struct ieee80211_tx_queue_params *params)
{
	struct ath10k *ar = hw->priv;
	struct wmi_wmm_params_arg *p = NULL;
	int ret;

	mutex_lock(&ar->conf_mutex);

	switch (ac) {
	case IEEE80211_AC_VO:
		p = &ar->wmm_params.ac_vo;
		break;
	case IEEE80211_AC_VI:
		p = &ar->wmm_params.ac_vi;
		break;
	case IEEE80211_AC_BE:
		p = &ar->wmm_params.ac_be;
		break;
	case IEEE80211_AC_BK:
		p = &ar->wmm_params.ac_bk;
		break;
	}

	/* Unknown AC: p stays NULL and we bail (covers the missing
	 * switch default). */
	if (WARN_ON(!p)) {
		ret = -EINVAL;
		goto exit;
	}

	p->cwmin = params->cw_min;
	p->cwmax = params->cw_max;
	p->aifs = params->aifs;

	/*
	 * The channel time duration programmed in the HW is in absolute
	 * microseconds, while mac80211 gives the txop in units of
	 * 32 microseconds.
	 */
	p->txop = params->txop * 32;

	/* FIXME: FW accepts wmm params per hw, not per vif */
	ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
	if (ret) {
		ath10k_warn("could not set wmm params %d\n", ret);
		goto exit;
	}

	ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
	if (ret)
		ath10k_warn("could not set sta uapsd %d\n", ret);

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}
2472
/* NOTE(review): unused in this chunk — the roc on_channel wait below
 * uses a hard-coded 3*HZ; presumably this was meant for that wait. */
#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
2474
/* Implement mac80211 remain-on-channel by launching a single-channel
 * passive firmware "scan" and waiting until it reaches the requested
 * channel.  Only one scan or roc may be in flight at a time.
 */
static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_channel *chan,
				    int duration,
				    enum ieee80211_roc_type type)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
	struct wmi_start_scan_arg arg;
	int ret;

	mutex_lock(&ar->conf_mutex);

	/* scan state is shared with the hw_scan path; refuse if anything
	 * is already running */
	spin_lock_bh(&ar->data_lock);
	if (ar->scan.in_progress) {
		spin_unlock_bh(&ar->data_lock);
		ret = -EBUSY;
		goto exit;
	}

	/* re-arm the completions before the firmware can fire events */
	INIT_COMPLETION(ar->scan.started);
	INIT_COMPLETION(ar->scan.completed);
	INIT_COMPLETION(ar->scan.on_channel);
	ar->scan.in_progress = true;
	ar->scan.aborting = false;
	ar->scan.is_roc = true;
	ar->scan.vdev_id = arvif->vdev_id;
	ar->scan.roc_freq = chan->center_freq;
	spin_unlock_bh(&ar->data_lock);

	/* single passive channel, dwell for the requested duration */
	memset(&arg, 0, sizeof(arg));
	ath10k_wmi_start_scan_init(ar, &arg);
	arg.vdev_id = arvif->vdev_id;
	arg.scan_id = ATH10K_SCAN_ID;
	arg.n_channels = 1;
	arg.channels[0] = chan->center_freq;
	arg.dwell_time_active = duration;
	arg.dwell_time_passive = duration;
	arg.max_scan_time = 2 * duration;
	arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
	arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;

	ret = ath10k_start_scan(ar, &arg);
	if (ret) {
		ath10k_warn("could not start roc scan (%d)\n", ret);
		/* roll back the in_progress claim taken above */
		spin_lock_bh(&ar->data_lock);
		ar->scan.in_progress = false;
		spin_unlock_bh(&ar->data_lock);
		goto exit;
	}

	/* NOTE(review): waits 3*HZ although ATH10K_ROC_TIMEOUT_HZ (2*HZ)
	 * is defined above — confirm which timeout is intended */
	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
	if (ret == 0) {
		ath10k_warn("could not switch to channel for roc scan\n");
		ath10k_abort_scan(ar);
		ret = -ETIMEDOUT;
		goto exit;
	}

	ret = 0;
exit:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}
2539
/* mac80211 callback: tear down an active remain-on-channel by aborting
 * the underlying roc scan.  Serialized by conf_mutex. */
static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
	struct ath10k *ar = hw->priv;

	mutex_lock(&ar->conf_mutex);
	ath10k_abort_scan(ar);
	mutex_unlock(&ar->conf_mutex);

	return 0;
}
2550
/*
 * Both RTS and Fragmentation threshold are interface-specific
 * in ath10k, but device-specific in mac80211.
 */
static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath10k_generic_iter *ar_iter = data;
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
	u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;

	/* never exceed ATH10K_RTS_MAX */
	rts = min_t(u32, rts, ATH10K_RTS_MAX);

	/* NOTE(review): ret is overwritten for every interface visited,
	 * so only the last vdev's status reaches the caller */
	ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
						 WMI_VDEV_PARAM_RTS_THRESHOLD,
						 rts);
	if (ar_iter->ret)
		ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
			    arvif->vdev_id);
	else
		ath10k_dbg(ATH10K_DBG_MAC,
			   "Set RTS threshold: %d for VDEV: %d\n",
			   rts, arvif->vdev_id);
}
2574
2575static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2576{
2577 struct ath10k_generic_iter ar_iter;
2578 struct ath10k *ar = hw->priv;
2579
2580 memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
2581 ar_iter.ar = ar;
2582
2583 mutex_lock(&ar->conf_mutex);
2584 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
2585 ath10k_set_rts_iter, &ar_iter);
2586 mutex_unlock(&ar->conf_mutex);
2587
2588 return ar_iter.ret;
2589}
2590
2591static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
2592{
2593 struct ath10k_generic_iter *ar_iter = data;
2594 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2595 u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
2596 int ret;
2597
2598 frag = clamp_t(u32, frag,
2599 ATH10K_FRAGMT_THRESHOLD_MIN,
2600 ATH10K_FRAGMT_THRESHOLD_MAX);
2601
2602 ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
2603 WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
2604 frag);
2605
2606 ar_iter->ret = ret;
2607 if (ar_iter->ret)
2608 ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
2609 arvif->vdev_id);
2610 else
2611 ath10k_dbg(ATH10K_DBG_MAC,
2612 "Set frag threshold: %d for VDEV: %d\n",
2613 frag, arvif->vdev_id);
2614}
2615
2616static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
2617{
2618 struct ath10k_generic_iter ar_iter;
2619 struct ath10k *ar = hw->priv;
2620
2621 memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
2622 ar_iter.ar = ar;
2623
2624 mutex_lock(&ar->conf_mutex);
2625 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
2626 ath10k_set_frag_iter, &ar_iter);
2627 mutex_unlock(&ar->conf_mutex);
2628
2629 return ar_iter.ret;
2630}
2631
/* mac80211 flush callback: wait up to ATH10K_FLUSH_TIMEOUT_HZ until no
 * HTT MSDU ids are outstanding, i.e. all queued tx frames have been
 * completed by the firmware. */
static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
	struct ath10k *ar = hw->priv;
	int ret;

	/* mac80211 doesn't care if we really xmit queued frames or not
	 * we'll collect those frames either way if we stop/delete vdevs */
	if (drop)
		return;

	/* the statement expression re-checks the bitmap under tx_lock
	 * every time empty_tx_wq is woken */
	ret = wait_event_timeout(ar->htt->empty_tx_wq, ({
			bool empty;
			spin_lock_bh(&ar->htt->tx_lock);
			empty = bitmap_empty(ar->htt->used_msdu_ids,
					     ar->htt->max_num_pending_tx);
			spin_unlock_bh(&ar->htt->tx_lock);
			(empty);
		}), ATH10K_FLUSH_TIMEOUT_HZ);
	if (ret <= 0)
		ath10k_warn("tx not flushed\n");
}
2653
/* TODO: Implement this function properly
 * For now it is needed to reply to Probe Requests in IBSS mode.
 * Probably we need this information from FW.
 */
static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
{
	/* always claim the last beacon was ours so mac80211 answers
	 * probe requests in IBSS mode */
	return 1;
}
2662
/* mac80211 driver callbacks; anything not listed falls back to
 * mac80211's defaults. */
static const struct ieee80211_ops ath10k_ops = {
	.tx				= ath10k_tx,
	.start				= ath10k_start,
	.stop				= ath10k_stop,
	.config				= ath10k_config,
	.add_interface			= ath10k_add_interface,
	.remove_interface		= ath10k_remove_interface,
	.configure_filter		= ath10k_configure_filter,
	.bss_info_changed		= ath10k_bss_info_changed,
	.hw_scan			= ath10k_hw_scan,
	.cancel_hw_scan			= ath10k_cancel_hw_scan,
	.set_key			= ath10k_set_key,
	.sta_state			= ath10k_sta_state,
	.conf_tx			= ath10k_conf_tx,
	.remain_on_channel		= ath10k_remain_on_channel,
	.cancel_remain_on_channel	= ath10k_cancel_remain_on_channel,
	.set_rts_threshold		= ath10k_set_rts_threshold,
	.set_frag_threshold		= ath10k_set_frag_threshold,
	.flush				= ath10k_flush,
	.tx_last_beacon			= ath10k_tx_last_beacon,
};
2684
/* Initializer helper for entries in the static bitrate table below. */
#define RATETAB_ENT(_rate, _rateid, _flags) { \
	.bitrate	= (_rate), \
	.flags		= (_flags), \
	.hw_value	= (_rateid), \
}

/* 2.4 GHz channel entry: 30 dBm max power, no antenna gain limit. */
#define CHAN2G(_channel, _freq, _flags) { \
	.band			= IEEE80211_BAND_2GHZ, \
	.hw_value		= (_channel), \
	.center_freq		= (_freq), \
	.flags			= (_flags), \
	.max_antenna_gain	= 0, \
	.max_power		= 30, \
}

/* 5 GHz channel entry: 30 dBm max power, no antenna gain limit. */
#define CHAN5G(_channel, _freq, _flags) { \
	.band			= IEEE80211_BAND_5GHZ, \
	.hw_value		= (_channel), \
	.center_freq		= (_freq), \
	.flags			= (_flags), \
	.max_antenna_gain	= 0, \
	.max_power		= 30, \
}
2708
/* 2.4 GHz band: channels 1-14 (2412-2484 MHz), no special flags. */
static const struct ieee80211_channel ath10k_2ghz_channels[] = {
	CHAN2G(1, 2412, 0),
	CHAN2G(2, 2417, 0),
	CHAN2G(3, 2422, 0),
	CHAN2G(4, 2427, 0),
	CHAN2G(5, 2432, 0),
	CHAN2G(6, 2437, 0),
	CHAN2G(7, 2442, 0),
	CHAN2G(8, 2447, 0),
	CHAN2G(9, 2452, 0),
	CHAN2G(10, 2457, 0),
	CHAN2G(11, 2462, 0),
	CHAN2G(12, 2467, 0),
	CHAN2G(13, 2472, 0),
	CHAN2G(14, 2484, 0),
};
2725
/* 5 GHz band: channels 36-165 (5180-5825 MHz), no special flags. */
static const struct ieee80211_channel ath10k_5ghz_channels[] = {
	CHAN5G(36, 5180, 0),
	CHAN5G(40, 5200, 0),
	CHAN5G(44, 5220, 0),
	CHAN5G(48, 5240, 0),
	CHAN5G(52, 5260, 0),
	CHAN5G(56, 5280, 0),
	CHAN5G(60, 5300, 0),
	CHAN5G(64, 5320, 0),
	CHAN5G(100, 5500, 0),
	CHAN5G(104, 5520, 0),
	CHAN5G(108, 5540, 0),
	CHAN5G(112, 5560, 0),
	CHAN5G(116, 5580, 0),
	CHAN5G(120, 5600, 0),
	CHAN5G(124, 5620, 0),
	CHAN5G(128, 5640, 0),
	CHAN5G(132, 5660, 0),
	CHAN5G(136, 5680, 0),
	CHAN5G(140, 5700, 0),
	CHAN5G(149, 5745, 0),
	CHAN5G(153, 5765, 0),
	CHAN5G(157, 5785, 0),
	CHAN5G(161, 5805, 0),
	CHAN5G(165, 5825, 0),
};
2752
/* Legacy bitrate table shared by both bands.  bitrate is in 100 kb/s
 * units (mac80211 convention: 10 => 1 Mb/s); hw_value is the rate code
 * handed to the firmware — NOTE(review): confirm encoding against the
 * firmware interface. */
static struct ieee80211_rate ath10k_rates[] = {
	/* CCK */
	RATETAB_ENT(10, 0x82, 0),
	RATETAB_ENT(20, 0x84, 0),
	RATETAB_ENT(55, 0x8b, 0),
	RATETAB_ENT(110, 0x96, 0),
	/* OFDM */
	RATETAB_ENT(60, 0x0c, 0),
	RATETAB_ENT(90, 0x12, 0),
	RATETAB_ENT(120, 0x18, 0),
	RATETAB_ENT(180, 0x24, 0),
	RATETAB_ENT(240, 0x30, 0),
	RATETAB_ENT(360, 0x48, 0),
	RATETAB_ENT(480, 0x60, 0),
	RATETAB_ENT(540, 0x6c, 0),
};

/* 5 GHz is OFDM-only: skip the four leading CCK entries */
#define ath10k_a_rates (ath10k_rates + 4)
#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4)
/* 2.4 GHz advertises the full CCK+OFDM set */
#define ath10k_g_rates (ath10k_rates + 0)
#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
2774
2775struct ath10k *ath10k_mac_create(void)
2776{
2777 struct ieee80211_hw *hw;
2778 struct ath10k *ar;
2779
2780 hw = ieee80211_alloc_hw(sizeof(struct ath10k), &ath10k_ops);
2781 if (!hw)
2782 return NULL;
2783
2784 ar = hw->priv;
2785 ar->hw = hw;
2786
2787 return ar;
2788}
2789
/* Counterpart of ath10k_mac_create(); frees the ieee80211_hw that owns
 * the ath10k structure (so `ar` is invalid afterwards). */
void ath10k_mac_destroy(struct ath10k *ar)
{
	ieee80211_free_hw(ar->hw);
}
2794
/* Interface combination advertised to cfg80211: up to 8 interfaces of
 * the listed types, all on a single channel, with matching AP/infra
 * beacon intervals. */
static const struct ieee80211_iface_limit ath10k_if_limits[] = {
	{
	.max	= 8,
	.types	= BIT(NL80211_IFTYPE_STATION)
		| BIT(NL80211_IFTYPE_P2P_CLIENT)
		| BIT(NL80211_IFTYPE_P2P_GO)
		| BIT(NL80211_IFTYPE_AP)
	}
};

static const struct ieee80211_iface_combination ath10k_if_comb = {
	.limits = ath10k_if_limits,
	.n_limits = ARRAY_SIZE(ath10k_if_limits),
	.max_interfaces = 8,
	.num_different_channels = 1,
	.beacon_int_infra_match = true,
};
2812
2813static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
2814{
2815 struct ieee80211_sta_vht_cap vht_cap = {0};
2816 u16 mcs_map;
2817
2818 vht_cap.vht_supported = 1;
2819 vht_cap.cap = ar->vht_cap_info;
2820
2821 /* FIXME: check dynamically how many streams board supports */
2822 mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
2823 IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
2824 IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
2825 IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
2826 IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
2827 IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
2828 IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
2829 IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
2830
2831 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
2832 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
2833
2834 return vht_cap;
2835}
2836
/* Translate the firmware's WMI HT capability bits into mac80211's
 * ieee80211_sta_ht_cap.  Returns an all-zero (ht_supported = 0) struct
 * when the firmware reports HT disabled. */
static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
{
	int i;
	struct ieee80211_sta_ht_cap ht_cap = {0};

	if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
		return ht_cap;

	ht_cap.ht_supported = 1;
	ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
	ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
	ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
	/* SM PS field starts out static; may be OR'ed to dynamic below */
	ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;

	if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
		ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;

	if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
		ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;

	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
		u32 smps;

		smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
		smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;

		ht_cap.cap |= smps;
	}

	if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
		ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;

	if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
		u32 stbc;

		/* extract the fw's rx-stbc field and re-encode it at the
		 * mac80211 bit position */
		stbc = ar->ht_cap_info;
		stbc &= WMI_HT_CAP_RX_STBC;
		stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
		stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
		stbc &= IEEE80211_HT_CAP_RX_STBC;

		ht_cap.cap |= stbc;
	}

	if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
		ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
		ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;

	/* max AMSDU is implicitly taken from vht_cap_info */
	if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
		ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	/* full rx MCS mask for every spatial stream the hw reports */
	for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++)
		ht_cap.mcs.rx_mask[i] = 0xFF;

	ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;

	return ht_cap;
}
2899
2900
2901static void ath10k_get_arvif_iter(void *data, u8 *mac,
2902 struct ieee80211_vif *vif)
2903{
2904 struct ath10k_vif_iter *arvif_iter = data;
2905 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2906
2907 if (arvif->vdev_id == arvif_iter->vdev_id)
2908 arvif_iter->arvif = arvif;
2909}
2910
2911struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
2912{
2913 struct ath10k_vif_iter arvif_iter;
2914 u32 flags;
2915
2916 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
2917 arvif_iter.vdev_id = vdev_id;
2918
2919 flags = IEEE80211_IFACE_ITER_RESUME_ALL;
2920 ieee80211_iterate_active_interfaces_atomic(ar->hw,
2921 flags,
2922 ath10k_get_arvif_iter,
2923 &arvif_iter);
2924 if (!arvif_iter.arvif) {
2925 ath10k_warn("No VIF found for VDEV: %d\n", vdev_id);
2926 return NULL;
2927 }
2928
2929 return arvif_iter.arvif;
2930}
2931
2932int ath10k_mac_register(struct ath10k *ar)
2933{
2934 struct ieee80211_supported_band *band;
2935 struct ieee80211_sta_vht_cap vht_cap;
2936 struct ieee80211_sta_ht_cap ht_cap;
2937 void *channels;
2938 int ret;
2939
2940 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
2941
2942 SET_IEEE80211_DEV(ar->hw, ar->dev);
2943
2944 ht_cap = ath10k_get_ht_cap(ar);
2945 vht_cap = ath10k_create_vht_cap(ar);
2946
2947 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
2948 channels = kmemdup(ath10k_2ghz_channels,
2949 sizeof(ath10k_2ghz_channels),
2950 GFP_KERNEL);
2951 if (!channels)
2952 return -ENOMEM;
2953
2954 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
2955 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
2956 band->channels = channels;
2957 band->n_bitrates = ath10k_g_rates_size;
2958 band->bitrates = ath10k_g_rates;
2959 band->ht_cap = ht_cap;
2960
2961 /* vht is not supported in 2.4 GHz */
2962
2963 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
2964 }
2965
2966 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
2967 channels = kmemdup(ath10k_5ghz_channels,
2968 sizeof(ath10k_5ghz_channels),
2969 GFP_KERNEL);
2970 if (!channels) {
2971 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
2972 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
2973 kfree(band->channels);
2974 }
2975 return -ENOMEM;
2976 }
2977
2978 band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
2979 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
2980 band->channels = channels;
2981 band->n_bitrates = ath10k_a_rates_size;
2982 band->bitrates = ath10k_a_rates;
2983 band->ht_cap = ht_cap;
2984 band->vht_cap = vht_cap;
2985 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
2986 }
2987
2988 ar->hw->wiphy->interface_modes =
2989 BIT(NL80211_IFTYPE_STATION) |
2990 BIT(NL80211_IFTYPE_ADHOC) |
2991 BIT(NL80211_IFTYPE_AP) |
2992 BIT(NL80211_IFTYPE_P2P_CLIENT) |
2993 BIT(NL80211_IFTYPE_P2P_GO);
2994
2995 ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
2996 IEEE80211_HW_SUPPORTS_PS |
2997 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
2998 IEEE80211_HW_SUPPORTS_UAPSD |
2999 IEEE80211_HW_MFP_CAPABLE |
3000 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
3001 IEEE80211_HW_HAS_RATE_CONTROL |
3002 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
3003 IEEE80211_HW_WANT_MONITOR_VIF |
3004 IEEE80211_HW_AP_LINK_PS;
3005
3006 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
3007 ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
3008
3009 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
3010 ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
3011 ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW;
3012 }
3013
3014 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
3015 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
3016
3017 ar->hw->vif_data_size = sizeof(struct ath10k_vif);
3018
3019 ar->hw->channel_change_time = 5000;
3020 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
3021
3022 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
3023 ar->hw->wiphy->max_remain_on_channel_duration = 5000;
3024
3025 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
3026 /*
3027 * on LL hardware queues are managed entirely by the FW
3028 * so we only advertise to mac we can do the queues thing
3029 */
3030 ar->hw->queues = 4;
3031
3032 ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
3033 ar->hw->wiphy->n_iface_combinations = 1;
3034
3035 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
3036 ath10k_reg_notifier);
3037 if (ret) {
3038 ath10k_err("Regulatory initialization failed\n");
3039 return ret;
3040 }
3041
3042 ret = ieee80211_register_hw(ar->hw);
3043 if (ret) {
3044 ath10k_err("ieee80211 registration failed: %d\n", ret);
3045 return ret;
3046 }
3047
3048 if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
3049 ret = regulatory_hint(ar->hw->wiphy,
3050 ar->ath_common.regulatory.alpha2);
3051 if (ret)
3052 goto exit;
3053 }
3054
3055 return 0;
3056exit:
3057 ieee80211_unregister_hw(ar->hw);
3058 return ret;
3059}
3060
/* Undo ath10k_mac_register(): unregister from mac80211 and release the
 * channel arrays kmemdup'ed during registration. */
void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
new file mode 100644
index 000000000000..27fc92e58829
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _MAC_H_
19#define _MAC_H_
20
21#include <net/mac80211.h>
22#include "core.h"
23
/* Context passed to per-interface iterators (rts/frag setters); ret
 * holds the status of the last interface processed. */
struct ath10k_generic_iter {
	struct ath10k *ar;
	int ret;
};

struct ath10k *ath10k_mac_create(void);
void ath10k_mac_destroy(struct ath10k *ar);
int ath10k_mac_register(struct ath10k *ar);
void ath10k_mac_unregister(struct ath10k *ar);
struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void ath10k_reset_scan(unsigned long ptr);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
37
/* The driver's per-vif state lives in mac80211's drv_priv area. */
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
	return (struct ath10k_vif *)vif->drv_priv;
}
42
/* Assign the driver-maintained 802.11 sequence number to an outgoing
 * frame when mac80211 requested it (IEEE80211_TX_CTL_ASSIGN_SEQ). */
static inline void ath10k_tx_h_seq_no(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);

	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (arvif->tx_seq_no == 0)
			arvif->tx_seq_no = 0x1000;

		/* sequence lives above the 4 fragment bits (steps of
		 * 0x10); bump only on the first fragment of an MSDU */
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			arvif->tx_seq_no += 0x10;
		/* keep fragment bits, replace sequence bits */
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no);
	}
}
60
61#endif /* _MAC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
new file mode 100644
index 000000000000..33af4672c909
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -0,0 +1,2507 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/pci.h>
19#include <linux/module.h>
20#include <linux/interrupt.h>
21#include <linux/spinlock.h>
22
23#include "core.h"
24#include "debug.h"
25
26#include "targaddrs.h"
27#include "bmi.h"
28
29#include "hif.h"
30#include "htc.h"
31
32#include "ce.h"
33#include "pci.h"
34
/* Module parameter: when non-zero, allow the target (SoC) power save. */
unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
38
/* NOTE(review): 0xabcd is not a production-looking id — presumably a
 * pre-release QCA988X v1 value; confirm before relying on it. */
#define QCA988X_1_0_DEVICE_ID	(0xabcd)
#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};
47
/* Forward declarations for helpers defined later in this file. */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
57
/* Host-side copy engine attributes, one entry per CE.  NOTE(review):
 * positional fields follow struct ce_attr in ce.h — confirm ordering
 * (flags, src/dest ring sizes, buffer sizes) against that definition. */
static const struct ce_attr host_ce_config_wlan[] = {
	/* host->target HTC control and raw streams */
	{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
	/* could be moved to share CE3 */
	/* target->host HTT + HTC control */
	{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
	/* target->host WMI */
	{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
	/* host->target WMI */
	{ /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
	/* host->target HTT */
	{ /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
		    CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
	/* unused */
	{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
	/* Target autonomous hif_memcpy */
	{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
	/* ce_diag, the Diagnostic Window */
	{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};
78
/* Target firmware's Copy Engine configuration. */
/* NOTE(review): positional fields follow struct ce_pipe_config —
 * confirm ordering (pipenum, direction, nentries, nbytes_max, flags,
 * reserved) against its definition. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* host->target HTC control and raw streams */
	{ /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
	/* target->host HTT + HTC control */
	{ /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
	/* target->host WMI */
	{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* host->target WMI */
	{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* host->target HTT */
	{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
	/* NB: 50% of src nentries, since tx has 2 frags */
	/* unused */
	{ /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* Reserved for target autonomous hif_memcpy */
	{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
	/* CE7 used only by Host */
};
98
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
/* Read `nbytes` from target memory (or registers, for addresses below
 * DRAM_BASE_ADDRESS) into `data` via the diagnostic copy engine, using
 * a DMA bounce buffer and chunks of DIAG_TRANSFER_LIMIT. */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ce_state *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		/* one 32-bit register at a time; stop on first error */
		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		/* post the bounce buffer as the receive destination */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		/* busy-poll for the send completion */
		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		/* busy-poll for the receive completion */
		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		/* NOTE(review): by now `address` has been converted to CE
		 * space and advanced, so the logged value is not the
		 * caller's original address */
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}
253
254/* Read 4-byte aligned data from Target memory or register */
255static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
256 u32 *data)
257{
258 /* Assume range doesn't cross this boundary */
259 if (address >= DRAM_BASE_ADDRESS)
260 return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
261
262 ath10k_pci_wake(ar);
263 *data = ath10k_pci_read32(ar, address);
264 ath10k_pci_sleep(ar);
265 return 0;
266}
267
/* Write `nbytes` from `data` into target memory via the diagnostic
 * copy engine, mirror image of ath10k_pci_diag_read_mem(): the data is
 * staged in a DMA bounce buffer and sent in DIAG_TRANSFER_LIMIT
 * chunks. */
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ce_state *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		/* busy-poll for the send completion */
		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		/* busy-poll for the receive completion */
		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);
	}

	if (ret != 0)
		/* NOTE(review): `address` was converted to CE space and
		 * advanced above; the logged value is not the caller's
		 * original address */
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}
400
401/* Write 4B data to Target memory or register */
402static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
403 u32 data)
404{
405 /* Assume range doesn't cross this boundary */
406 if (address >= DRAM_BASE_ADDRESS)
407 return ath10k_pci_diag_write_mem(ar, address, &data,
408 sizeof(u32));
409
410 ath10k_pci_wake(ar);
411 ath10k_pci_write32(ar, address, data);
412 ath10k_pci_sleep(ar);
413 return 0;
414}
415
416static bool ath10k_pci_target_is_awake(struct ath10k *ar)
417{
418 void __iomem *mem = ath10k_pci_priv(ar)->mem;
419 u32 val;
420 val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
421 RTC_STATE_ADDRESS);
422 return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
423}
424
/* Poll (up to 100 x 10ms) for the target to report awake; warn on
 * timeout. */
static void ath10k_pci_wait(struct ath10k *ar)
{
	int retries = 100;

	while (retries-- && !ath10k_pci_target_is_awake(ar))
		msleep(10);

	/* retries drops below zero only when every attempt was used up. */
	if (retries < 0)
		ath10k_warn("Unable to wakeup target\n");
}
435
/* Keep the target SoC awake, reference-counted: the first caller forces
 * the wake, later callers just bump keep_awake_count. Pairs with
 * ath10k_do_pci_sleep(). */
void ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	/* Only the first user needs to assert the wake request. */
	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	/* Skip the polling loop if an earlier wake already confirmed the
	 * target is up; verified_awake is cleared on the last sleep. */
	if (ar_pci->verified_awake)
		return;

	/* Busy-poll the RTC state with a growing back-off (5us steps up to
	 * 50us) until the target is awake or PCIE_WAKE_TIMEOUT elapses. */
	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			break;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target takes too long to wake up (awake count %d)\n",
				    atomic_read(&ar_pci->keep_awake_count));
			break;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}
473
474void ath10k_do_pci_sleep(struct ath10k *ar)
475{
476 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
477 void __iomem *pci_addr = ar_pci->mem;
478
479 if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
480 /* Allow sleep */
481 ar_pci->verified_awake = false;
482 iowrite32(PCIE_SOC_WAKE_RESET,
483 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
484 PCIE_SOC_WAKE_ADDRESS);
485 }
486}
487
488/*
489 * FIXME: Handle OOM properly.
490 */
491static inline
492struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
493{
494 struct ath10k_pci_compl *compl = NULL;
495
496 spin_lock_bh(&pipe_info->pipe_lock);
497 if (list_empty(&pipe_info->compl_free)) {
498 ath10k_warn("Completion buffers are full\n");
499 goto exit;
500 }
501 compl = list_first_entry(&pipe_info->compl_free,
502 struct ath10k_pci_compl, list);
503 list_del(&compl->list);
504exit:
505 spin_unlock_bh(&pipe_info->pipe_lock);
506 return compl;
507}
508
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
				    void *transfer_context,
				    u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	bool process = false;

	/* Drain every available send completion: the first one arrives in
	 * the function arguments, the rest are fetched by the loop
	 * condition below. */
	do {
		/*
		 * For the send completion of an item in sendlist, just
		 * increment num_sends_allowed. The upper layer callback will
		 * be triggered when last fragment is done with send.
		 */
		if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
			spin_lock_bh(&pipe_info->pipe_lock);
			pipe_info->num_sends_allowed++;
			spin_unlock_bh(&pipe_info->pipe_lock);
			/* 'continue' in a do/while jumps to the condition,
			 * i.e. fetches the next completed send. */
			continue;
		}

		compl = get_free_compl(pipe_info);
		if (!compl)
			break;	/* completion pool exhausted */

		compl->send_or_recv = HIF_CE_COMPLETE_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = 0;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

		process = true;
	} while (ath10k_ce_completed_send_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id) == 0);

	/*
	 * If only some of the items within a sendlist have completed,
	 * don't invoke completion processing until the entire sendlist
	 * has been sent.
	 */
	if (!process)
		return;

	ath10k_pci_process_ce(ar);
}
570
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
				    void *transfer_context, u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id,
				    unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;

	/* Drain all available recv completions; the first arrives in the
	 * function arguments, the rest come via the loop condition. */
	do {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;	/* completion pool exhausted */

		compl->send_or_recv = HIF_CE_COMPLETE_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		/* transfer_context is the skb posted in
		 * ath10k_pci_post_rx_pipe(); undo its DMA mapping before
		 * the host reads the payload. */
		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

	} while (ath10k_ce_completed_recv_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id,
					       &flags) == 0);

	ath10k_pci_process_ce(ar);
}
616
/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ce_state *ce_hdl = pipe_info->ce_hdl;
	struct ce_sendlist sendlist;
	unsigned int len;
	u32 flags = 0;
	int ret;

	memset(&sendlist, 0, sizeof(struct ce_sendlist));

	/* Clamp the requested length to what the skb actually holds.
	 * NOTE(review): the skb is assumed to be DMA-mapped already
	 * (skb_cb->paddr is used below) — mapping happens in the caller. */
	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

	/* Make sure we have resources to handle this request */
	spin_lock_bh(&pipe_info->pipe_lock);
	if (!pipe_info->num_sends_allowed) {
		ath10k_warn("Pipe: %d is full\n", pipe_id);
		spin_unlock_bh(&pipe_info->pipe_lock);
		return -ENOSR;
	}
	/* Consume one send credit; the completion path gives it back
	 * (see ath10k_pci_ce_send_done / ath10k_pci_process_ce). */
	pipe_info->num_sends_allowed--;
	spin_unlock_bh(&pipe_info->pipe_lock);

	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	if (ret)
		ath10k_warn("CE send failed: %p\n", nbuf);

	return ret;
}
665
666static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
667{
668 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
669 struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
670 int ret;
671
672 spin_lock_bh(&pipe_info->pipe_lock);
673 ret = pipe_info->num_sends_allowed;
674 spin_unlock_bh(&pipe_info->pipe_lock);
675
676 return ret;
677}
678
679static void ath10k_pci_hif_dump_area(struct ath10k *ar)
680{
681 u32 reg_dump_area = 0;
682 u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
683 u32 host_addr;
684 int ret;
685 u32 i;
686
687 ath10k_err("firmware crashed!\n");
688 ath10k_err("hardware name %s version 0x%x\n",
689 ar->hw_params.name, ar->target_version);
690 ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
691 ar->fw_version_minor, ar->fw_version_release,
692 ar->fw_version_build);
693
694 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
695 if (ath10k_pci_diag_read_mem(ar, host_addr,
696 &reg_dump_area, sizeof(u32)) != 0) {
697 ath10k_warn("could not read hi_failure_state\n");
698 return;
699 }
700
701 ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
702
703 ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
704 &reg_dump_values[0],
705 REG_DUMP_COUNT_QCA988X * sizeof(u32));
706 if (ret != 0) {
707 ath10k_err("could not dump FW Dump Area\n");
708 return;
709 }
710
711 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
712
713 ath10k_err("target Register Dump\n");
714 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
715 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
716 i,
717 reg_dump_values[i],
718 reg_dump_values[i + 1],
719 reg_dump_values[i + 2],
720 reg_dump_values[i + 3]);
721}
722
723static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
724 int force)
725{
726 if (!force) {
727 int resources;
728 /*
729 * Decide whether to actually poll for completions, or just
730 * wait for a later chance.
731 * If there seem to be plenty of resources left, then just wait
732 * since checking involves reading a CE register, which is a
733 * relatively expensive operation.
734 */
735 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
736
737 /*
738 * If at least 50% of the total resources are still available,
739 * don't bother checking again yet.
740 */
741 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
742 return;
743 }
744 ath10k_ce_per_engine_service(ar, pipe);
745}
746
747static void ath10k_pci_hif_post_init(struct ath10k *ar,
748 struct ath10k_hif_cb *callbacks)
749{
750 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
751
752 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
753
754 memcpy(&ar_pci->msg_callbacks_current, callbacks,
755 sizeof(ar_pci->msg_callbacks_current));
756}
757
/* Register CE send/recv callbacks and preallocate per-pipe completion
 * structures so the interrupt path never has to allocate.
 * Returns 0 or -ENOMEM (after tearing down via ath10k_pci_stop_ce). */
static int ath10k_pci_start_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_diag = ar_pci->ce_diag;
	const struct ce_attr *attr;
	struct hif_ce_pipe_info *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions, disable_interrupts;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
			completions += attr->src_nentries;
			/* Keep one source entry in reserve;
			 * num_sends_allowed is the pipe's credit counter. */
			pipe_info->num_sends_allowed = attr->src_nentries - 1;
		}

		if (attr->dest_nentries) {
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
			completions += attr->dest_nentries;
		}

		if (completions == 0)
			continue;

		/* One completion structure per ring entry (src + dest). */
		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(struct ath10k_pci_compl),
					GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_stop_ce(ar);
				return -ENOMEM;
			}

			compl->send_or_recv = HIF_CE_COMPLETE_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}
817
/* Quiesce CE processing: disable interrupts, kill tasklets, and mark any
 * still-queued completions aborted so upper layers release their
 * resources when the queue is drained. */
static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	int i;

	ath10k_ce_disable_interrupts(ar);

	/* Cancel the pending tasklet */
	tasklet_kill(&ar_pci->intr_tq);

	/* Also stop each per-pipe interrupt tasklet. */
	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = (struct sk_buff *)compl->transfer_context;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}
842
843static void ath10k_pci_cleanup_ce(struct ath10k *ar)
844{
845 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
846 struct ath10k_pci_compl *compl, *tmp;
847 struct hif_ce_pipe_info *pipe_info;
848 struct sk_buff *netbuf;
849 int pipe_num;
850
851 /* Free pending completions. */
852 spin_lock_bh(&ar_pci->compl_lock);
853 if (!list_empty(&ar_pci->compl_process))
854 ath10k_warn("pending completions still present! possible memory leaks.\n");
855
856 list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
857 list_del(&compl->list);
858 netbuf = (struct sk_buff *)compl->transfer_context;
859 dev_kfree_skb_any(netbuf);
860 kfree(compl);
861 }
862 spin_unlock_bh(&ar_pci->compl_lock);
863
864 /* Free unused completions for each pipe. */
865 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
866 pipe_info = &ar_pci->pipe_info[pipe_num];
867
868 spin_lock_bh(&pipe_info->pipe_lock);
869 list_for_each_entry_safe(compl, tmp,
870 &pipe_info->compl_free, list) {
871 list_del(&compl->list);
872 kfree(compl);
873 }
874 spin_unlock_bh(&pipe_info->pipe_lock);
875 }
876}
877
/* Drain the shared completion queue, invoking the upper layer's tx/rx
 * callbacks and recycling each completion into its pipe's free pool. */
static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	/* compl_processing acts as a single-runner flag: a concurrent
	 * caller simply returns and lets the running instance drain. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		/* Pop one completion at a time; the lock is dropped while
		 * the (potentially slow) callbacks run. */
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
			cb->tx_completion(ar,
					  compl->transfer_context,
					  compl->transfer_id);
			/* NOTE(review): send_done stays 1 for the rest of
			 * the loop, so subsequent recv completions also
			 * bump num_sends_allowed below — confirm this is
			 * intended rather than per-completion. */
			send_done = 1;
		} else {
			/* Replenish the rx ring with one fresh buffer for
			 * the one just consumed. */
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
					    compl->pipe_info->pipe_num);
				break;
			}

			skb = (struct sk_buff *)compl->transfer_context;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			/* Only hand up the skb if the reported length fits
			 * in the buffer that was posted. */
			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
		}

		compl->send_or_recv = HIF_CE_COMPLETE_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		compl->pipe_info->num_sends_allowed += send_done;
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}
958
959/* TODO - temporary mapping while we have too few CE's */
960static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
961 u16 service_id, u8 *ul_pipe,
962 u8 *dl_pipe, int *ul_is_polled,
963 int *dl_is_polled)
964{
965 int ret = 0;
966
967 /* polling for received messages not supported */
968 *dl_is_polled = 0;
969
970 switch (service_id) {
971 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
972 /*
973 * Host->target HTT gets its own pipe, so it can be polled
974 * while other pipes are interrupt driven.
975 */
976 *ul_pipe = 4;
977 /*
978 * Use the same target->host pipe for HTC ctrl, HTC raw
979 * streams, and HTT.
980 */
981 *dl_pipe = 1;
982 break;
983
984 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
985 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
986 /*
987 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
988 * HTC_CTRL_RSVD_SVC could share the same pipe as the
989 * WMI services. So, if another CE is needed, change
990 * this to *ul_pipe = 3, which frees up CE 0.
991 */
992 /* *ul_pipe = 3; */
993 *ul_pipe = 0;
994 *dl_pipe = 1;
995 break;
996
997 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
998 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
999 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1000 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1001
1002 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1003 *ul_pipe = 3;
1004 *dl_pipe = 2;
1005 break;
1006
1007 /* pipe 5 unused */
1008 /* pipe 6 reserved */
1009 /* pipe 7 reserved */
1010
1011 default:
1012 ret = -1;
1013 break;
1014 }
1015 *ul_is_polled =
1016 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1017
1018 return ret;
1019}
1020
1021static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1022 u8 *ul_pipe, u8 *dl_pipe)
1023{
1024 int ul_is_polled, dl_is_polled;
1025
1026 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1027 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1028 ul_pipe,
1029 dl_pipe,
1030 &ul_is_polled,
1031 &dl_is_polled);
1032}
1033
/* Allocate, DMA-map and enqueue 'num' receive buffers on a pipe.
 * On any failure the whole pipe is cleaned up via
 * ath10k_pci_rx_pipe_cleanup(). Returns 0 or a negative errno. */
static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	/* buf_sz == 0 marks a pipe/CE that never receives. */
	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("could not allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		/* NOTE(review): the mapping length (skb->len + tailroom)
		 * differs from the buf_sz used for the sync below —
		 * confirm both cover the region the CE writes. */
		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("could not dma map skbuff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		/* Stash the bus address; the recv path uses it to unmap. */
		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("could not enqueue to pipe %d (%d)\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}
1090
1091static int ath10k_pci_post_rx(struct ath10k *ar)
1092{
1093 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1094 struct hif_ce_pipe_info *pipe_info;
1095 const struct ce_attr *attr;
1096 int pipe_num, ret = 0;
1097
1098 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1099 pipe_info = &ar_pci->pipe_info[pipe_num];
1100 attr = &host_ce_config_wlan[pipe_num];
1101
1102 if (attr->dest_nentries == 0)
1103 continue;
1104
1105 ret = ath10k_pci_post_rx_pipe(pipe_info,
1106 attr->dest_nentries - 1);
1107 if (ret) {
1108 ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
1109 pipe_num);
1110
1111 for (; pipe_num >= 0; pipe_num--) {
1112 pipe_info = &ar_pci->pipe_info[pipe_num];
1113 ath10k_pci_rx_pipe_cleanup(pipe_info);
1114 }
1115 return ret;
1116 }
1117 }
1118
1119 return 0;
1120}
1121
1122static int ath10k_pci_hif_start(struct ath10k *ar)
1123{
1124 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1125 int ret;
1126
1127 ret = ath10k_pci_start_ce(ar);
1128 if (ret) {
1129 ath10k_warn("could not start CE (%d)\n", ret);
1130 return ret;
1131 }
1132
1133 /* Post buffers once to start things off. */
1134 ret = ath10k_pci_post_rx(ar);
1135 if (ret) {
1136 ath10k_warn("could not post rx pipes (%d)\n", ret);
1137 return ret;
1138 }
1139
1140 ar_pci->started = 1;
1141 return 0;
1142}
1143
1144static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1145{
1146 struct ath10k *ar;
1147 struct ath10k_pci *ar_pci;
1148 struct ce_state *ce_hdl;
1149 u32 buf_sz;
1150 struct sk_buff *netbuf;
1151 u32 ce_data;
1152
1153 buf_sz = pipe_info->buf_sz;
1154
1155 /* Unused Copy Engine */
1156 if (buf_sz == 0)
1157 return;
1158
1159 ar = pipe_info->hif_ce_state;
1160 ar_pci = ath10k_pci_priv(ar);
1161
1162 if (!ar_pci->started)
1163 return;
1164
1165 ce_hdl = pipe_info->ce_hdl;
1166
1167 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1168 &ce_data) == 0) {
1169 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1170 netbuf->len + skb_tailroom(netbuf),
1171 DMA_FROM_DEVICE);
1172 dev_kfree_skb_any(netbuf);
1173 }
1174}
1175
1176static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1177{
1178 struct ath10k *ar;
1179 struct ath10k_pci *ar_pci;
1180 struct ce_state *ce_hdl;
1181 struct sk_buff *netbuf;
1182 u32 ce_data;
1183 unsigned int nbytes;
1184 unsigned int id;
1185 u32 buf_sz;
1186
1187 buf_sz = pipe_info->buf_sz;
1188
1189 /* Unused Copy Engine */
1190 if (buf_sz == 0)
1191 return;
1192
1193 ar = pipe_info->hif_ce_state;
1194 ar_pci = ath10k_pci_priv(ar);
1195
1196 if (!ar_pci->started)
1197 return;
1198
1199 ce_hdl = pipe_info->ce_hdl;
1200
1201 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1202 &ce_data, &nbytes, &id) == 0) {
1203 if (netbuf != CE_SENDLIST_ITEM_CTXT)
1204 /*
1205 * Indicate the completion to higer layer to free
1206 * the buffer
1207 */
1208 ATH10K_SKB_CB(netbuf)->is_aborted = true;
1209 ar_pci->msg_callbacks_current.tx_completion(ar,
1210 netbuf,
1211 id);
1212 }
1213}
1214
1215/*
1216 * Cleanup residual buffers for device shutdown:
1217 * buffers that were enqueued for receive
1218 * buffers that were to be sent
1219 * Note: Buffers that had completed but which were
1220 * not yet processed are on a completion queue. They
1221 * are handled when the completion thread shuts down.
1222 */
1223static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1224{
1225 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1226 int pipe_num;
1227
1228 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1229 struct hif_ce_pipe_info *pipe_info;
1230
1231 pipe_info = &ar_pci->pipe_info[pipe_num];
1232 ath10k_pci_rx_pipe_cleanup(pipe_info);
1233 ath10k_pci_tx_pipe_cleanup(pipe_info);
1234 }
1235}
1236
1237static void ath10k_pci_ce_deinit(struct ath10k *ar)
1238{
1239 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1240 struct hif_ce_pipe_info *pipe_info;
1241 int pipe_num;
1242
1243 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1244 pipe_info = &ar_pci->pipe_info[pipe_num];
1245 if (pipe_info->ce_hdl) {
1246 ath10k_ce_deinit(pipe_info->ce_hdl);
1247 pipe_info->ce_hdl = NULL;
1248 pipe_info->buf_sz = 0;
1249 }
1250 }
1251}
1252
/* Orderly HIF shutdown. The sequence matters: stop CE interrupt/tasklet
 * activity first, then drain and free completions, then reclaim posted
 * buffers, and finally deinit the copy engines. */
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ath10k_pci_stop_ce(ar);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);
	ath10k_pci_ce_deinit(ar);
}
1268
1269static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1270 void *req, u32 req_len,
1271 void *resp, u32 *resp_len)
1272{
1273 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1274 struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
1275 struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
1276 dma_addr_t req_paddr = 0;
1277 dma_addr_t resp_paddr = 0;
1278 struct bmi_xfer xfer = {};
1279 void *treq, *tresp = NULL;
1280 int ret = 0;
1281
1282 if (resp && !resp_len)
1283 return -EINVAL;
1284
1285 if (resp && resp_len && *resp_len == 0)
1286 return -EINVAL;
1287
1288 treq = kmemdup(req, req_len, GFP_KERNEL);
1289 if (!treq)
1290 return -ENOMEM;
1291
1292 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1293 ret = dma_mapping_error(ar->dev, req_paddr);
1294 if (ret)
1295 goto err_dma;
1296
1297 if (resp && resp_len) {
1298 tresp = kzalloc(*resp_len, GFP_KERNEL);
1299 if (!tresp) {
1300 ret = -ENOMEM;
1301 goto err_req;
1302 }
1303
1304 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1305 DMA_FROM_DEVICE);
1306 ret = dma_mapping_error(ar->dev, resp_paddr);
1307 if (ret)
1308 goto err_req;
1309
1310 xfer.wait_for_resp = true;
1311 xfer.resp_len = 0;
1312
1313 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1314 }
1315
1316 init_completion(&xfer.done);
1317
1318 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1319 if (ret)
1320 goto err_resp;
1321
1322 ret = wait_for_completion_timeout(&xfer.done,
1323 BMI_COMMUNICATION_TIMEOUT_HZ);
1324 if (ret <= 0) {
1325 u32 unused_buffer;
1326 unsigned int unused_nbytes;
1327 unsigned int unused_id;
1328
1329 ret = -ETIMEDOUT;
1330 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1331 &unused_nbytes, &unused_id);
1332 } else {
1333 /* non-zero means we did not time out */
1334 ret = 0;
1335 }
1336
1337err_resp:
1338 if (resp) {
1339 u32 unused_buffer;
1340
1341 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1342 dma_unmap_single(ar->dev, resp_paddr,
1343 *resp_len, DMA_FROM_DEVICE);
1344 }
1345err_req:
1346 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1347
1348 if (ret == 0 && resp_len) {
1349 *resp_len = min(*resp_len, xfer.resp_len);
1350 memcpy(resp, tresp, xfer.resp_len);
1351 }
1352err_dma:
1353 kfree(treq);
1354 kfree(tresp);
1355
1356 return ret;
1357}
1358
1359static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
1360 void *transfer_context,
1361 u32 data,
1362 unsigned int nbytes,
1363 unsigned int transfer_id)
1364{
1365 struct bmi_xfer *xfer = transfer_context;
1366
1367 if (xfer->wait_for_resp)
1368 return;
1369
1370 complete(&xfer->done);
1371}
1372
1373static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
1374 void *transfer_context,
1375 u32 data,
1376 unsigned int nbytes,
1377 unsigned int transfer_id,
1378 unsigned int flags)
1379{
1380 struct bmi_xfer *xfer = transfer_context;
1381
1382 if (!xfer->wait_for_resp) {
1383 ath10k_warn("unexpected: BMI data received; ignoring\n");
1384 return;
1385 }
1386
1387 xfer->resp_len = nbytes;
1388 complete(&xfer->done);
1389}
1390
1391/*
1392 * Map from service/endpoint to Copy Engine.
1393 * This table is derived from the CE_PCI TABLE, above.
1394 * It is passed to the Target at startup for use by firmware.
1395 */
1396static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1397 {
1398 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1399 PIPEDIR_OUT, /* out = UL = host -> target */
1400 3,
1401 },
1402 {
1403 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1404 PIPEDIR_IN, /* in = DL = target -> host */
1405 2,
1406 },
1407 {
1408 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1409 PIPEDIR_OUT, /* out = UL = host -> target */
1410 3,
1411 },
1412 {
1413 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1414 PIPEDIR_IN, /* in = DL = target -> host */
1415 2,
1416 },
1417 {
1418 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1419 PIPEDIR_OUT, /* out = UL = host -> target */
1420 3,
1421 },
1422 {
1423 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1424 PIPEDIR_IN, /* in = DL = target -> host */
1425 2,
1426 },
1427 {
1428 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1429 PIPEDIR_OUT, /* out = UL = host -> target */
1430 3,
1431 },
1432 {
1433 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1434 PIPEDIR_IN, /* in = DL = target -> host */
1435 2,
1436 },
1437 {
1438 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1439 PIPEDIR_OUT, /* out = UL = host -> target */
1440 3,
1441 },
1442 {
1443 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1444 PIPEDIR_IN, /* in = DL = target -> host */
1445 2,
1446 },
1447 {
1448 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1449 PIPEDIR_OUT, /* out = UL = host -> target */
1450 0, /* could be moved to 3 (share with WMI) */
1451 },
1452 {
1453 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1454 PIPEDIR_IN, /* in = DL = target -> host */
1455 1,
1456 },
1457 {
1458 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1459 PIPEDIR_OUT, /* out = UL = host -> target */
1460 0,
1461 },
1462 {
1463 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1464 PIPEDIR_IN, /* in = DL = target -> host */
1465 1,
1466 },
1467 {
1468 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1469 PIPEDIR_OUT, /* out = UL = host -> target */
1470 4,
1471 },
1472 {
1473 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1474 PIPEDIR_IN, /* in = DL = target -> host */
1475 1,
1476 },
1477
1478 /* (Additions here) */
1479
1480 { /* Must be last */
1481 0,
1482 0,
1483 0,
1484 },
1485};
1486
1487/*
1488 * Send an interrupt to the device to wake up the Target CPU
1489 * so it has an opportunity to notice any changed state.
1490 */
1491static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1492{
1493 int ret;
1494 u32 core_ctrl;
1495
1496 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1497 CORE_CTRL_ADDRESS,
1498 &core_ctrl);
1499 if (ret) {
1500 ath10k_warn("Unable to read core ctrl\n");
1501 return ret;
1502 }
1503
1504 /* A_INUM_FIRMWARE interrupt to Target CPU */
1505 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1506
1507 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1508 CORE_CTRL_ADDRESS,
1509 core_ctrl);
1510 if (ret)
1511 ath10k_warn("Unable to set interrupt mask\n");
1512
1513 return ret;
1514}
1515
1516static int ath10k_pci_init_config(struct ath10k *ar)
1517{
1518 u32 interconnect_targ_addr;
1519 u32 pcie_state_targ_addr = 0;
1520 u32 pipe_cfg_targ_addr = 0;
1521 u32 svc_to_pipe_map = 0;
1522 u32 pcie_config_flags = 0;
1523 u32 ealloc_value;
1524 u32 ealloc_targ_addr;
1525 u32 flag2_value;
1526 u32 flag2_targ_addr;
1527 int ret = 0;
1528
1529 /* Download to Target the CE Config and the service-to-CE map */
1530 interconnect_targ_addr =
1531 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1532
1533 /* Supply Target-side CE configuration */
1534 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1535 &pcie_state_targ_addr);
1536 if (ret != 0) {
1537 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1538 return ret;
1539 }
1540
1541 if (pcie_state_targ_addr == 0) {
1542 ret = -EIO;
1543 ath10k_err("Invalid pcie state addr\n");
1544 return ret;
1545 }
1546
1547 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1548 offsetof(struct pcie_state,
1549 pipe_cfg_addr),
1550 &pipe_cfg_targ_addr);
1551 if (ret != 0) {
1552 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1553 return ret;
1554 }
1555
1556 if (pipe_cfg_targ_addr == 0) {
1557 ret = -EIO;
1558 ath10k_err("Invalid pipe cfg addr\n");
1559 return ret;
1560 }
1561
1562 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1563 target_ce_config_wlan,
1564 sizeof(target_ce_config_wlan));
1565
1566 if (ret != 0) {
1567 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1568 return ret;
1569 }
1570
1571 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1572 offsetof(struct pcie_state,
1573 svc_to_pipe_map),
1574 &svc_to_pipe_map);
1575 if (ret != 0) {
1576 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1577 return ret;
1578 }
1579
1580 if (svc_to_pipe_map == 0) {
1581 ret = -EIO;
1582 ath10k_err("Invalid svc_to_pipe map\n");
1583 return ret;
1584 }
1585
1586 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1587 target_service_to_ce_map_wlan,
1588 sizeof(target_service_to_ce_map_wlan));
1589 if (ret != 0) {
1590 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1591 return ret;
1592 }
1593
1594 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1595 offsetof(struct pcie_state,
1596 config_flags),
1597 &pcie_config_flags);
1598 if (ret != 0) {
1599 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1600 return ret;
1601 }
1602
1603 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1604
1605 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1606 offsetof(struct pcie_state, config_flags),
1607 &pcie_config_flags,
1608 sizeof(pcie_config_flags));
1609 if (ret != 0) {
1610 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1611 return ret;
1612 }
1613
1614 /* configure early allocation */
1615 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1616
1617 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1618 if (ret != 0) {
1619 ath10k_err("Faile to get early alloc val: %d\n", ret);
1620 return ret;
1621 }
1622
1623 /* first bank is switched to IRAM */
1624 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1625 HI_EARLY_ALLOC_MAGIC_MASK);
1626 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1627 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1628
1629 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1630 if (ret != 0) {
1631 ath10k_err("Failed to set early alloc val: %d\n", ret);
1632 return ret;
1633 }
1634
1635 /* Tell Target to proceed with initialization */
1636 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1637
1638 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1639 if (ret != 0) {
1640 ath10k_err("Failed to get option val: %d\n", ret);
1641 return ret;
1642 }
1643
1644 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1645
1646 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1647 if (ret != 0) {
1648 ath10k_err("Failed to set option val: %d\n", ret);
1649 return ret;
1650 }
1651
1652 return 0;
1653}
1654
1655
1656
1657static int ath10k_pci_ce_init(struct ath10k *ar)
1658{
1659 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1660 struct hif_ce_pipe_info *pipe_info;
1661 const struct ce_attr *attr;
1662 int pipe_num;
1663
1664 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1665 pipe_info = &ar_pci->pipe_info[pipe_num];
1666 pipe_info->pipe_num = pipe_num;
1667 pipe_info->hif_ce_state = ar;
1668 attr = &host_ce_config_wlan[pipe_num];
1669
1670 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1671 if (pipe_info->ce_hdl == NULL) {
1672 ath10k_err("Unable to initialize CE for pipe: %d\n",
1673 pipe_num);
1674
1675 /* It is safe to call it here. It checks if ce_hdl is
1676 * valid for each pipe */
1677 ath10k_pci_ce_deinit(ar);
1678 return -1;
1679 }
1680
1681 if (pipe_num == ar_pci->ce_count - 1) {
1682 /*
1683 * Reserve the ultimate CE for
1684 * diagnostic Window support
1685 */
1686 ar_pci->ce_diag =
1687 ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
1688 continue;
1689 }
1690
1691 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1692 }
1693
1694 /*
1695 * Initially, establish CE completion handlers for use with BMI.
1696 * These are overwritten with generic handlers after we exit BMI phase.
1697 */
1698 pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1699 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1700 ath10k_pci_bmi_send_done, 0);
1701
1702 pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1703 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1704 ath10k_pci_bmi_recv_data);
1705
1706 return 0;
1707}
1708
1709static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1710{
1711 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1712 u32 fw_indicator_address, fw_indicator;
1713
1714 ath10k_pci_wake(ar);
1715
1716 fw_indicator_address = ar_pci->fw_indicator_address;
1717 fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1718
1719 if (fw_indicator & FW_IND_EVENT_PENDING) {
1720 /* ACK: clear Target-side pending event */
1721 ath10k_pci_write32(ar, fw_indicator_address,
1722 fw_indicator & ~FW_IND_EVENT_PENDING);
1723
1724 if (ar_pci->started) {
1725 ath10k_pci_hif_dump_area(ar);
1726 } else {
1727 /*
1728 * Probable Target failure before we're prepared
1729 * to handle it. Generally unexpected.
1730 */
1731 ath10k_warn("early firmware event indicated\n");
1732 }
1733 }
1734
1735 ath10k_pci_sleep(ar);
1736}
1737
/* HIF callback table for the PCI transport; the ath10k core drives the
 * device exclusively through these hooks (passed to ath10k_core_create()
 * in ath10k_pci_probe()). */
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.send_head		= ath10k_pci_hif_send_head,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.init			= ath10k_pci_hif_post_init,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
};
1749
1750static void ath10k_pci_ce_tasklet(unsigned long ptr)
1751{
1752 struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
1753 struct ath10k_pci *ar_pci = pipe->ar_pci;
1754
1755 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1756}
1757
/* Bottom half for the dedicated firmware-error MSI vector: delegate to
 * the common firmware event handler. */
static void ath10k_msi_err_tasklet(unsigned long data)
{
	ath10k_pci_fw_interrupt_handler((struct ath10k *)data);
}
1764
1765/*
1766 * Handler for a per-engine interrupt on a PARTICULAR CE.
1767 * This is used in cases where each CE has a private MSI interrupt.
1768 */
1769static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1770{
1771 struct ath10k *ar = arg;
1772 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1773 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1774
1775 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1776 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
1777 return IRQ_HANDLED;
1778 }
1779
1780 /*
1781 * NOTE: We are able to derive ce_id from irq because we
1782 * use a one-to-one mapping for CE's 0..5.
1783 * CE's 6 & 7 do not use interrupts at all.
1784 *
1785 * This mapping must be kept in sync with the mapping
1786 * used by firmware.
1787 */
1788 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
1789 return IRQ_HANDLED;
1790}
1791
1792static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
1793{
1794 struct ath10k *ar = arg;
1795 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1796
1797 tasklet_schedule(&ar_pci->msi_fw_err);
1798 return IRQ_HANDLED;
1799}
1800
1801/*
1802 * Top-level interrupt handler for all PCI interrupts from a Target.
1803 * When a block of MSI interrupts is allocated, this top-level handler
1804 * is not used; instead, we directly call the correct sub-handler.
1805 */
1806static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
1807{
1808 struct ath10k *ar = arg;
1809 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1810
1811 if (ar_pci->num_msi_intrs == 0) {
1812 /*
1813 * IMPORTANT: INTR_CLR regiser has to be set after
1814 * INTR_ENABLE is set to 0, otherwise interrupt can not be
1815 * really cleared.
1816 */
1817 iowrite32(0, ar_pci->mem +
1818 (SOC_CORE_BASE_ADDRESS |
1819 PCIE_INTR_ENABLE_ADDRESS));
1820 iowrite32(PCIE_INTR_FIRMWARE_MASK |
1821 PCIE_INTR_CE_MASK_ALL,
1822 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1823 PCIE_INTR_CLR_ADDRESS));
1824 /*
1825 * IMPORTANT: this extra read transaction is required to
1826 * flush the posted write buffer.
1827 */
1828 (void) ioread32(ar_pci->mem +
1829 (SOC_CORE_BASE_ADDRESS |
1830 PCIE_INTR_ENABLE_ADDRESS));
1831 }
1832
1833 tasklet_schedule(&ar_pci->intr_tq);
1834
1835 return IRQ_HANDLED;
1836}
1837
/*
 * Common bottom half for legacy-INTx and single-MSI modes: handle any
 * pending firmware event, service all CE rings, then (legacy mode only)
 * re-enable the interrupt sources masked by
 * ath10k_pci_interrupt_handler().
 */
static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/* Enable Legacy PCI line interrupts */
		iowrite32(PCIE_INTR_FIRMWARE_MASK |
			  PCIE_INTR_CE_MASK_ALL,
			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
					 PCIE_INTR_ENABLE_ADDRESS));
		/*
		 * IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer
		 */
		(void) ioread32(ar_pci->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	}
}
1861
1862static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
1863{
1864 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1865 int ret;
1866 int i;
1867
1868 ret = pci_enable_msi_block(ar_pci->pdev, num);
1869 if (ret)
1870 return ret;
1871
1872 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
1873 ath10k_pci_msi_fw_handler,
1874 IRQF_SHARED, "ath10k_pci", ar);
1875 if (ret)
1876 return ret;
1877
1878 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
1879 ret = request_irq(ar_pci->pdev->irq + i,
1880 ath10k_pci_per_engine_handler,
1881 IRQF_SHARED, "ath10k_pci", ar);
1882 if (ret) {
1883 ath10k_warn("request_irq(%d) failed %d\n",
1884 ar_pci->pdev->irq + i, ret);
1885
1886 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
1887 free_irq(ar_pci->pdev->irq + i, ar);
1888
1889 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
1890 pci_disable_msi(ar_pci->pdev);
1891 return ret;
1892 }
1893 }
1894
1895 ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
1896 return 0;
1897}
1898
1899static int ath10k_pci_start_intr_msi(struct ath10k *ar)
1900{
1901 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1902 int ret;
1903
1904 ret = pci_enable_msi(ar_pci->pdev);
1905 if (ret < 0)
1906 return ret;
1907
1908 ret = request_irq(ar_pci->pdev->irq,
1909 ath10k_pci_interrupt_handler,
1910 IRQF_SHARED, "ath10k_pci", ar);
1911 if (ret < 0) {
1912 pci_disable_msi(ar_pci->pdev);
1913 return ret;
1914 }
1915
1916 ath10k_info("MSI interrupt handling\n");
1917 return 0;
1918}
1919
/*
 * Last-resort interrupt setup: shared legacy INTx line.  The Target is
 * woken before the interrupt-enable write and put back to sleep after,
 * since the CORE_BASE registers are only decodable while it is awake.
 */
static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret < 0)
		return ret;

	/*
	 * Make sure to wake the Target before enabling Legacy
	 * Interrupt.
	 */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	/*
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */
	iowrite32(PCIE_INTR_FIRMWARE_MASK |
		  PCIE_INTR_CE_MASK_ALL,
		  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	/* Allow the Target to sleep again now that interrupts are on. */
	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_info("legacy interrupt handling\n");
	return 0;
}
1960
1961static int ath10k_pci_start_intr(struct ath10k *ar)
1962{
1963 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1964 int num = MSI_NUM_REQUEST;
1965 int ret;
1966 int i;
1967
1968 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
1969 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
1970 (unsigned long) ar);
1971
1972 for (i = 0; i < CE_COUNT; i++) {
1973 ar_pci->pipe_info[i].ar_pci = ar_pci;
1974 tasklet_init(&ar_pci->pipe_info[i].intr,
1975 ath10k_pci_ce_tasklet,
1976 (unsigned long)&ar_pci->pipe_info[i]);
1977 }
1978
1979 if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
1980 num = 1;
1981
1982 if (num > 1) {
1983 ret = ath10k_pci_start_intr_msix(ar, num);
1984 if (ret == 0)
1985 goto exit;
1986
1987 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
1988 num = 1;
1989 }
1990
1991 if (num == 1) {
1992 ret = ath10k_pci_start_intr_msi(ar);
1993 if (ret == 0)
1994 goto exit;
1995
1996 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
1997 ret);
1998 num = 0;
1999 }
2000
2001 ret = ath10k_pci_start_intr_legacy(ar);
2002
2003exit:
2004 ar_pci->num_msi_intrs = num;
2005 ar_pci->ce_count = CE_COUNT;
2006 return ret;
2007}
2008
/* Undo ath10k_pci_start_intr(): release every requested vector and
 * disable MSI if it was enabled. */
static void ath10k_pci_stop_intr(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR or MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);

	if (ar_pci->num_msi_intrs > 0)
		pci_disable_msi(ar_pci->pdev);
}
2022
/*
 * Wait (up to ~3 s, polling every 10 ms) for the firmware to raise
 * FW_IND_INITIALIZED, keeping the Target awake for the duration.
 * Returns 0 once initialization is reported, -EIO on timeout.
 */
static int ath10k_pci_reset_target(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int wait_limit = 300; /* 3 sec */

	/* Wait for Target to finish initialization before we proceed. */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	while (wait_limit-- &&
	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
		 FW_IND_INITIALIZED)) {
		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			iowrite32(PCIE_INTR_FIRMWARE_MASK |
				  PCIE_INTR_CE_MASK_ALL,
				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
						 PCIE_INTR_ENABLE_ADDRESS));
		mdelay(10);
	}

	if (wait_limit < 0) {
		ath10k_err("Target stalled\n");
		/* Let the Target sleep again before bailing out. */
		iowrite32(PCIE_SOC_WAKE_RESET,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
		return -EIO;
	}

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	return 0;
}
2061
/*
 * Cold-reset the Target via SOC_GLOBAL_RESET: wake it, assert reset,
 * wait for the RTC to report cold reset, deassert, wait for it to come
 * back, then drop the wake request.  Each wait is bounded by
 * ATH_PCI_RESET_WAIT_MAX milliseconds; timeouts are silently ignored.
 */
static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci)
{
	struct ath10k *ar = ar_pci->ar;
	void __iomem *mem = ar_pci->mem;
	int i;
	u32 val;

	/* Nothing to do if this chip has no global reset register or the
	 * BAR was never mapped. */
	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_target_is_awake(ar))
			break;
		msleep(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
					  RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
2108
2109static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2110{
2111 int i;
2112
2113 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2114 if (!test_bit(i, ar_pci->features))
2115 continue;
2116
2117 switch (i) {
2118 case ATH10K_PCI_FEATURE_MSI_X:
2119 ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
2120 break;
2121 case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND:
2122 ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
2123 break;
2124 }
2125 }
2126}
2127
2128static int ath10k_pci_probe(struct pci_dev *pdev,
2129 const struct pci_device_id *pci_dev)
2130{
2131 void __iomem *mem;
2132 int ret = 0;
2133 struct ath10k *ar;
2134 struct ath10k_pci *ar_pci;
2135 u32 lcr_val;
2136
2137 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2138
2139 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2140 if (ar_pci == NULL)
2141 return -ENOMEM;
2142
2143 ar_pci->pdev = pdev;
2144 ar_pci->dev = &pdev->dev;
2145
2146 switch (pci_dev->device) {
2147 case QCA988X_1_0_DEVICE_ID:
2148 set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features);
2149 break;
2150 case QCA988X_2_0_DEVICE_ID:
2151 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2152 break;
2153 default:
2154 ret = -ENODEV;
2155 ath10k_err("Unkown device ID: %d\n", pci_dev->device);
2156 goto err_ar_pci;
2157 }
2158
2159 ath10k_pci_dump_features(ar_pci);
2160
2161 ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI,
2162 &ath10k_pci_hif_ops);
2163 if (!ar) {
2164 ath10k_err("ath10k_core_create failed!\n");
2165 ret = -EINVAL;
2166 goto err_ar_pci;
2167 }
2168
2169 /* Enable QCA988X_1.0 HW workarounds */
2170 if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features))
2171 spin_lock_init(&ar_pci->hw_v1_workaround_lock);
2172
2173 ar_pci->ar = ar;
2174 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2175 atomic_set(&ar_pci->keep_awake_count, 0);
2176
2177 pci_set_drvdata(pdev, ar);
2178
2179 /*
2180 * Without any knowledge of the Host, the Target may have been reset or
2181 * power cycled and its Config Space may no longer reflect the PCI
2182 * address space that was assigned earlier by the PCI infrastructure.
2183 * Refresh it now.
2184 */
2185 ret = pci_assign_resource(pdev, BAR_NUM);
2186 if (ret) {
2187 ath10k_err("cannot assign PCI space: %d\n", ret);
2188 goto err_ar;
2189 }
2190
2191 ret = pci_enable_device(pdev);
2192 if (ret) {
2193 ath10k_err("cannot enable PCI device: %d\n", ret);
2194 goto err_ar;
2195 }
2196
2197 /* Request MMIO resources */
2198 ret = pci_request_region(pdev, BAR_NUM, "ath");
2199 if (ret) {
2200 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2201 goto err_device;
2202 }
2203
2204 /*
2205 * Target structures have a limit of 32 bit DMA pointers.
2206 * DMA pointers can be wider than 32 bits by default on some systems.
2207 */
2208 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2209 if (ret) {
2210 ath10k_err("32-bit DMA not available: %d\n", ret);
2211 goto err_region;
2212 }
2213
2214 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2215 if (ret) {
2216 ath10k_err("cannot enable 32-bit consistent DMA\n");
2217 goto err_region;
2218 }
2219
2220 /* Set bus master bit in PCI_COMMAND to enable DMA */
2221 pci_set_master(pdev);
2222
2223 /*
2224 * Temporary FIX: disable ASPM
2225 * Will be removed after the OTP is programmed
2226 */
2227 pci_read_config_dword(pdev, 0x80, &lcr_val);
2228 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2229
2230 /* Arrange for access to Target SoC registers. */
2231 mem = pci_iomap(pdev, BAR_NUM, 0);
2232 if (!mem) {
2233 ath10k_err("PCI iomap error\n");
2234 ret = -EIO;
2235 goto err_master;
2236 }
2237
2238 ar_pci->mem = mem;
2239
2240 spin_lock_init(&ar_pci->ce_lock);
2241
2242 ar_pci->cacheline_sz = dma_get_cache_alignment();
2243
2244 ret = ath10k_pci_start_intr(ar);
2245 if (ret) {
2246 ath10k_err("could not start interrupt handling (%d)\n", ret);
2247 goto err_iomap;
2248 }
2249
2250 /*
2251 * Bring the target up cleanly.
2252 *
2253 * The target may be in an undefined state with an AUX-powered Target
2254 * and a Host in WoW mode. If the Host crashes, loses power, or is
2255 * restarted (without unloading the driver) then the Target is left
2256 * (aux) powered and running. On a subsequent driver load, the Target
2257 * is in an unexpected state. We try to catch that here in order to
2258 * reset the Target and retry the probe.
2259 */
2260 ath10k_pci_device_reset(ar_pci);
2261
2262 ret = ath10k_pci_reset_target(ar);
2263 if (ret)
2264 goto err_intr;
2265
2266 if (ath10k_target_ps) {
2267 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
2268 } else {
2269 /* Force AWAKE forever */
2270 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
2271 ath10k_do_pci_wake(ar);
2272 }
2273
2274 ret = ath10k_pci_ce_init(ar);
2275 if (ret)
2276 goto err_intr;
2277
2278 ret = ath10k_pci_init_config(ar);
2279 if (ret)
2280 goto err_ce;
2281
2282 ret = ath10k_pci_wake_target_cpu(ar);
2283 if (ret) {
2284 ath10k_err("could not wake up target CPU (%d)\n", ret);
2285 goto err_ce;
2286 }
2287
2288 ret = ath10k_core_register(ar);
2289 if (ret) {
2290 ath10k_err("could not register driver core (%d)\n", ret);
2291 goto err_ce;
2292 }
2293
2294 return 0;
2295
2296err_ce:
2297 ath10k_pci_ce_deinit(ar);
2298err_intr:
2299 ath10k_pci_stop_intr(ar);
2300err_iomap:
2301 pci_iounmap(pdev, mem);
2302err_master:
2303 pci_clear_master(pdev);
2304err_region:
2305 pci_release_region(pdev, BAR_NUM);
2306err_device:
2307 pci_disable_device(pdev);
2308err_ar:
2309 pci_set_drvdata(pdev, NULL);
2310 ath10k_core_destroy(ar);
2311err_ar_pci:
2312 /* call HIF PCI free here */
2313 kfree(ar_pci);
2314
2315 return ret;
2316}
2317
/*
 * Driver remove: unwind ath10k_pci_probe() in reverse order.  The
 * firmware-error tasklet is killed before anything is torn down, and
 * interrupts are stopped only after the core is unregistered.
 */
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	/* Nothing to do if probe never attached state to this pdev. */
	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);
	ath10k_pci_stop_intr(ar);

	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}
2347
2348#if defined(CONFIG_PM_SLEEP)
2349
2350#define ATH10K_PCI_PM_CONTROL 0x44
2351
2352static int ath10k_pci_suspend(struct device *device)
2353{
2354 struct pci_dev *pdev = to_pci_dev(device);
2355 struct ath10k *ar = pci_get_drvdata(pdev);
2356 struct ath10k_pci *ar_pci;
2357 u32 val;
2358 int ret, retval;
2359
2360 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2361
2362 if (!ar)
2363 return -ENODEV;
2364
2365 ar_pci = ath10k_pci_priv(ar);
2366 if (!ar_pci)
2367 return -ENODEV;
2368
2369 if (ath10k_core_target_suspend(ar))
2370 return -EBUSY;
2371
2372 ret = wait_event_interruptible_timeout(ar->event_queue,
2373 ar->is_target_paused == true,
2374 1 * HZ);
2375 if (ret < 0) {
2376 ath10k_warn("suspend interrupted (%d)\n", ret);
2377 retval = ret;
2378 goto resume;
2379 } else if (ret == 0) {
2380 ath10k_warn("suspend timed out - target pause event never came\n");
2381 retval = EIO;
2382 goto resume;
2383 }
2384
2385 /*
2386 * reset is_target_paused and host can check that in next time,
2387 * or it will always be TRUE and host just skip the waiting
2388 * condition, it causes target assert due to host already
2389 * suspend
2390 */
2391 ar->is_target_paused = false;
2392
2393 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2394
2395 if ((val & 0x000000ff) != 0x3) {
2396 pci_save_state(pdev);
2397 pci_disable_device(pdev);
2398 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2399 (val & 0xffffff00) | 0x03);
2400 }
2401
2402 return 0;
2403resume:
2404 ret = ath10k_core_target_resume(ar);
2405 if (ret)
2406 ath10k_warn("could not resume (%d)\n", ret);
2407
2408 return retval;
2409}
2410
/*
 * PM resume: re-enable the PCI device, restore saved config space if
 * suspend had powered the function down, and ask the target to resume.
 * Returns 0 on success or a negative errno.
 */
static int ath10k_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;
	int ret;
	u32 val;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return -ENODEV;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return -ENODEV;

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_warn("cannot enable PCI device: %d\n", ret);
		return ret;
	}

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	/* Low byte != 0 means suspend left the function in a low-power
	 * state; bring it back to D0 and restore config space. */
	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	ret = ath10k_core_target_resume(ar);
	if (ret)
		ath10k_warn("target resume failed: %d\n", ret);

	return ret;
}
2457
2458static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
2459 ath10k_pci_suspend,
2460 ath10k_pci_resume);
2461
2462#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
2463
2464#else
2465
2466#define ATH10K_PCI_PM_OPS NULL
2467
2468#endif /* CONFIG_PM_SLEEP */
2469
2470MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2471
/* PCI driver glue; registered/unregistered in module init/exit below.
 * PM callbacks are compiled out when CONFIG_PM_SLEEP is unset. */
static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
	.driver.pm = ATH10K_PCI_PM_OPS,
};
2479
2480static int __init ath10k_pci_init(void)
2481{
2482 int ret;
2483
2484 ret = pci_register_driver(&ath10k_pci_driver);
2485 if (ret)
2486 ath10k_err("pci_register_driver failed [%d]\n", ret);
2487
2488 return ret;
2489}
2490module_init(ath10k_pci_init);
2491
2492static void __exit ath10k_pci_exit(void)
2493{
2494 pci_unregister_driver(&ath10k_pci_driver);
2495}
2496
2497module_exit(ath10k_pci_exit);
2498
2499MODULE_AUTHOR("Qualcomm Atheros");
2500MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2501MODULE_LICENSE("Dual BSD/GPL");
2502MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
2503MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
2504MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
2505MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2506MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2507MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
new file mode 100644
index 000000000000..d2a055a07dc6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -0,0 +1,355 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _PCI_H_
19#define _PCI_H_
20
21#include <linux/interrupt.h>
22
23#include "hw.h"
24#include "ce.h"
25
/* FW dump area */
#define REG_DUMP_COUNT_QCA988X 60

/*
 * maximum number of bytes that can be handled atomically by
 * DiagRead/DiagWrite
 * (this macro was accidentally defined twice; the duplicate has been
 * removed)
 */
#define DIAG_TRANSFER_LIMIT 2048
39
/* Per-exchange state for a BMI message transfer (see the BMI paths in
 * pci.c).  `done` is completed when the transfer finishes; if
 * wait_for_resp is set, resp_len receives the response length. */
struct bmi_xfer {
	struct completion done;
	bool wait_for_resp;
	u32 resp_len;
};
45
/* A deferred Copy Engine completion event.  Instances move between the
 * per-pipe free list (hif_ce_pipe_info.compl_free) and the per-device
 * processing list (ath10k_pci.compl_process). */
struct ath10k_pci_compl {
	struct list_head list;
	int send_or_recv;	/* one of HIF_CE_COMPLETE_* below */
	struct ce_state *ce_state;
	struct hif_ce_pipe_info *pipe_info;
	void *transfer_context;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;
};

/* compl_state.send_or_recv */
#define HIF_CE_COMPLETE_FREE 0
#define HIF_CE_COMPLETE_SEND 1
#define HIF_CE_COMPLETE_RECV 2
61
62/*
63 * PCI-specific Target state
64 *
65 * NOTE: Structure is shared between Host software and Target firmware!
66 *
67 * Much of this may be of interest to the Host so
68 * HOST_INTEREST->hi_interconnect_state points here
69 * (and all members are 32-bit quantities in order to
70 * facilitate Host access). In particular, Host software is
71 * required to initialize pipe_cfg_addr and svc_to_pipe_map.
72 */
73struct pcie_state {
74 /* Pipe configuration Target address */
75 /* NB: ce_pipe_config[CE_COUNT] */
76 u32 pipe_cfg_addr;
77
78 /* Service to pipe map Target address */
79 /* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
80 u32 svc_to_pipe_map;
81
82 /* number of MSI interrupts requested */
83 u32 msi_requested;
84
85 /* number of MSI interrupts granted */
86 u32 msi_granted;
87
88 /* Message Signalled Interrupt address */
89 u32 msi_addr;
90
91 /* Base data */
92 u32 msi_data;
93
94 /*
95 * Data for firmware interrupt;
96 * MSI data for other interrupts are
97 * in various SoC registers
98 */
99 u32 msi_fw_intr_data;
100
101 /* PCIE_PWR_METHOD_* */
102 u32 power_mgmt_method;
103
104 /* PCIE_CONFIG_FLAG_* */
105 u32 config_flags;
106};
107
/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1  0x0000001

/* Host software's Copy Engine configuration. */
#define CE_ATTR_FLAGS 0

/*
 * Configuration information for a Copy Engine pipe.
 * Passed from Host to Target during startup (one per CE).
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 * Do not reorder or resize fields.
 */
struct ce_pipe_config {
	u32 pipenum;
	u32 pipedir;	/* PIPEDIR_* (defined below) */
	u32 nentries;
	u32 nbytes_max;
	u32 flags;
	u32 reserved;
};
128
129/*
130 * Directions for interconnect pipe configuration.
131 * These definitions may be used during configuration and are shared
132 * between Host and Target.
133 *
134 * Pipe Directions are relative to the Host, so PIPEDIR_IN means
135 * "coming IN over air through Target to Host" as with a WiFi Rx operation.
136 * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
137 * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
138 * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
139 * over the interconnect.
140 */
141#define PIPEDIR_NONE 0
142#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
143#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
144#define PIPEDIR_INOUT 3 /* bidirectional */
145
146/* Establish a mapping between a service/direction and a pipe. */
147struct service_to_pipe {
148 u32 service_id;
149 u32 pipedir;
150 u32 pipenum;
151};
152
/* Bit indices for the ath10k_pci.features bitmap, set at probe time
 * from the PCI device id. */
enum ath10k_pci_features {
	ATH10K_PCI_FEATURE_MSI_X = 0,
	/* NOTE(review): "WARKAROUND" is a typo for "WORKAROUND", but the
	 * identifier is referenced throughout the driver, so it is kept
	 * as-is to avoid a cross-file rename. */
	ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND = 1,

	/* keep last */
	ATH10K_PCI_FEATURE_COUNT
};
160
/* Per-pipe state.  One instance per Copy Engine, held in
 * ath10k_pci.pipe_info[] and initialized in ath10k_pci_ce_init(). */
struct hif_ce_pipe_info {
	/* Handle of underlying Copy Engine */
	struct ce_state *ce_hdl;

	/* Our pipe number; facilitiates use of pipe_info ptrs. */
	u8 pipe_num;

	/* Convenience back pointer to hif_ce_state. */
	struct ath10k *hif_ce_state;

	/* Max source transfer size for this pipe (attr->src_sz_max);
	 * left 0 for the diagnostic CE. */
	size_t buf_sz;

	/* protects compl_free and num_send_allowed */
	spinlock_t pipe_lock;

	/* List of free CE completion slots */
	struct list_head compl_free;

	/* Limit the number of outstanding send requests. */
	int num_sends_allowed;

	struct ath10k_pci *ar_pci;
	/* Per-CE bottom half (ath10k_pci_ce_tasklet), used in MSI-X mode */
	struct tasklet_struct intr;
};
186
/* Per-device state for the PCI transport; reachable from the core via
 * ar->hif.priv (see ath10k_pci_priv()). */
struct ath10k_pci {
	struct pci_dev *pdev;
	struct device *dev;
	struct ath10k *ar;
	void __iomem *mem;	/* BAR 0 mapping */
	int cacheline_sz;

	DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);

	/*
	 * Number of MSI interrupts granted, 0 --> using legacy PCI line
	 * interrupts.
	 */
	int num_msi_intrs;

	struct tasklet_struct intr_tq;
	struct tasklet_struct msi_fw_err;

	/* Number of Copy Engines supported */
	unsigned int ce_count;

	int started;

	atomic_t keep_awake_count;
	bool verified_awake;

	/* List of CE completions to be processed */
	struct list_head compl_process;

	/* protects compl_processing and compl_process */
	spinlock_t compl_lock;

	bool compl_processing;

	struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];

	struct ath10k_hif_cb msg_callbacks_current;

	/* Target address used to signal a pending firmware event */
	u32 fw_indicator_address;

	/* Copy Engine used for Diagnostic Accesses */
	struct ce_state *ce_diag;

	/* FIXME: document what this really protects */
	spinlock_t ce_lock;

	/* Map CE id to ce_state */
	struct ce_state *ce_id_to_state[CE_COUNT_MAX];

	/* makes sure that dummy reads are atomic */
	spinlock_t hw_v1_workaround_lock;
};
240
/* Return the PCI transport state attached to @ar's HIF layer. */
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
{
	return ar->hif.priv;
}
245
/* Read a 32-bit register in the PCIe-local register block; @addr is an
 * offset relative to PCIE_LOCAL_BASE_ADDRESS. */
static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
{
	return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
250
/* Write a 32-bit register in the PCIe-local register block; @addr is an
 * offset relative to PCIE_LOCAL_BASE_ADDRESS. */
static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
{
	iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
255
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000	/* 5ms */

/* PCI BAR holding the device register space */
#define BAR_NUM 0

/* CDC workaround: magic pattern and the CE it is written through --
 * NOTE(review): usage site not visible in this header */
#define CDC_WAR_MAGIC_STR 0xceef0000
#define CDC_WAR_DATA_CE 4

/*
 * TODO: Should be a function call specific to each Target-type.
 * This convoluted macro converts from Target CPU Virtual Address Space to CE
 * Address Space. As part of this process, we conservatively fetch the current
 * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
 * for this device; but that's not guaranteed.
 *
 * NOTE(review): SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS uses '|' as an
 * address combiner; this is only correct while the two constants have
 * disjoint bits -- verify when adding new chip support.
 */
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
	(((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
	  CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
	 0x100000 | ((addr) & 0xfffff))

/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
278
279/*
280 * This API allows the Host to access Target registers directly
281 * and relatively efficiently over PCIe.
282 * This allows the Host to avoid extra overhead associated with
283 * sending a message to firmware and waiting for a response message
284 * from firmware, as is done on other interconnects.
285 *
286 * Yet there is some complexity with direct accesses because the
287 * Target's power state is not known a priori. The Host must issue
288 * special PCIe reads/writes in order to explicitly wake the Target
289 * and to verify that it is awake and will remain awake.
290 *
291 * Usage:
292 *
293 * Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
294 * These calls must be bracketed by ath10k_pci_wake and
295 * ath10k_pci_sleep. A single BEGIN/END pair is adequate for
296 * multiple READ/WRITE operations.
297 *
298 * Use ath10k_pci_wake to put the Target in a state in
299 * which it is legal for the Host to directly access it. This
300 * may involve waking the Target from a low power state, which
301 * may take up to 2Ms!
302 *
303 * Use ath10k_pci_sleep to tell the Target that as far as
304 * this code path is concerned, it no longer needs to remain
305 * directly accessible. BEGIN/END is under a reference counter;
306 * multiple code paths may issue BEGIN/END on a single targid.
307 */
/*
 * Write a 32-bit Target register at @offset (relative to the mapped
 * register space).  On hardware flagged with the v1 workaround the write
 * is preceded by three dummy reads of offset+4; the whole sequence runs
 * under hw_v1_workaround_lock (IRQ-safe) so the reads and the write are
 * not interleaved with another CPU's access.
 */
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
				      u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *addr = ar_pci->mem;

	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
		unsigned long irq_flags;

		spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);

		ioread32(addr+offset+4); /* 3rd read prior to write */
		ioread32(addr+offset+4); /* 2nd read prior to write */
		ioread32(addr+offset+4); /* 1st read prior to write */
		iowrite32(value, addr+offset);

		spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
				       irq_flags);
	} else {
		iowrite32(value, addr+offset);
	}
}
330
331static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
332{
333 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
334
335 return ioread32(ar_pci->mem + offset);
336}
337
338extern unsigned int ath10k_target_ps;
339
340void ath10k_do_pci_wake(struct ath10k *ar);
341void ath10k_do_pci_sleep(struct ath10k *ar);
342
343static inline void ath10k_pci_wake(struct ath10k *ar)
344{
345 if (ath10k_target_ps)
346 ath10k_do_pci_wake(ar);
347}
348
349static inline void ath10k_pci_sleep(struct ath10k *ar)
350{
351 if (ath10k_target_ps)
352 ath10k_do_pci_sleep(ar);
353}
354
355#endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
new file mode 100644
index 000000000000..bfec6c8f2ecb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -0,0 +1,990 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _RX_DESC_H_
19#define _RX_DESC_H_
20
/*
 * Per-MSDU status bits for the flags word of struct rx_attention;
 * per-bit semantics are documented in the comment block that follows
 * the struct definition.
 */
enum rx_attention_flags {
	RX_ATTENTION_FLAGS_FIRST_MPDU = 1 << 0,
	RX_ATTENTION_FLAGS_LAST_MPDU = 1 << 1,
	RX_ATTENTION_FLAGS_MCAST_BCAST = 1 << 2,
	RX_ATTENTION_FLAGS_PEER_IDX_INVALID = 1 << 3,
	RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = 1 << 4,
	RX_ATTENTION_FLAGS_POWER_MGMT = 1 << 5,
	RX_ATTENTION_FLAGS_NON_QOS = 1 << 6,
	RX_ATTENTION_FLAGS_NULL_DATA = 1 << 7,
	RX_ATTENTION_FLAGS_MGMT_TYPE = 1 << 8,
	RX_ATTENTION_FLAGS_CTRL_TYPE = 1 << 9,
	RX_ATTENTION_FLAGS_MORE_DATA = 1 << 10,
	RX_ATTENTION_FLAGS_EOSP = 1 << 11,
	RX_ATTENTION_FLAGS_U_APSD_TRIGGER = 1 << 12,
	RX_ATTENTION_FLAGS_FRAGMENT = 1 << 13,
	RX_ATTENTION_FLAGS_ORDER = 1 << 14,
	RX_ATTENTION_FLAGS_CLASSIFICATION = 1 << 15,
	RX_ATTENTION_FLAGS_OVERFLOW_ERR = 1 << 16,
	RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = 1 << 17,
	RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18,
	RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = 1 << 19,
	RX_ATTENTION_FLAGS_SA_IDX_INVALID = 1 << 20,
	RX_ATTENTION_FLAGS_DA_IDX_INVALID = 1 << 21,
	RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = 1 << 22,
	RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = 1 << 23,
	RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = 1 << 24,
	RX_ATTENTION_FLAGS_DIRECTED = 1 << 25,
	RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = 1 << 26,
	RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = 1 << 27,
	RX_ATTENTION_FLAGS_TKIP_MIC_ERR = 1 << 28,
	RX_ATTENTION_FLAGS_DECRYPT_ERR = 1 << 29,
	RX_ATTENTION_FLAGS_FCS_ERR = 1 << 30,
	/*
	 * Bit 31: "1 << 31" would left-shift into the sign bit of a signed
	 * int, which is undefined behaviour in C (CERT INT34-C).  Compute
	 * the value in unsigned arithmetic and convert; the bit pattern is
	 * unchanged for all callers masking against a u32 flags word.
	 */
	RX_ATTENTION_FLAGS_MSDU_DONE = (int)(1u << 31),
};
55
/* RX attention descriptor: a single per-MSDU status word; per-bit
 * meanings are documented in the comment block below. */
struct rx_attention {
	__le32 flags; /* %RX_ATTENTION_FLAGS_ */
} __packed;
59
60/*
61 * first_mpdu
62 * Indicates the first MSDU of the PPDU. If both first_mpdu
63 * and last_mpdu are set in the MSDU then this is a not an
64 * A-MPDU frame but a stand alone MPDU. Interior MPDU in an
65 * A-MPDU shall have both first_mpdu and last_mpdu bits set to
66 * 0. The PPDU start status will only be valid when this bit
67 * is set.
68 *
69 * last_mpdu
70 * Indicates the last MSDU of the last MPDU of the PPDU. The
71 * PPDU end status will only be valid when this bit is set.
72 *
73 * mcast_bcast
74 * Multicast / broadcast indicator. Only set when the MAC
75 * address 1 bit 0 is set indicating mcast/bcast and the BSSID
76 * matches one of the 4 BSSID registers. Only set when
77 * first_msdu is set.
78 *
79 * peer_idx_invalid
 * Indicates no matching entries within the max search
 * count. Only set when first_msdu is set.
82 *
83 * peer_idx_timeout
84 * Indicates an unsuccessful search for the peer index due to
85 * timeout. Only set when first_msdu is set.
86 *
87 * power_mgmt
88 * Power management bit set in the 802.11 header. Only set
89 * when first_msdu is set.
90 *
91 * non_qos
92 * Set if packet is not a non-QoS data frame. Only set when
93 * first_msdu is set.
94 *
95 * null_data
96 * Set if frame type indicates either null data or QoS null
97 * data format. Only set when first_msdu is set.
98 *
99 * mgmt_type
100 * Set if packet is a management packet. Only set when
101 * first_msdu is set.
102 *
103 * ctrl_type
104 * Set if packet is a control packet. Only set when first_msdu
105 * is set.
106 *
107 * more_data
108 * Set if more bit in frame control is set. Only set when
109 * first_msdu is set.
110 *
111 * eosp
112 * Set if the EOSP (end of service period) bit in the QoS
113 * control field is set. Only set when first_msdu is set.
114 *
115 * u_apsd_trigger
116 * Set if packet is U-APSD trigger. Key table will have bits
117 * per TID to indicate U-APSD trigger.
118 *
119 * fragment
120 * Indicates that this is an 802.11 fragment frame. This is
121 * set when either the more_frag bit is set in the frame
122 * control or the fragment number is not zero. Only set when
123 * first_msdu is set.
124 *
125 * order
126 * Set if the order bit in the frame control is set. Only set
127 * when first_msdu is set.
128 *
129 * classification
130 * Indicates that this status has a corresponding MSDU that
131 * requires FW processing. The OLE will have classification
132 * ring mask registers which will indicate the ring(s) for
133 * packets and descriptors which need FW attention.
134 *
135 * overflow_err
136 * PCU Receive FIFO does not have enough space to store the
137 * full receive packet. Enough space is reserved in the
138 * receive FIFO for the status is written. This MPDU remaining
139 * packets in the PPDU will be filtered and no Ack response
140 * will be transmitted.
141 *
142 * msdu_length_err
143 * Indicates that the MSDU length from the 802.3 encapsulated
144 * length field extends beyond the MPDU boundary.
145 *
146 * tcp_udp_chksum_fail
147 * Indicates that the computed checksum (tcp_udp_chksum) did
148 * not match the checksum in the TCP/UDP header.
149 *
150 * ip_chksum_fail
151 * Indicates that the computed checksum did not match the
152 * checksum in the IP header.
153 *
154 * sa_idx_invalid
155 * Indicates no matching entry was found in the address search
156 * table for the source MAC address.
157 *
158 * da_idx_invalid
159 * Indicates no matching entry was found in the address search
160 * table for the destination MAC address.
161 *
162 * sa_idx_timeout
163 * Indicates an unsuccessful search for the source MAC address
164 * due to the expiring of the search timer.
165 *
166 * da_idx_timeout
167 * Indicates an unsuccessful search for the destination MAC
168 * address due to the expiring of the search timer.
169 *
170 * encrypt_required
171 * Indicates that this data type frame is not encrypted even if
172 * the policy for this MPDU requires encryption as indicated in
173 * the peer table key type.
174 *
175 * directed
176 * MPDU is a directed packet which means that the RA matched
177 * our STA addresses. In proxySTA it means that the TA matched
 * an entry in our address search table with the corresponding
 * 'no_ack' bit in the address search entry cleared.
180 *
181 * buffer_fragment
182 * Indicates that at least one of the rx buffers has been
183 * fragmented. If set the FW should look at the rx_frag_info
184 * descriptor described below.
185 *
186 * mpdu_length_err
187 * Indicates that the MPDU was pre-maturely terminated
188 * resulting in a truncated MPDU. Don't trust the MPDU length
189 * field.
190 *
191 * tkip_mic_err
192 * Indicates that the MPDU Michael integrity check failed
193 *
194 * decrypt_err
195 * Indicates that the MPDU decrypt integrity check failed
196 *
197 * fcs_err
198 * Indicates that the MPDU FCS check failed
199 *
200 * msdu_done
201 * If set indicates that the RX packet data, RX header data, RX
202 * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
203 * start/end descriptors and RX Attention descriptor are all
204 * valid. This bit must be in the last octet of the
205 * descriptor.
206 */
207
/* Per-ring counts of additional ("more") RX buffers used by a fragmented
 * rx buffer chain; filled in by the RX DMA (see comment below). */
struct rx_frag_info {
	u8 ring0_more_count;	/* extra buffers on RX DMA ring 0 */
	u8 ring1_more_count;	/* extra buffers on RX DMA ring 1 */
	u8 ring2_more_count;	/* extra buffers on RX DMA ring 2 */
	u8 ring3_more_count;	/* extra buffers on RX DMA ring 3 */
} __packed;
214
215/*
216 * ring0_more_count
217 * Indicates the number of more buffers associated with RX DMA
218 * ring 0. Field is filled in by the RX_DMA.
219 *
220 * ring1_more_count
221 * Indicates the number of more buffers associated with RX DMA
222 * ring 1. Field is filled in by the RX_DMA.
223 *
224 * ring2_more_count
225 * Indicates the number of more buffers associated with RX DMA
226 * ring 2. Field is filled in by the RX_DMA.
227 *
228 * ring3_more_count
229 * Indicates the number of more buffers associated with RX DMA
230 * ring 3. Field is filled in by the RX_DMA.
231 */
232
/* Cipher types as reported in the encrypt_type field of
 * rx_mpdu_start.info0 (%RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK). */
enum htt_rx_mpdu_encrypt_type {
	HTT_RX_MPDU_ENCRYPT_WEP40 = 0,
	HTT_RX_MPDU_ENCRYPT_WEP104 = 1,
	HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC = 2,
	HTT_RX_MPDU_ENCRYPT_WEP128 = 3,
	HTT_RX_MPDU_ENCRYPT_TKIP_WPA = 4,
	HTT_RX_MPDU_ENCRYPT_WAPI = 5,
	HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
	HTT_RX_MPDU_ENCRYPT_NONE = 7,	/* no cipher */
};
243
/* Bit layout of rx_mpdu_start.info0 */
#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
#define RX_MPDU_START_INFO0_PEER_IDX_LSB 0
#define RX_MPDU_START_INFO0_SEQ_NUM_MASK 0x0fff0000
#define RX_MPDU_START_INFO0_SEQ_NUM_LSB 16
#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000
#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB 28
#define RX_MPDU_START_INFO0_FROM_DS (1 << 11)
#define RX_MPDU_START_INFO0_TO_DS (1 << 12)
#define RX_MPDU_START_INFO0_ENCRYPTED (1 << 13)
#define RX_MPDU_START_INFO0_RETRY (1 << 14)
#define RX_MPDU_START_INFO0_TXBF_H_INFO (1 << 15)

/* Bit layout of rx_mpdu_start.info1 */
#define RX_MPDU_START_INFO1_TID_MASK 0xf0000000
#define RX_MPDU_START_INFO1_TID_LSB 28
#define RX_MPDU_START_INFO1_DIRECTED (1 << 16)

/* Start-of-MPDU RX descriptor; per-field semantics are documented in
 * the comment block below. */
struct rx_mpdu_start {
	__le32 info0;	/* %RX_MPDU_START_INFO0_ */
	union {
		struct {
			__le32 pn31_0;	/* PN bits [31:0] from the IV */
			__le32 info1; /* %RX_MPDU_START_INFO1_ */
		} __packed;
		struct {
			/* PN as raw bytes: byte-order/shift-free access */
			u8 pn[6];
		} __packed;
	} __packed;
} __packed;
272
273/*
274 * peer_idx
275 * The index of the address search table which associated with
276 * the peer table entry corresponding to this MPDU. Only valid
277 * when first_msdu is set.
278 *
279 * fr_ds
280 * Set if the from DS bit is set in the frame control. Only
281 * valid when first_msdu is set.
282 *
283 * to_ds
284 * Set if the to DS bit is set in the frame control. Only
285 * valid when first_msdu is set.
286 *
287 * encrypted
288 * Protected bit from the frame control. Only valid when
289 * first_msdu is set.
290 *
291 * retry
292 * Retry bit from the frame control. Only valid when
293 * first_msdu is set.
294 *
295 * txbf_h_info
296 * The MPDU data will contain H information. Primarily used
297 * for debug.
298 *
299 * seq_num
300 * The sequence number from the 802.11 header. Only valid when
301 * first_msdu is set.
302 *
303 * encrypt_type
304 * Indicates type of decrypt cipher used (as defined in the
305 * peer table)
306 * 0: WEP40
307 * 1: WEP104
308 * 2: TKIP without MIC
309 * 3: WEP128
310 * 4: TKIP (WPA)
311 * 5: WAPI
312 * 6: AES-CCM (WPA2)
313 * 7: No cipher
 * Only valid when first_msdu is set
315 *
316 * pn_31_0
317 * Bits [31:0] of the PN number extracted from the IV field
318 * WEP: IV = {key_id_octet, pn2, pn1, pn0}. Only pn[23:0] is
319 * valid.
320 * TKIP: IV = {pn5, pn4, pn3, pn2, key_id_octet, pn0,
321 * WEPSeed[1], pn1}. Only pn[47:0] is valid.
322 * AES-CCM: IV = {pn5, pn4, pn3, pn2, key_id_octet, 0x0, pn1,
323 * pn0}. Only pn[47:0] is valid.
324 * WAPI: IV = {key_id_octet, 0x0, pn15, pn14, pn13, pn12, pn11,
325 * pn10, pn9, pn8, pn7, pn6, pn5, pn4, pn3, pn2, pn1, pn0}.
326 * The ext_wapi_pn[127:48] in the rx_msdu_misc descriptor and
327 * pn[47:0] are valid.
328 * Only valid when first_msdu is set.
329 *
330 * pn_47_32
331 * Bits [47:32] of the PN number. See description for
332 * pn_31_0. The remaining PN fields are in the rx_msdu_end
333 * descriptor
334 *
335 * pn
336 * Use this field to access the pn without worrying about
337 * byte-order and bitmasking/bitshifting.
338 *
339 * directed
340 * See definition in RX attention descriptor
341 *
342 * reserved_2
343 * Reserved: HW should fill with zero. FW should ignore.
344 *
345 * tid
346 * The TID field in the QoS control field
347 */
348
349#define RX_MPDU_END_INFO0_RESERVED_0_MASK 0x00001fff
350#define RX_MPDU_END_INFO0_RESERVED_0_LSB 0
351#define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000
352#define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB 16
353#define RX_MPDU_END_INFO0_OVERFLOW_ERR (1 << 13)
354#define RX_MPDU_END_INFO0_LAST_MPDU (1 << 14)
355#define RX_MPDU_END_INFO0_POST_DELIM_ERR (1 << 15)
356#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR (1 << 28)
357#define RX_MPDU_END_INFO0_TKIP_MIC_ERR (1 << 29)
358#define RX_MPDU_END_INFO0_DECRYPT_ERR (1 << 30)
359#define RX_MPDU_END_INFO0_FCS_ERR (1 << 31)
360
361struct rx_mpdu_end {
362 __le32 info0;
363} __packed;
364
365/*
366 * reserved_0
367 * Reserved
368 *
369 * overflow_err
370 * PCU Receive FIFO does not have enough space to store the
371 * full receive packet. Enough space is reserved in the
372 * receive FIFO for the status is written. This MPDU remaining
373 * packets in the PPDU will be filtered and no Ack response
374 * will be transmitted.
375 *
376 * last_mpdu
377 * Indicates that this is the last MPDU of a PPDU.
378 *
379 * post_delim_err
380 * Indicates that a delimiter FCS error occurred after this
381 * MPDU before the next MPDU. Only valid when last_msdu is
382 * set.
383 *
384 * post_delim_cnt
385 * Count of the delimiters after this MPDU. This requires the
386 * last MPDU to be held until all the EOF descriptors have been
387 * received. This may be inefficient in the future when
388 * ML-MIMO is used. Only valid when last_mpdu is set.
389 *
390 * mpdu_length_err
391 * See definition in RX attention descriptor
392 *
393 * tkip_mic_err
394 * See definition in RX attention descriptor
395 *
396 * decrypt_err
397 * See definition in RX attention descriptor
398 *
399 * fcs_err
400 * See definition in RX attention descriptor
401 */
402
/* Bit layout of rx_msdu_start.info0 */
#define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK 0x00003fff
#define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB 0
#define RX_MSDU_START_INFO0_IP_OFFSET_MASK 0x000fc000
#define RX_MSDU_START_INFO0_IP_OFFSET_LSB 14
#define RX_MSDU_START_INFO0_RING_MASK_MASK 0x00f00000
#define RX_MSDU_START_INFO0_RING_MASK_LSB 20
#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_MASK 0x7f000000
#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_LSB 24

/* Bit layout of rx_msdu_start.info1 */
#define RX_MSDU_START_INFO1_MSDU_NUMBER_MASK 0x000000ff
#define RX_MSDU_START_INFO1_MSDU_NUMBER_LSB 0
#define RX_MSDU_START_INFO1_DECAP_FORMAT_MASK 0x00000300
#define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB 8
#define RX_MSDU_START_INFO1_SA_IDX_MASK 0x07ff0000
#define RX_MSDU_START_INFO1_SA_IDX_LSB 16
#define RX_MSDU_START_INFO1_IPV4_PROTO (1 << 10)
#define RX_MSDU_START_INFO1_IPV6_PROTO (1 << 11)
#define RX_MSDU_START_INFO1_TCP_PROTO (1 << 12)
#define RX_MSDU_START_INFO1_UDP_PROTO (1 << 13)
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)

/* Values for the decap_format field of rx_msdu_start.info1 */
enum rx_msdu_decap_format {
	RX_MSDU_DECAP_RAW = 0,		/* no decapsulation */
	RX_MSDU_DECAP_NATIVE_WIFI = 1,
	RX_MSDU_DECAP_ETHERNET2_DIX = 2,
	RX_MSDU_DECAP_8023_SNAP_LLC = 3	/* 802.3 (SNAP/LLC) */
};

/* Start-of-MSDU RX descriptor; per-field semantics are documented in
 * the comment block below. */
struct rx_msdu_start {
	__le32 info0; /* %RX_MSDU_START_INFO0_ */
	__le32 flow_id_crc;	/* CRC32 flow hash (inputs listed below) */
	__le32 info1; /* %RX_MSDU_START_INFO1_ */
} __packed;
437
438/*
439 * msdu_length
440 * MSDU length in bytes after decapsulation. This field is
441 * still valid for MPDU frames without A-MSDU. It still
442 * represents MSDU length after decapsulation
443 *
444 * ip_offset
445 * Indicates the IP offset in bytes from the start of the
446 * packet after decapsulation. Only valid if ipv4_proto or
447 * ipv6_proto is set.
448 *
449 * ring_mask
450 * Indicates the destination RX rings for this MSDU.
451 *
452 * tcp_udp_offset
453 * Indicates the offset in bytes to the start of TCP or UDP
454 * header from the start of the IP header after decapsulation.
455 * Only valid if tcp_prot or udp_prot is set. The value 0
456 * indicates that the offset is longer than 127 bytes.
457 *
458 * reserved_0c
459 * Reserved: HW should fill with zero. FW should ignore.
460 *
461 * flow_id_crc
462 * The flow_id_crc runs CRC32 on the following information:
463 * IPv4 option: dest_addr[31:0], src_addr [31:0], {24'b0,
464 * protocol[7:0]}.
465 * IPv6 option: dest_addr[127:0], src_addr [127:0], {24'b0,
466 * next_header[7:0]}
467 * UDP case: sort_port[15:0], dest_port[15:0]
468 * TCP case: sort_port[15:0], dest_port[15:0],
469 * {header_length[3:0], 6'b0, flags[5:0], window_size[15:0]},
470 * {16'b0, urgent_ptr[15:0]}, all options except 32-bit
471 * timestamp.
472 *
473 * msdu_number
474 * Indicates the MSDU number within a MPDU. This value is
475 * reset to zero at the start of each MPDU. If the number of
476 * MSDU exceeds 255 this number will wrap using modulo 256.
477 *
478 * decap_format
479 * Indicates the format after decapsulation:
480 * 0: RAW: No decapsulation
481 * 1: Native WiFi
482 * 2: Ethernet 2 (DIX)
483 * 3: 802.3 (SNAP/LLC)
484 *
485 * ipv4_proto
486 * Set if L2 layer indicates IPv4 protocol.
487 *
488 * ipv6_proto
489 * Set if L2 layer indicates IPv6 protocol.
490 *
491 * tcp_proto
492 * Set if the ipv4_proto or ipv6_proto are set and the IP
493 * protocol indicates TCP.
494 *
495 * udp_proto
496 * Set if the ipv4_proto or ipv6_proto are set and the IP
497 * protocol indicates UDP.
498 *
499 * ip_frag
500 * Indicates that either the IP More frag bit is set or IP frag
501 * number is non-zero. If set indicates that this is a
502 * fragmented IP packet.
503 *
504 * tcp_only_ack
505 * Set if only the TCP Ack bit is set in the TCP flags and if
506 * the TCP payload is 0.
507 *
508 * sa_idx
509 * The offset in the address table which matches the MAC source
510 * address.
511 *
512 * reserved_2b
513 * Reserved: HW should fill with zero. FW should ignore.
514 */
515
516#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff
517#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0
518#define RX_MSDU_END_INFO0_FIRST_MSDU (1 << 14)
519#define RX_MSDU_END_INFO0_LAST_MSDU (1 << 15)
520#define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30)
521#define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31)
522
523struct rx_msdu_end {
524 __le16 ip_hdr_cksum;
525 __le16 tcp_hdr_cksum;
526 u8 key_id_octet;
527 u8 classification_filter;
528 u8 wapi_pn[10];
529 __le32 info0;
530} __packed;
531
532/*
533 *ip_hdr_chksum
534 * This can include the IP header checksum or the pseudo header
535 * checksum used by TCP/UDP checksum.
536 *
537 *tcp_udp_chksum
538 * The value of the computed TCP/UDP checksum. A mode bit
539 * selects whether this checksum is the full checksum or the
540 * partial checksum which does not include the pseudo header.
541 *
542 *key_id_octet
543 * The key ID octet from the IV. Only valid when first_msdu is
544 * set.
545 *
546 *classification_filter
 * Indicates the number of the classification filter rule
548 *
549 *ext_wapi_pn_63_48
550 * Extension PN (packet number) which is only used by WAPI.
551 * This corresponds to WAPI PN bits [63:48] (pn6 and pn7). The
552 * WAPI PN bits [63:0] are in the pn field of the rx_mpdu_start
553 * descriptor.
554 *
555 *ext_wapi_pn_95_64
556 * Extension PN (packet number) which is only used by WAPI.
557 * This corresponds to WAPI PN bits [95:64] (pn8, pn9, pn10 and
558 * pn11).
559 *
560 *ext_wapi_pn_127_96
561 * Extension PN (packet number) which is only used by WAPI.
562 * This corresponds to WAPI PN bits [127:96] (pn12, pn13, pn14,
563 * pn15).
564 *
565 *reported_mpdu_length
566 * MPDU length before decapsulation. Only valid when
567 * first_msdu is set. This field is taken directly from the
568 * length field of the A-MPDU delimiter or the preamble length
569 * field for non-A-MPDU frames.
570 *
571 *first_msdu
572 * Indicates the first MSDU of A-MSDU. If both first_msdu and
573 * last_msdu are set in the MSDU then this is a non-aggregated
574 * MSDU frame: normal MPDU. Interior MSDU in an A-MSDU shall
575 * have both first_mpdu and last_mpdu bits set to 0.
576 *
577 *last_msdu
578 * Indicates the last MSDU of the A-MSDU. MPDU end status is
579 * only valid when last_msdu is set.
580 *
581 *reserved_3a
582 * Reserved: HW should fill with zero. FW should ignore.
583 *
584 *pre_delim_err
585 * Indicates that the first delimiter had a FCS failure. Only
586 * valid when first_mpdu and first_msdu are set.
587 *
588 *reserved_3b
589 * Reserved: HW should fill with zero. FW should ignore.
590 */
591
/* Values of the l_sig_rate_select field: which rate table l_sig_rate uses */
#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1

/* l_sig_rate values when rate select is OFDM */
#define RX_PPDU_START_SIG_RATE_OFDM_48 0
#define RX_PPDU_START_SIG_RATE_OFDM_24 1
#define RX_PPDU_START_SIG_RATE_OFDM_12 2
#define RX_PPDU_START_SIG_RATE_OFDM_6 3
#define RX_PPDU_START_SIG_RATE_OFDM_54 4
#define RX_PPDU_START_SIG_RATE_OFDM_36 5
#define RX_PPDU_START_SIG_RATE_OFDM_18 6
#define RX_PPDU_START_SIG_RATE_OFDM_9 7

/* l_sig_rate values when rate select is CCK (LP = long preamble,
 * SP = short preamble) */
#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0
#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2
#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3
#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4
#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6

/* preamble_type values (see comment block below) */
#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04
#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08
#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09
#define HTT_RX_PPDU_START_PREAMBLE_VHT 0x0C
#define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D

/* Bit layout of rx_ppdu_start.info0 */
#define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0)

/* Bit layout of rx_ppdu_start.info1 (legacy L-SIG fields) */
#define RX_PPDU_START_INFO1_L_SIG_RATE_MASK 0x0000000f
#define RX_PPDU_START_INFO1_L_SIG_RATE_LSB 0
#define RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK 0x0001ffe0
#define RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB 5
#define RX_PPDU_START_INFO1_L_SIG_TAIL_MASK 0x00fc0000
#define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB 18
#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000
#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB 24
#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT (1 << 4)
#define RX_PPDU_START_INFO1_L_SIG_PARITY (1 << 17)

/* Bit layout of rx_ppdu_start.info2 (HT-SIG / VHT-SIG-A, first 24 bits) */
#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff
#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB 0

/* Bit layout of rx_ppdu_start.info3 (HT-SIG / VHT-SIG-A, last 24 bits) */
#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff
#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB 0
#define RX_PPDU_START_INFO3_TXBF_H_INFO (1 << 24)

/* Bit layout of rx_ppdu_start.info4 */
#define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff
#define RX_PPDU_START_INFO4_VHT_SIG_B_LSB 0

/* Bit layout of rx_ppdu_start.info5 */
#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
#define RX_PPDU_START_INFO5_SERVICE_LSB 0

/* Start-of-PPDU RX descriptor; per-field semantics are documented in
 * the comment block below. */
struct rx_ppdu_start {
	struct {
		u8 pri20_mhz;	/* RSSI on primary 20 MHz; 0x80 = invalid */
		u8 ext20_mhz;	/* RSSI on secondary 20 MHz; 0x80 = invalid */
		u8 ext40_mhz;	/* RSSI on secondary 40 MHz; 0x80 = invalid */
		u8 ext80_mhz;	/* RSSI on secondary 80 MHz; 0x80 = invalid */
	} rssi_chains[4];	/* one entry per RX chain */
	u8 rssi_comb;	/* combined RSSI over active chains; 0x80 = invalid */
	__le16 rsvd0;
	u8 info0; /* %RX_PPDU_START_INFO0_ */
	__le32 info1; /* %RX_PPDU_START_INFO1_ */
	__le32 info2; /* %RX_PPDU_START_INFO2_ */
	__le32 info3; /* %RX_PPDU_START_INFO3_ */
	__le32 info4; /* %RX_PPDU_START_INFO4_ */
	__le32 info5; /* %RX_PPDU_START_INFO5_ */
} __packed;
660
661/*
662 * rssi_chain0_pri20
663 * RSSI of RX PPDU on chain 0 of primary 20 MHz bandwidth.
664 * Value of 0x80 indicates invalid.
665 *
666 * rssi_chain0_sec20
667 * RSSI of RX PPDU on chain 0 of secondary 20 MHz bandwidth.
668 * Value of 0x80 indicates invalid.
669 *
670 * rssi_chain0_sec40
671 * RSSI of RX PPDU on chain 0 of secondary 40 MHz bandwidth.
672 * Value of 0x80 indicates invalid.
673 *
674 * rssi_chain0_sec80
675 * RSSI of RX PPDU on chain 0 of secondary 80 MHz bandwidth.
676 * Value of 0x80 indicates invalid.
677 *
678 * rssi_chain1_pri20
679 * RSSI of RX PPDU on chain 1 of primary 20 MHz bandwidth.
680 * Value of 0x80 indicates invalid.
681 *
682 * rssi_chain1_sec20
683 * RSSI of RX PPDU on chain 1 of secondary 20 MHz bandwidth.
684 * Value of 0x80 indicates invalid.
685 *
686 * rssi_chain1_sec40
687 * RSSI of RX PPDU on chain 1 of secondary 40 MHz bandwidth.
688 * Value of 0x80 indicates invalid.
689 *
690 * rssi_chain1_sec80
691 * RSSI of RX PPDU on chain 1 of secondary 80 MHz bandwidth.
692 * Value of 0x80 indicates invalid.
693 *
694 * rssi_chain2_pri20
695 * RSSI of RX PPDU on chain 2 of primary 20 MHz bandwidth.
696 * Value of 0x80 indicates invalid.
697 *
698 * rssi_chain2_sec20
699 * RSSI of RX PPDU on chain 2 of secondary 20 MHz bandwidth.
700 * Value of 0x80 indicates invalid.
701 *
702 * rssi_chain2_sec40
703 * RSSI of RX PPDU on chain 2 of secondary 40 MHz bandwidth.
704 * Value of 0x80 indicates invalid.
705 *
706 * rssi_chain2_sec80
707 * RSSI of RX PPDU on chain 2 of secondary 80 MHz bandwidth.
708 * Value of 0x80 indicates invalid.
709 *
710 * rssi_chain3_pri20
711 * RSSI of RX PPDU on chain 3 of primary 20 MHz bandwidth.
712 * Value of 0x80 indicates invalid.
713 *
714 * rssi_chain3_sec20
715 * RSSI of RX PPDU on chain 3 of secondary 20 MHz bandwidth.
716 * Value of 0x80 indicates invalid.
717 *
718 * rssi_chain3_sec40
719 * RSSI of RX PPDU on chain 3 of secondary 40 MHz bandwidth.
720 * Value of 0x80 indicates invalid.
721 *
722 * rssi_chain3_sec80
723 * RSSI of RX PPDU on chain 3 of secondary 80 MHz bandwidth.
724 * Value of 0x80 indicates invalid.
725 *
726 * rssi_comb
727 * The combined RSSI of RX PPDU of all active chains and
728 * bandwidths. Value of 0x80 indicates invalid.
729 *
730 * reserved_4a
731 * Reserved: HW should fill with 0, FW should ignore.
732 *
733 * is_greenfield
734 * Do we really support this?
735 *
736 * reserved_4b
737 * Reserved: HW should fill with 0, FW should ignore.
738 *
739 * l_sig_rate
740 * If l_sig_rate_select is 0:
741 * 0x8: OFDM 48 Mbps
742 * 0x9: OFDM 24 Mbps
743 * 0xA: OFDM 12 Mbps
744 * 0xB: OFDM 6 Mbps
745 * 0xC: OFDM 54 Mbps
746 * 0xD: OFDM 36 Mbps
747 * 0xE: OFDM 18 Mbps
748 * 0xF: OFDM 9 Mbps
749 * If l_sig_rate_select is 1:
750 * 0x8: CCK 11 Mbps long preamble
751 * 0x9: CCK 5.5 Mbps long preamble
752 * 0xA: CCK 2 Mbps long preamble
753 * 0xB: CCK 1 Mbps long preamble
754 * 0xC: CCK 11 Mbps short preamble
755 * 0xD: CCK 5.5 Mbps short preamble
756 * 0xE: CCK 2 Mbps short preamble
757 *
758 * l_sig_rate_select
759 * Legacy signal rate select. If set then l_sig_rate indicates
760 * CCK rates. If clear then l_sig_rate indicates OFDM rates.
761 *
762 * l_sig_length
763 * Length of legacy frame in octets.
764 *
765 * l_sig_parity
766 * Odd parity over l_sig_rate and l_sig_length
767 *
768 * l_sig_tail
769 * Tail bits for Viterbi decoder
770 *
771 * preamble_type
772 * Indicates the type of preamble ahead:
773 * 0x4: Legacy (OFDM/CCK)
774 * 0x8: HT
775 * 0x9: HT with TxBF
776 * 0xC: VHT
777 * 0xD: VHT with TxBF
778 * 0x80 - 0xFF: Reserved for special baseband data types such
779 * as radar and spectral scan.
780 *
781 * ht_sig_vht_sig_a_1
782 * If preamble_type == 0x8 or 0x9
783 * HT-SIG (first 24 bits)
784 * If preamble_type == 0xC or 0xD
785 * VHT-SIG A (first 24 bits)
786 * Else
787 * Reserved
788 *
789 * reserved_6
790 * Reserved: HW should fill with 0, FW should ignore.
791 *
792 * ht_sig_vht_sig_a_2
793 * If preamble_type == 0x8 or 0x9
794 * HT-SIG (last 24 bits)
795 * If preamble_type == 0xC or 0xD
796 * VHT-SIG A (last 24 bits)
797 * Else
798 * Reserved
799 *
800 * txbf_h_info
801 * Indicates that the packet data carries H information which
802 * is used for TxBF debug.
803 *
804 * reserved_7
805 * Reserved: HW should fill with 0, FW should ignore.
806 *
807 * vht_sig_b
808 * WiFi 1.0 and WiFi 2.0 will likely have this field to be all
809 * 0s since the BB does not plan on decoding VHT SIG-B.
810 *
811 * reserved_8
812 * Reserved: HW should fill with 0, FW should ignore.
813 *
814 * service
815 * Service field from BB for OFDM, HT and VHT packets. CCK
816 * packets will have service field of 0.
817 *
818 * reserved_9
819 * Reserved: HW should fill with 0, FW should ignore.
820*/
821
822
823#define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0)
824#define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1)
825#define RX_PPDU_END_FLAGS_TXBF_H_INFO (1 << 2)
826
827#define RX_PPDU_END_INFO0_RX_ANTENNA_MASK 0x00ffffff
828#define RX_PPDU_END_INFO0_RX_ANTENNA_LSB 0
829#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
830#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
831
832#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
833
834struct rx_ppdu_end {
835 __le32 evm_p0;
836 __le32 evm_p1;
837 __le32 evm_p2;
838 __le32 evm_p3;
839 __le32 evm_p4;
840 __le32 evm_p5;
841 __le32 evm_p6;
842 __le32 evm_p7;
843 __le32 evm_p8;
844 __le32 evm_p9;
845 __le32 evm_p10;
846 __le32 evm_p11;
847 __le32 evm_p12;
848 __le32 evm_p13;
849 __le32 evm_p14;
850 __le32 evm_p15;
851 __le32 tsf_timestamp;
852 __le32 wb_timestamp;
853 u8 locationing_timestamp;
854 u8 phy_err_code;
855 __le16 flags; /* %RX_PPDU_END_FLAGS_ */
856 __le32 info0; /* %RX_PPDU_END_INFO0_ */
857 __le16 bb_length;
858 __le16 info1; /* %RX_PPDU_END_INFO1_ */
859} __packed;
860
861/*
862 * evm_p0
863 * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3.
864 *
865 * evm_p1
866 * EVM for pilot 1. Contain EVM for streams: 0, 1, 2 and 3.
867 *
868 * evm_p2
869 * EVM for pilot 2. Contain EVM for streams: 0, 1, 2 and 3.
870 *
871 * evm_p3
872 * EVM for pilot 3. Contain EVM for streams: 0, 1, 2 and 3.
873 *
874 * evm_p4
875 * EVM for pilot 4. Contain EVM for streams: 0, 1, 2 and 3.
876 *
877 * evm_p5
878 * EVM for pilot 5. Contain EVM for streams: 0, 1, 2 and 3.
879 *
880 * evm_p6
881 * EVM for pilot 6. Contain EVM for streams: 0, 1, 2 and 3.
882 *
883 * evm_p7
884 * EVM for pilot 7. Contain EVM for streams: 0, 1, 2 and 3.
885 *
886 * evm_p8
887 * EVM for pilot 8. Contain EVM for streams: 0, 1, 2 and 3.
888 *
889 * evm_p9
890 * EVM for pilot 9. Contain EVM for streams: 0, 1, 2 and 3.
891 *
892 * evm_p10
893 * EVM for pilot 10. Contain EVM for streams: 0, 1, 2 and 3.
894 *
895 * evm_p11
896 * EVM for pilot 11. Contain EVM for streams: 0, 1, 2 and 3.
897 *
898 * evm_p12
899 * EVM for pilot 12. Contain EVM for streams: 0, 1, 2 and 3.
900 *
901 * evm_p13
902 * EVM for pilot 13. Contain EVM for streams: 0, 1, 2 and 3.
903 *
904 * evm_p14
905 * EVM for pilot 14. Contain EVM for streams: 0, 1, 2 and 3.
906 *
907 * evm_p15
908 * EVM for pilot 15. Contain EVM for streams: 0, 1, 2 and 3.
909 *
910 * tsf_timestamp
911 * Receive TSF timestamp sampled on the rising edge of
912 * rx_clear. For PHY errors this may be the current TSF when
913 * phy_error is asserted if the rx_clear does not assert before
914 * the end of the PHY error.
915 *
916 * wb_timestamp
917 * WLAN/BT timestamp is a 1 usec resolution timestamp which
918 * does not get updated based on receive beacon like TSF. The
919 * same rules for capturing tsf_timestamp are used to capture
920 * the wb_timestamp.
921 *
922 * locationing_timestamp
923 * Timestamp used for locationing. This timestamp is used to
924 * indicate fractions of usec. For example if the MAC clock is
925 * running at 80 MHz, the timestamp will increment every 12.5
926 * nsec. The value starts at 0 and increments to 79 and
927 * returns to 0 and repeats. This information is valid for
928 * every PPDU. This information can be used in conjunction
929 * with wb_timestamp to capture large delta times.
930 *
931 * phy_err_code
 932 * See section 1.10.8.1.2 for the list of the PHY error codes.
933 *
934 * phy_err
935 * Indicates a PHY error was detected for this PPDU.
936 *
937 * rx_location
938 * Indicates that location information was requested.
939 *
940 * txbf_h_info
941 * Indicates that the packet data carries H information which
942 * is used for TxBF debug.
943 *
944 * reserved_18
945 * Reserved: HW should fill with 0, FW should ignore.
946 *
947 * rx_antenna
948 * Receive antenna value
949 *
950 * tx_ht_vht_ack
951 * Indicates that a HT or VHT Ack/BA frame was transmitted in
952 * response to this receive packet.
953 *
954 * bb_captured_channel
955 * Indicates that the BB has captured a channel dump. FW can
956 * then read the channel dump memory. This may indicate that
957 * the channel was captured either based on PCU setting the
958 * capture_channel bit BB descriptor or FW setting the
959 * capture_channel mode bit.
960 *
961 * reserved_19
962 * Reserved: HW should fill with 0, FW should ignore.
963 *
964 * bb_length
965 * Indicates the number of bytes of baseband information for
966 * PPDUs where the BB descriptor preamble type is 0x80 to 0xFF
967 * which indicates that this is not a normal PPDU but rather
968 * contains baseband debug information.
969 *
970 * reserved_20
971 * Reserved: HW should fill with 0, FW should ignore.
972 *
973 * ppdu_done
974 * PPDU end status is only valid when ppdu_done bit is set.
975 * Every time HW sets this bit in memory FW/SW must clear this
976 * bit in memory. FW will initialize all the ppdu_done dword
977 * to 0.
978*/
979
980#define FW_RX_DESC_INFO0_DISCARD (1 << 0)
981#define FW_RX_DESC_INFO0_FORWARD (1 << 1)
982#define FW_RX_DESC_INFO0_INSPECT (1 << 5)
983#define FW_RX_DESC_INFO0_EXT_MASK 0xC0
984#define FW_RX_DESC_INFO0_EXT_LSB 6
985
986struct fw_rx_desc_base {
987 u8 info0;
988} __packed;
989
990#endif /* _RX_DESC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
new file mode 100644
index 000000000000..be7ba1e78afe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -0,0 +1,449 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef __TARGADDRS_H__
19#define __TARGADDRS_H__
20
21/*
22 * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
23 * host_interest structure. It must match the address of the _host_interest
24 * symbol (see linker script).
25 *
26 * Host Interest is shared between Host and Target in order to coordinate
27 * between the two, and is intended to remain constant (with additions only
28 * at the end) across software releases.
29 *
30 * All addresses are available here so that it's possible to
31 * write a single binary that works with all Target Types.
32 * May be used in assembler code as well as C.
33 */
34#define QCA988X_HOST_INTEREST_ADDRESS 0x00400800
35#define HOST_INTEREST_MAX_SIZE 0x200
36
37/*
38 * These are items that the Host may need to access via BMI or via the
39 * Diagnostic Window. The position of items in this structure must remain
40 * constant across firmware revisions! Types for each item must be fixed
41 * size across target and host platforms. More items may be added at the end.
42 */
43struct host_interest {
44 /*
45 * Pointer to application-defined area, if any.
46 * Set by Target application during startup.
47 */
48 u32 hi_app_host_interest; /* 0x00 */
49
50 /* Pointer to register dump area, valid after Target crash. */
51 u32 hi_failure_state; /* 0x04 */
52
53 /* Pointer to debug logging header */
54 u32 hi_dbglog_hdr; /* 0x08 */
55
56 u32 hi_unused0c; /* 0x0c */
57
58 /*
59 * General-purpose flag bits, similar to SOC_OPTION_* flags.
60 * Can be used by application rather than by OS.
61 */
62 u32 hi_option_flag; /* 0x10 */
63
64 /*
65 * Boolean that determines whether or not to
66 * display messages on the serial port.
67 */
68 u32 hi_serial_enable; /* 0x14 */
69
70 /* Start address of DataSet index, if any */
71 u32 hi_dset_list_head; /* 0x18 */
72
73 /* Override Target application start address */
74 u32 hi_app_start; /* 0x1c */
75
76 /* Clock and voltage tuning */
77 u32 hi_skip_clock_init; /* 0x20 */
78 u32 hi_core_clock_setting; /* 0x24 */
79 u32 hi_cpu_clock_setting; /* 0x28 */
80 u32 hi_system_sleep_setting; /* 0x2c */
81 u32 hi_xtal_control_setting; /* 0x30 */
82 u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
83 u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
84 u32 hi_ref_voltage_trim_setting; /* 0x3c */
85 u32 hi_clock_info; /* 0x40 */
86
87 /* Host uses BE CPU or not */
88 u32 hi_be; /* 0x44 */
89
90 u32 hi_stack; /* normal stack */ /* 0x48 */
91 u32 hi_err_stack; /* error stack */ /* 0x4c */
92 u32 hi_desired_cpu_speed_hz; /* 0x50 */
93
94 /* Pointer to Board Data */
95 u32 hi_board_data; /* 0x54 */
96
97 /*
98 * Indication of Board Data state:
99 * 0: board data is not yet initialized.
100 * 1: board data is initialized; unknown size
101 * >1: number of bytes of initialized board data
102 */
103 u32 hi_board_data_initialized; /* 0x58 */
104
105 u32 hi_dset_ram_index_table; /* 0x5c */
106
107 u32 hi_desired_baud_rate; /* 0x60 */
108 u32 hi_dbglog_config; /* 0x64 */
109 u32 hi_end_ram_reserve_sz; /* 0x68 */
110 u32 hi_mbox_io_block_sz; /* 0x6c */
111
112 u32 hi_num_bpatch_streams; /* 0x70 -- unused */
113 u32 hi_mbox_isr_yield_limit; /* 0x74 */
114
115 u32 hi_refclk_hz; /* 0x78 */
116 u32 hi_ext_clk_detected; /* 0x7c */
117 u32 hi_dbg_uart_txpin; /* 0x80 */
118 u32 hi_dbg_uart_rxpin; /* 0x84 */
119 u32 hi_hci_uart_baud; /* 0x88 */
120 u32 hi_hci_uart_pin_assignments; /* 0x8C */
121
122 u32 hi_hci_uart_baud_scale_val; /* 0x90 */
123 u32 hi_hci_uart_baud_step_val; /* 0x94 */
124
125 u32 hi_allocram_start; /* 0x98 */
126 u32 hi_allocram_sz; /* 0x9c */
127 u32 hi_hci_bridge_flags; /* 0xa0 */
128 u32 hi_hci_uart_support_pins; /* 0xa4 */
129
130 u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
131
132 /*
133 * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
134 * [31:16]: wakeup timeout in ms
135 */
136 /* Pointer to extended board Data */
137 u32 hi_board_ext_data; /* 0xac */
138 u32 hi_board_ext_data_config; /* 0xb0 */
139 /*
140 * Bit [0] : valid
141 * Bit[31:16: size
142 */
143 /*
144 * hi_reset_flag is used to do some stuff when target reset.
145 * such as restore app_start after warm reset or
146 * preserve host Interest area, or preserve ROM data, literals etc.
147 */
148 u32 hi_reset_flag; /* 0xb4 */
149 /* indicate hi_reset_flag is valid */
150 u32 hi_reset_flag_valid; /* 0xb8 */
151 u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
152 /* 0xbc - [31:0]: idle timeout in ms */
153 /* ACS flags */
154 u32 hi_acs_flags; /* 0xc0 */
155 u32 hi_console_flags; /* 0xc4 */
156 u32 hi_nvram_state; /* 0xc8 */
157 u32 hi_option_flag2; /* 0xcc */
158
159 /* If non-zero, override values sent to Host in WMI_READY event. */
160 u32 hi_sw_version_override; /* 0xd0 */
161 u32 hi_abi_version_override; /* 0xd4 */
162
163 /*
164 * Percentage of high priority RX traffic to total expected RX traffic
165 * applicable only to ar6004
166 */
167 u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
168
169 /* test applications flags */
170 u32 hi_test_apps_related; /* 0xdc */
171 /* location of test script */
172 u32 hi_ota_testscript; /* 0xe0 */
173 /* location of CAL data */
174 u32 hi_cal_data; /* 0xe4 */
175
176 /* Number of packet log buffers */
177 u32 hi_pktlog_num_buffers; /* 0xe8 */
178
179 /* wow extension configuration */
180 u32 hi_wow_ext_config; /* 0xec */
181 u32 hi_pwr_save_flags; /* 0xf0 */
182
183 /* Spatial Multiplexing Power Save (SMPS) options */
184 u32 hi_smps_options; /* 0xf4 */
185
186 /* Interconnect-specific state */
187 u32 hi_interconnect_state; /* 0xf8 */
188
189 /* Coex configuration flags */
190 u32 hi_coex_config; /* 0xfc */
191
192 /* Early allocation support */
193 u32 hi_early_alloc; /* 0x100 */
194 /* FW swap field */
195 /*
196 * Bits of this 32bit word will be used to pass specific swap
197 * instruction to FW
198 */
199 /*
200 * Bit 0 -- AP Nart descriptor no swap. When this bit is set
201 * FW will not swap TX descriptor. Meaning packets are formed
202 * on the target processor.
203 */
204 /* Bit 1 - unused */
205 u32 hi_fw_swap; /* 0x104 */
206} __packed;
207
208#define HI_ITEM(item) offsetof(struct host_interest, item)
209
210/* Bits defined in hi_option_flag */
211
212/* Enable timer workaround */
213#define HI_OPTION_TIMER_WAR 0x01
214/* Limit BMI command credits */
215#define HI_OPTION_BMI_CRED_LIMIT 0x02
216/* Relay Dot11 hdr to/from host */
217#define HI_OPTION_RELAY_DOT11_HDR 0x04
 218/* MAC addr method 0-locally administered 1-globally unique addrs */
219#define HI_OPTION_MAC_ADDR_METHOD 0x08
220/* Firmware Bridging */
221#define HI_OPTION_FW_BRIDGE 0x10
222/* Enable CPU profiling */
223#define HI_OPTION_ENABLE_PROFILE 0x20
224/* Disable debug logging */
225#define HI_OPTION_DISABLE_DBGLOG 0x40
226/* Skip Era Tracking */
227#define HI_OPTION_SKIP_ERA_TRACKING 0x80
228/* Disable PAPRD (debug) */
229#define HI_OPTION_PAPRD_DISABLE 0x100
230#define HI_OPTION_NUM_DEV_LSB 0x200
231#define HI_OPTION_NUM_DEV_MSB 0x800
232#define HI_OPTION_DEV_MODE_LSB 0x1000
233#define HI_OPTION_DEV_MODE_MSB 0x8000000
234/* Disable LowFreq Timer Stabilization */
235#define HI_OPTION_NO_LFT_STBL 0x10000000
236/* Skip regulatory scan */
237#define HI_OPTION_SKIP_REG_SCAN 0x20000000
238/*
239 * Do regulatory scan during init before
240 * sending WMI ready event to host
241 */
242#define HI_OPTION_INIT_REG_SCAN 0x40000000
243
244/* REV6: Do not adjust memory map */
245#define HI_OPTION_SKIP_MEMMAP 0x80000000
246
247#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
248
249/* 2 bits of hi_option_flag are used to represent 3 modes */
250#define HI_OPTION_FW_MODE_IBSS 0x0 /* IBSS Mode */
251#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
252#define HI_OPTION_FW_MODE_AP 0x2 /* AP Mode */
253#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
254
 255/* 2 bits of hi_option_flag are used to represent 4 submodes */
256#define HI_OPTION_FW_SUBMODE_NONE 0x0 /* Normal mode */
257#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1 /* p2p device mode */
258#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
259#define HI_OPTION_FW_SUBMODE_P2PGO 0x3 /* p2p go mode */
260
261/* Num dev Mask */
262#define HI_OPTION_NUM_DEV_MASK 0x7
263#define HI_OPTION_NUM_DEV_SHIFT 0x9
264
265/* firmware bridging */
266#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
267
268/*
269Fw Mode/SubMode Mask
270|-----------------------------------------------------------------------------|
271| SUB | SUB | SUB | SUB | | | | |
272|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]|
273| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) |
274|-----------------------------------------------------------------------------|
275*/
276#define HI_OPTION_FW_MODE_BITS 0x2
277#define HI_OPTION_FW_MODE_MASK 0x3
278#define HI_OPTION_FW_MODE_SHIFT 0xC
279#define HI_OPTION_ALL_FW_MODE_MASK 0xFF
280
281#define HI_OPTION_FW_SUBMODE_BITS 0x2
282#define HI_OPTION_FW_SUBMODE_MASK 0x3
283#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
284#define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00
285#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
286
287
288/* hi_option_flag2 options */
289#define HI_OPTION_OFFLOAD_AMSDU 0x01
290#define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */
291#define HI_OPTION_ENABLE_RFKILL 0x04 /* RFKill Enable Feature*/
292#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
293#define HI_OPTION_EARLY_CFG_DONE 0x10 /* Early configuration is complete */
294
295#define HI_OPTION_RF_KILL_SHIFT 0x2
296#define HI_OPTION_RF_KILL_MASK 0x1
297
298/* hi_reset_flag */
299/* preserve App Start address */
300#define HI_RESET_FLAG_PRESERVE_APP_START 0x01
301/* preserve host interest */
302#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST 0x02
303/* preserve ROM data */
304#define HI_RESET_FLAG_PRESERVE_ROMDATA 0x04
305#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE 0x08
306#define HI_RESET_FLAG_PRESERVE_BOOT_INFO 0x10
307#define HI_RESET_FLAG_WARM_RESET 0x20
308
309/* define hi_fw_swap bits */
310#define HI_DESC_IN_FW_BIT 0x01
311
312/* indicate the reset flag is valid */
313#define HI_RESET_FLAG_IS_VALID 0x12345678
314
315/* ACS is enabled */
316#define HI_ACS_FLAGS_ENABLED (1 << 0)
317/* Use physical WWAN device */
318#define HI_ACS_FLAGS_USE_WWAN (1 << 1)
319/* Use test VAP */
320#define HI_ACS_FLAGS_TEST_VAP (1 << 2)
321
322/*
323 * CONSOLE FLAGS
324 *
325 * Bit Range Meaning
326 * --------- --------------------------------
327 * 2..0 UART ID (0 = Default)
328 * 3 Baud Select (0 = 9600, 1 = 115200)
329 * 30..4 Reserved
330 * 31 Enable Console
331 *
332 */
333
334#define HI_CONSOLE_FLAGS_ENABLE (1 << 31)
335#define HI_CONSOLE_FLAGS_UART_MASK (0x7)
336#define HI_CONSOLE_FLAGS_UART_SHIFT 0
337#define HI_CONSOLE_FLAGS_BAUD_SELECT (1 << 3)
338
339/* SM power save options */
340#define HI_SMPS_ALLOW_MASK (0x00000001)
341#define HI_SMPS_MODE_MASK (0x00000002)
342#define HI_SMPS_MODE_STATIC (0x00000000)
343#define HI_SMPS_MODE_DYNAMIC (0x00000002)
344#define HI_SMPS_DISABLE_AUTO_MODE (0x00000004)
345#define HI_SMPS_DATA_THRESH_MASK (0x000007f8)
346#define HI_SMPS_DATA_THRESH_SHIFT (3)
347#define HI_SMPS_RSSI_THRESH_MASK (0x0007f800)
348#define HI_SMPS_RSSI_THRESH_SHIFT (11)
349#define HI_SMPS_LOWPWR_CM_MASK (0x00380000)
350#define HI_SMPS_LOWPWR_CM_SHIFT (15)
351#define HI_SMPS_HIPWR_CM_MASK (0x03c00000)
352#define HI_SMPS_HIPWR_CM_SHIFT (19)
353
354/*
355 * WOW Extension configuration
356 *
357 * Bit Range Meaning
358 * --------- --------------------------------
359 * 8..0 Size of each WOW pattern (max 511)
360 * 15..9 Number of patterns per list (max 127)
361 * 17..16 Number of lists (max 4)
362 * 30..18 Reserved
363 * 31 Enabled
364 *
365 * set values (except enable) to zeros for default settings
366 */
367
368#define HI_WOW_EXT_ENABLED_MASK (1 << 31)
369#define HI_WOW_EXT_NUM_LIST_SHIFT 16
370#define HI_WOW_EXT_NUM_LIST_MASK (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT)
371#define HI_WOW_EXT_NUM_PATTERNS_SHIFT 9
372#define HI_WOW_EXT_NUM_PATTERNS_MASK (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT)
373#define HI_WOW_EXT_PATTERN_SIZE_SHIFT 0
374#define HI_WOW_EXT_PATTERN_SIZE_MASK (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT)
375
376#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \
377 ((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \
378 HI_WOW_EXT_NUM_LIST_MASK) | \
379 (((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \
380 HI_WOW_EXT_NUM_PATTERNS_MASK) | \
381 (((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \
382 HI_WOW_EXT_PATTERN_SIZE_MASK))
383
384#define HI_WOW_EXT_GET_NUM_LISTS(config) \
385 (((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT)
386#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \
387 (((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \
388 HI_WOW_EXT_NUM_PATTERNS_SHIFT)
389#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \
390 (((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \
391 HI_WOW_EXT_PATTERN_SIZE_SHIFT)
392
393/*
394 * Early allocation configuration
395 * Support RAM bank configuration before BMI done and this eases the memory
396 * allocation at very early stage
397 * Bit Range Meaning
398 * --------- ----------------------------------
399 * [0:3] number of bank assigned to be IRAM
400 * [4:15] reserved
401 * [16:31] magic number
402 *
403 * Note:
404 * 1. target firmware would check magic number and if it's a match, firmware
405 * would consider the bits[0:15] are valid and base on that to calculate
406 * the end of DRAM. Early allocation would be located at that area and
 407 * may be reclaimed when necessary
408 * 2. if no magic number is found, early allocation would happen at "_end"
409 * symbol of ROM which is located before the app-data and might NOT be
410 * re-claimable. If this is adopted, link script should keep this in
411 * mind to avoid data corruption.
412 */
413#define HI_EARLY_ALLOC_MAGIC 0x6d8a
414#define HI_EARLY_ALLOC_MAGIC_MASK 0xffff0000
415#define HI_EARLY_ALLOC_MAGIC_SHIFT 16
416#define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f
417#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT 0
418
419#define HI_EARLY_ALLOC_VALID() \
420 ((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \
421 HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC))
422#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \
423 (((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \
424 >> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
425
426/*power save flag bit definitions*/
427#define HI_PWR_SAVE_LPL_ENABLED 0x1
428/*b1-b3 reserved*/
429/*b4-b5 : dev0 LPL type : 0 - none
430 1- Reduce Pwr Search
431 2- Reduce Pwr Listen*/
432/*b6-b7 : dev1 LPL type and so on for Max 8 devices*/
433#define HI_PWR_SAVE_LPL_DEV0_LSB 4
434#define HI_PWR_SAVE_LPL_DEV_MASK 0x3
435/*power save related utility macros*/
436#define HI_LPL_ENABLED() \
437 ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
438#define HI_DEV_LPL_TYPE_GET(_devix) \
439 (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
440 (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
441
442#define HOST_INTEREST_SMPS_IS_ALLOWED() \
443 ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
444
445/* Reserve 1024 bytes for extended board data */
446#define QCA988X_BOARD_DATA_SZ 7168
447#define QCA988X_BOARD_EXT_DATA_SZ 0
448
449#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
new file mode 100644
index 000000000000..4a31e2c6fbd4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.c
@@ -0,0 +1,20 @@
1/*
2 * Copyright (c) 2012 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/module.h>
18
19#define CREATE_TRACE_POINTS
20#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
new file mode 100644
index 000000000000..85e806bf7257
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -0,0 +1,170 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
19
20#include <linux/tracepoint.h>
21
22#define _TRACE_H_
23
24/* create empty functions when tracing is disabled */
25#if !defined(CONFIG_ATH10K_TRACING)
26#undef TRACE_EVENT
27#define TRACE_EVENT(name, proto, ...) \
28static inline void trace_ ## name(proto) {}
29#undef DECLARE_EVENT_CLASS
30#define DECLARE_EVENT_CLASS(...)
31#undef DEFINE_EVENT
32#define DEFINE_EVENT(evt_class, name, proto, ...) \
33static inline void trace_ ## name(proto) {}
34#endif /* !CONFIG_ATH10K_TRACING || __CHECKER__ */
35
36#undef TRACE_SYSTEM
37#define TRACE_SYSTEM ath10k
38
39#define ATH10K_MSG_MAX 200
40
41DECLARE_EVENT_CLASS(ath10k_log_event,
42 TP_PROTO(struct va_format *vaf),
43 TP_ARGS(vaf),
44 TP_STRUCT__entry(
45 __dynamic_array(char, msg, ATH10K_MSG_MAX)
46 ),
47 TP_fast_assign(
48 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
49 ATH10K_MSG_MAX,
50 vaf->fmt,
51 *vaf->va) >= ATH10K_MSG_MAX);
52 ),
53 TP_printk("%s", __get_str(msg))
54);
55
56DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
57 TP_PROTO(struct va_format *vaf),
58 TP_ARGS(vaf)
59);
60
61DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
62 TP_PROTO(struct va_format *vaf),
63 TP_ARGS(vaf)
64);
65
66DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
67 TP_PROTO(struct va_format *vaf),
68 TP_ARGS(vaf)
69);
70
71TRACE_EVENT(ath10k_log_dbg,
72 TP_PROTO(unsigned int level, struct va_format *vaf),
73 TP_ARGS(level, vaf),
74 TP_STRUCT__entry(
75 __field(unsigned int, level)
76 __dynamic_array(char, msg, ATH10K_MSG_MAX)
77 ),
78 TP_fast_assign(
79 __entry->level = level;
80 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
81 ATH10K_MSG_MAX,
82 vaf->fmt,
83 *vaf->va) >= ATH10K_MSG_MAX);
84 ),
85 TP_printk("%s", __get_str(msg))
86);
87
88TRACE_EVENT(ath10k_log_dbg_dump,
89 TP_PROTO(const char *msg, const char *prefix,
90 const void *buf, size_t buf_len),
91
92 TP_ARGS(msg, prefix, buf, buf_len),
93
94 TP_STRUCT__entry(
95 __string(msg, msg)
96 __string(prefix, prefix)
97 __field(size_t, buf_len)
98 __dynamic_array(u8, buf, buf_len)
99 ),
100
101 TP_fast_assign(
102 __assign_str(msg, msg);
103 __assign_str(prefix, prefix);
104 __entry->buf_len = buf_len;
105 memcpy(__get_dynamic_array(buf), buf, buf_len);
106 ),
107
108 TP_printk(
109 "%s/%s\n", __get_str(prefix), __get_str(msg)
110 )
111);
112
113TRACE_EVENT(ath10k_wmi_cmd,
114 TP_PROTO(int id, void *buf, size_t buf_len),
115
116 TP_ARGS(id, buf, buf_len),
117
118 TP_STRUCT__entry(
119 __field(unsigned int, id)
120 __field(size_t, buf_len)
121 __dynamic_array(u8, buf, buf_len)
122 ),
123
124 TP_fast_assign(
125 __entry->id = id;
126 __entry->buf_len = buf_len;
127 memcpy(__get_dynamic_array(buf), buf, buf_len);
128 ),
129
130 TP_printk(
131 "id %d len %zu",
132 __entry->id,
133 __entry->buf_len
134 )
135);
136
137TRACE_EVENT(ath10k_wmi_event,
138 TP_PROTO(int id, void *buf, size_t buf_len),
139
140 TP_ARGS(id, buf, buf_len),
141
142 TP_STRUCT__entry(
143 __field(unsigned int, id)
144 __field(size_t, buf_len)
145 __dynamic_array(u8, buf, buf_len)
146 ),
147
148 TP_fast_assign(
149 __entry->id = id;
150 __entry->buf_len = buf_len;
151 memcpy(__get_dynamic_array(buf), buf, buf_len);
152 ),
153
154 TP_printk(
155 "id %d len %zu",
156 __entry->id,
157 __entry->buf_len
158 )
159);
160
161#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
162
163/* we don't want to use include/trace/events */
164#undef TRACE_INCLUDE_PATH
165#define TRACE_INCLUDE_PATH .
166#undef TRACE_INCLUDE_FILE
167#define TRACE_INCLUDE_FILE trace
168
169/* This part must be outside protection */
170#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
new file mode 100644
index 000000000000..68b6faefd1d8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -0,0 +1,417 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "core.h"
19#include "txrx.h"
20#include "htt.h"
21#include "mac.h"
22#include "debug.h"
23
/* Complete the pending off-channel tx request if @skb is the frame the
 * off-channel machinery is currently waiting on; otherwise just warn.
 * Takes ar->data_lock to synchronize with the waiter.
 */
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
	if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
		return;

	/* If the original wait_for_completion() timed out before
	 * {data,mgmt}_tx_completed() was called then we could complete
	 * offchan_tx_completed for a different skb. Prevent this by using
	 * offchan_tx_skb. */
	spin_lock_bh(&ar->data_lock);
	if (ar->offchan_tx_skb != skb) {
		ath10k_warn("completed old offchannel frame\n");
		goto out;
	}

	complete(&ar->offchan_tx_completed);
	ar->offchan_tx_skb = NULL; /* just for sanity */

	ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
	spin_unlock_bh(&ar->data_lock);
}
46
/* Drop one reference on a tx descriptor; on the last reference, unmap
 * DMA, report tx status to mac80211, release the msdu id and free the
 * descriptor. Ownership of @msdu passes to mac80211 (or is freed on
 * discard); @txdesc is always freed here on the last ref.
 */
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
{
	struct device *dev = htt->ar->dev;
	struct ieee80211_tx_info *info;
	struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
	struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
	int ret;

	/* refcount may be decremented from multiple completion paths;
	 * only the final decrement performs the teardown below */
	if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
		return;

	ATH10K_SKB_CB(txdesc)->htt.refcount--;

	if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
		return;

	if (txfrag) {
		ret = ath10k_skb_unmap(dev, txfrag);
		if (ret)
			ath10k_warn("txfrag unmap failed (%d)\n", ret);

		dev_kfree_skb_any(txfrag);
	}

	ret = ath10k_skb_unmap(dev, msdu);
	if (ret)
		ath10k_warn("data skb unmap failed (%d)\n", ret);

	ath10k_report_offchan_tx(htt->ar, msdu);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	if (ATH10K_SKB_CB(txdesc)->htt.discard) {
		ieee80211_free_txskb(htt->ar->hw, msdu);
		goto exit;
	}

	/* frames without the NO_ACK policy are reported as acked unless
	 * the target flagged no_ack below */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status(htt->ar->hw, msdu);
	/* we do not own the msdu anymore */

exit:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
	__ath10k_htt_tx_dec_pending(htt);
	/* wake anyone flushing once the last in-flight msdu is gone */
	if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
		wake_up(&htt->empty_tx_wq);
	spin_unlock_bh(&htt->tx_lock);

	dev_kfree_skb_any(txdesc);
}
105
106void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
107 const struct htt_tx_done *tx_done)
108{
109 struct sk_buff *txdesc;
110
111 ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
112 tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
113
114 if (tx_done->msdu_id >= htt->max_num_pending_tx) {
115 ath10k_warn("warning: msdu_id %d too big, ignoring\n",
116 tx_done->msdu_id);
117 return;
118 }
119
120 txdesc = htt->pending_tx[tx_done->msdu_id];
121
122 ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
123 ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
124
125 ath10k_txrx_tx_unref(htt, txdesc);
126}
127
/* Map the 4-bit legacy hardware rate code to an index into the
 * driver-registered rate table (ath10k_rates[]): entries 0-3 are CCK
 * (1/2/5.5/11 Mbps), 4-11 are OFDM (6..54 Mbps). Codes 0x00-0x07 are
 * CCK, 0x08-0x0F are OFDM.
 */
static const u8 rx_legacy_rate_idx[] = {
	3,	/* 0x00  - 11Mbps  */
	2,	/* 0x01  - 5.5Mbps */
	1,	/* 0x02  - 2Mbps   */
	0,	/* 0x03  - 1Mbps   */
	3,	/* 0x04  - 11Mbps  */
	2,	/* 0x05  - 5.5Mbps */
	1,	/* 0x06  - 2Mbps   */
	0,	/* 0x07  - 1Mbps   */
	10,	/* 0x08  - 48Mbps  */
	8,	/* 0x09  - 24Mbps  */
	6,	/* 0x0A  - 12Mbps  */
	4,	/* 0x0B  - 6Mbps   */
	11,	/* 0x0C  - 54Mbps  */
	9,	/* 0x0D  - 36Mbps  */
	7,	/* 0x0E  - 18Mbps  */
	5,	/* 0x0F  - 9Mbps   */
};
146
/* Decode the rate information carried in the HTT rx indication
 * (info->rate.info0/1/2) into mac80211 rx status fields: rate_idx,
 * HT/VHT flags, bandwidth and short-GI. Does nothing when the
 * PPDU-start fields are not valid for this indication.
 */
static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
			     enum ieee80211_band band,
			     struct ieee80211_rx_status *status)
{
	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
	u8 info0 = info->rate.info0;
	u32 info1 = info->rate.info1;
	u32 info2 = info->rate.info2;
	u8 preamble = 0;

	/* Check if valid fields */
	if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
		return;

	preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
		rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
		rate_idx = 0;

		/* only OFDM codes 0x08-0x0F are expected here for non-CCK;
		 * anything else leaves rate_idx at 0 */
		if (rate < 0x08 || rate > 0x0F)
			break;

		switch (band) {
		case IEEE80211_BAND_2GHZ:
			if (cck)
				rate &= ~BIT(3);
			rate_idx = rx_legacy_rate_idx[rate];
			break;
		case IEEE80211_BAND_5GHZ:
			rate_idx = rx_legacy_rate_idx[rate];
			/* We are using same rate table registering
			   HW - ath10k_rates[]. In case of 5GHz skip
			   CCK rates, so -4 here */
			rate_idx -= 4;
			break;
		default:
			break;
		}

		status->rate_idx = rate_idx;
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info1 and info2 */
		mcs = info1 & 0x1F;
		nss = mcs >> 3;
		bw = (info1 >> 7) & 1;
		sgi = (info2 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
		   TODO check this */
		mcs = (info2 >> 4) & 0x0F;
		nss = (info1 >> 10) & 0x07;
		bw = info1 & 3;
		sgi = info2 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->flag |= RX_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}
240
/* Deliver one received frame to mac80211: fill in decryption/FCS/MIC
 * flags, signal, channel and rate info in the skb's rx status, then
 * hand the skb to ieee80211_rx(). Frees the skb itself only when no
 * rx channel is known; otherwise mac80211 takes ownership.
 */
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_channel *ch;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;

	status = IEEE80211_SKB_RXCB(info->skb);
	memset(status, 0, sizeof(*status));

	if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		/* hw stripped IV/MIC - clear the protected bit so
		 * mac80211 does not try to decrypt again */
		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;
		hdr->frame_control = __cpu_to_le16(
				__le16_to_cpu(hdr->frame_control) &
				~IEEE80211_FCTL_PROTECTED);
	}

	if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (info->fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	status->signal = info->signal;

	/* prefer the scan channel while a scan is in progress */
	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	spin_unlock_bh(&ar->data_lock);

	if (!ch) {
		ath10k_warn("no channel configured; ignoring frame!\n");
		dev_kfree_skb_any(info->skb);
		return;
	}

	process_rx_rates(ar, info, ch->band, status);
	status->band = ch->band;
	status->freq = ch->center_freq;

	ath10k_dbg(ATH10K_DBG_DATA,
		   "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n",
		   info->skb,
		   info->skb->len,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->flag & RX_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band);

	ieee80211_rx(ar->hw, info->skb);
}
299
300struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
301 const u8 *addr)
302{
303 struct ath10k_peer *peer;
304
305 lockdep_assert_held(&ar->data_lock);
306
307 list_for_each_entry(peer, &ar->peers, list) {
308 if (peer->vdev_id != vdev_id)
309 continue;
310 if (memcmp(peer->addr, addr, ETH_ALEN))
311 continue;
312
313 return peer;
314 }
315
316 return NULL;
317}
318
319static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar,
320 int peer_id)
321{
322 struct ath10k_peer *peer;
323
324 lockdep_assert_held(&ar->data_lock);
325
326 list_for_each_entry(peer, &ar->peers, list)
327 if (test_bit(peer_id, peer->peer_ids))
328 return peer;
329
330 return NULL;
331}
332
/* Wait up to 3s for a peer entry to appear (expect_mapped == true) or
 * disappear (false) in ar->peers. Returns 0 on success, -ETIMEDOUT
 * otherwise. The condition is a GCC statement expression whose value
 * is the final comparison.
 */
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	ret = wait_event_timeout(ar->peer_mapping_wq, ({
			bool mapped;

			/* take data_lock per check - the waker updates
			 * ar->peers under the same lock */
			spin_lock_bh(&ar->data_lock);
			mapped = !!ath10k_peer_find(ar, vdev_id, addr);
			spin_unlock_bh(&ar->data_lock);

			mapped == expect_mapped;
		}), 3*HZ);

	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}
353
/* Block until the firmware reports the peer mapped; 0 or -ETIMEDOUT. */
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}
358
/* Block until the firmware reports the peer unmapped; 0 or -ETIMEDOUT. */
int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}
363
/* Handle an HTT peer-map event: create the peer entry on first mapping
 * (GFP_ATOMIC - we are under a spinlock) and record the new peer id in
 * its bitmap. Wakes peer_mapping_wq so waiters re-check.
 */
void ath10k_peer_map_event(struct ath10k_htt *htt,
			   struct htt_peer_map_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
	if (!peer) {
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = ev->vdev_id;
		memcpy(peer->addr, ev->addr, ETH_ALEN);
		list_add(&peer->list, &ar->peers);
		wake_up(&ar->peer_mapping_wq);
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   ev->vdev_id, ev->addr, ev->peer_id);

	set_bit(ev->peer_id, peer->peer_ids);
exit:
	spin_unlock_bh(&ar->data_lock);
}
390
/* Handle an HTT peer-unmap event: clear the peer id from the owning
 * peer's bitmap and free the peer entry once its last id is gone.
 * Wakes peer_mapping_wq so ath10k_wait_for_peer_deleted() re-checks.
 */
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
			     struct htt_peer_unmap_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
	if (!peer) {
		ath10k_warn("unknown peer id %d\n", ev->peer_id);
		goto exit;
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, ev->peer_id);

	clear_bit(ev->peer_id, peer->peer_ids);

	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
		list_del(&peer->list);
		kfree(peer);
		wake_up(&ar->peer_mapping_wq);
	}

exit:
	spin_unlock_bh(&ar->data_lock);
}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
new file mode 100644
index 000000000000..e78632a76df7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
#ifndef _TXRX_H_
#define _TXRX_H_

#include "htt.h"

/* tx completion: drop a ref on a tx descriptor / handle an HTT tx done */
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
			      const struct htt_tx_done *tx_done);
/* deliver a received frame to mac80211 */
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);

/* peer table lookup/wait helpers; ath10k_peer_find requires ar->data_lock */
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
				     const u8 *addr);
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
				 const u8 *addr);
int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
				 const u8 *addr);

/* HTT peer map/unmap event handlers */
void ath10k_peer_map_event(struct ath10k_htt *htt,
			   struct htt_peer_map_event *ev);
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
			     struct htt_peer_unmap_event *ev);

#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
new file mode 100644
index 000000000000..7d4b7987422d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -0,0 +1,2081 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/skbuff.h>
19
20#include "core.h"
21#include "htc.h"
22#include "debug.h"
23#include "wmi.h"
24#include "mac.h"
25
/* Wait up to 5s for all pending WMI tx buffers to be consumed by HTC.
 * Best-effort: logs a warning on timeout, never returns an error.
 */
void ath10k_wmi_flush_tx(struct ath10k *ar)
{
	int ret;

	ret = wait_event_timeout(ar->wmi.wq,
				 atomic_read(&ar->wmi.pending_tx_count) == 0,
				 5*HZ);
	/* re-check after the wait: the count may have drained even if
	 * wait_event_timeout() reported a timeout */
	if (atomic_read(&ar->wmi.pending_tx_count) == 0)
		return;

	if (ret == 0)
		ret = -ETIMEDOUT;

	if (ret < 0)
		ath10k_warn("wmi flush failed (%d)\n", ret);
}
42
43int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
44{
45 int ret;
46 ret = wait_for_completion_timeout(&ar->wmi.service_ready,
47 WMI_SERVICE_READY_TIMEOUT_HZ);
48 return ret;
49}
50
51int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
52{
53 int ret;
54 ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
55 WMI_UNIFIED_READY_TIMEOUT_HZ);
56 return ret;
57}
58
/* Allocate a zeroed skb for a WMI command: payload rounded up to a
 * 4-byte multiple, with WMI_SKB_HEADROOM reserved for the headers
 * pushed later. Returns NULL on allocation failure.
 */
static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
{
	struct sk_buff *skb;
	u32 round_len = roundup(len, 4);

	skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	/* firmware expects 4-byte aligned command buffers */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("Unaligned WMI skb\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}
77
78static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
79{
80 dev_kfree_skb(skb);
81
82 if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
83 wake_up(&ar->wmi.wq);
84}
85
86/* WMI command API */
/* WMI command API */
/* Prepend the WMI command header to @skb and hand it to HTC.
 * Returns 0 on success; -ENOMEM if there is no headroom, -EBUSY when
 * too many commands are already in flight, or the HTC send error.
 * On failure the skb is freed here (except the -ENOMEM path, where the
 * caller still owns it).
 */
static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
			       enum wmi_cmd_id cmd_id)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct wmi_cmd_hdr *cmd_hdr;
	int status;
	u32 cmd = 0;

	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return -ENOMEM;

	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = __cpu_to_le32(cmd);

	if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
	    WMI_MAX_PENDING_TX_COUNT) {
		/* avoid using up memory when FW hangs */
		atomic_dec(&ar->wmi.pending_tx_count);
		return -EBUSY;
	}

	memset(skb_cb, 0, sizeof(*skb_cb));

	/* trace before handing off - after ath10k_htc_send() the skb may
	 * be freed by the completion path */
	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);

	status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb);
	if (status) {
		dev_kfree_skb_any(skb);
		atomic_dec(&ar->wmi.pending_tx_count);
		return status;
	}

	return 0;
}
123
/* Handle WMI_SCAN_EVENTID: advance the scan / remain-on-channel state
 * machine (completions, scan_channel tracking) under ar->data_lock.
 * Always returns 0.
 */
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
	enum wmi_scan_event_type event_type;
	enum wmi_scan_completion_reason reason;
	u32 freq;
	u32 req_id;
	u32 scan_id;
	u32 vdev_id;

	event_type = __le32_to_cpu(event->event_type);
	reason = __le32_to_cpu(event->reason);
	freq = __le32_to_cpu(event->channel_freq);
	req_id = __le32_to_cpu(event->scan_req_id);
	scan_id = __le32_to_cpu(event->scan_id);
	vdev_id = __le32_to_cpu(event->vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
	ath10k_dbg(ATH10K_DBG_WMI,
		   "scan event type %d reason %d freq %d req_id %d "
		   "scan_id %d vdev_id %d\n",
		   event_type, reason, freq, req_id, scan_id, vdev_id);

	spin_lock_bh(&ar->data_lock);

	switch (event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
		/* remain-on-channel requests are implemented as scans;
		 * tell mac80211 we are on channel */
		if (ar->scan.in_progress && ar->scan.is_roc)
			ieee80211_ready_on_channel(ar->hw);

		complete(&ar->scan.started);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
		switch (reason) {
		case WMI_SCAN_REASON_COMPLETED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
			break;
		case WMI_SCAN_REASON_CANCELLED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
			break;
		case WMI_SCAN_REASON_PREEMPTED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
			break;
		case WMI_SCAN_REASON_TIMEDOUT:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
			break;
		default:
			break;
		}

		ar->scan_channel = NULL;
		if (!ar->scan.in_progress) {
			ath10k_warn("no scan requested, ignoring\n");
			break;
		}

		if (ar->scan.is_roc) {
			ath10k_offchan_tx_purge(ar);

			/* aborted RoC is reported via the cancel path,
			 * not as "expired" */
			if (!ar->scan.aborting)
				ieee80211_remain_on_channel_expired(ar->hw);
		} else {
			ieee80211_scan_completed(ar->hw, ar->scan.aborting);
		}

		del_timer(&ar->scan.timeout);
		complete_all(&ar->scan.completed);
		ar->scan.in_progress = false;
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
		ar->scan_channel = NULL;
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
		if (ar->scan.in_progress && ar->scan.is_roc &&
		    ar->scan.roc_freq == freq) {
			complete(&ar->scan.on_channel);
		}
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
		break;
	case WMI_SCAN_EVENT_PREEMPTED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
		break;
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);
	return 0;
}
223
224static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
225{
226 enum ieee80211_band band;
227
228 switch (phy_mode) {
229 case MODE_11A:
230 case MODE_11NA_HT20:
231 case MODE_11NA_HT40:
232 case MODE_11AC_VHT20:
233 case MODE_11AC_VHT40:
234 case MODE_11AC_VHT80:
235 band = IEEE80211_BAND_5GHZ;
236 break;
237 case MODE_11G:
238 case MODE_11B:
239 case MODE_11GONLY:
240 case MODE_11NG_HT20:
241 case MODE_11NG_HT40:
242 case MODE_11AC_VHT20_2G:
243 case MODE_11AC_VHT40_2G:
244 case MODE_11AC_VHT80_2G:
245 default:
246 band = IEEE80211_BAND_2GHZ;
247 }
248
249 return band;
250}
251
252static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
253{
254 u8 rate_idx = 0;
255
256 /* rate in Kbps */
257 switch (rate) {
258 case 1000:
259 rate_idx = 0;
260 break;
261 case 2000:
262 rate_idx = 1;
263 break;
264 case 5500:
265 rate_idx = 2;
266 break;
267 case 11000:
268 rate_idx = 3;
269 break;
270 case 6000:
271 rate_idx = 4;
272 break;
273 case 9000:
274 rate_idx = 5;
275 break;
276 case 12000:
277 rate_idx = 6;
278 break;
279 case 18000:
280 rate_idx = 7;
281 break;
282 case 24000:
283 rate_idx = 8;
284 break;
285 case 36000:
286 rate_idx = 9;
287 break;
288 case 48000:
289 rate_idx = 10;
290 break;
291 case 54000:
292 rate_idx = 11;
293 break;
294 default:
295 break;
296 }
297
298 if (band == IEEE80211_BAND_5GHZ) {
299 if (rate_idx > 3)
300 /* Omit CCK rates */
301 rate_idx -= 4;
302 else
303 rate_idx = 0;
304 }
305
306 return rate_idx;
307}
308
/* Handle WMI_MGMT_RX_EVENTID: decode the rx header into mac80211 rx
 * status, strip the WMI header and trailer, and pass the management
 * frame to ieee80211_rx(). Frames with decrypt/key-cache errors are
 * dropped. Always returns 0; skb ownership always leaves this function.
 */
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u32 rx_status;
	u32 channel;
	u32 phy_mode;
	u32 snr;
	u32 rate;
	u32 buf_len;
	u16 fc;

	channel   = __le32_to_cpu(event->hdr.channel);
	buf_len   = __le32_to_cpu(event->hdr.buf_len);
	rx_status = __le32_to_cpu(event->hdr.status);
	snr       = __le32_to_cpu(event->hdr.snr);
	phy_mode  = __le32_to_cpu(event->hdr.phy_mode);
	rate	  = __le32_to_cpu(event->hdr.rate);

	memset(status, 0, sizeof(*status));

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx status %08x\n", rx_status);

	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_CRC)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (rx_status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	status->band = phy_mode_to_band(phy_mode);
	status->freq = ieee80211_channel_to_frequency(channel, status->band);
	/* snr is reported relative to the noise floor */
	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = get_rate_idx(rate, status->band);

	skb_pull(skb, sizeof(event->hdr));

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	if (fc & IEEE80211_FCTL_PROTECTED) {
		/* hw already stripped IV/MIC - clear the protected bit
		 * so mac80211 does not attempt decryption */
		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;
		hdr->frame_control = __cpu_to_le16(fc &
					~IEEE80211_FCTL_PROTECTED);
	}

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	/*
	 * packets from HTC come aligned to 4byte boundaries
	 * because they can originally come in along with a trailer
	 */
	skb_trim(skb, buf_len);

	ieee80211_rx(ar->hw, skb);
	return 0;
}
385
/* WMI_CHAN_INFO_EVENTID: not implemented yet, only logged. */
static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n");
}
390
/* WMI_ECHO_EVENTID: not implemented yet, only logged. */
static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}
395
/* WMI_DEBUG_MESG_EVENTID: not implemented yet, only logged. */
static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
}
400
/* WMI_UPDATE_STATS_EVENTID: forward the firmware stats payload to the
 * debugfs stats reader.
 */
static void ath10k_wmi_event_update_stats(struct ath10k *ar,
					  struct sk_buff *skb)
{
	struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");

	ath10k_debug_read_target_stats(ar, ev);
}
410
/* WMI_VDEV_START_RESP_EVENTID: complete vdev_setup_done so the vdev
 * start path can proceed. A non-zero firmware status is unexpected and
 * leaves the waiter to time out (WARN_ON for visibility).
 */
static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
					     struct sk_buff *skb)
{
	struct wmi_vdev_start_response_event *ev;

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");

	ev = (struct wmi_vdev_start_response_event *)skb->data;

	if (WARN_ON(__le32_to_cpu(ev->status)))
		return;

	complete(&ar->vdev_setup_done);
}
425
/* WMI_VDEV_STOPPED_EVENTID: wake the vdev stop waiter. */
static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
					  struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
	complete(&ar->vdev_setup_done);
}
432
/* WMI_PEER_STA_KICKOUT_EVENTID: not implemented yet, only logged. */
static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
}
438
439/*
440 * FIXME
441 *
442 * We don't report to mac80211 sleep state of connected
443 * stations. Due to this mac80211 can't fill in TIM IE
444 * correctly.
445 *
446 * I know of no way of getting nullfunc frames that contain
447 * sleep transition from connected stations - these do not
448 * seem to be sent from the target to the host. There also
449 * doesn't seem to be a dedicated event for that. So the
450 * only way left to do this would be to read tim_bitmap
451 * during SWBA.
452 *
453 * We could probably try using tim_bitmap from SWBA to tell
454 * mac80211 which stations are asleep and which are not. The
455 * problem here is calling mac80211 functions so many times
456 * could take too long and make us miss the time to submit
457 * the beacon to the target.
458 *
459 * So as a workaround we try to extend the TIM IE if there
460 * is unicast buffered for stations with aid > 7 and fill it
461 * in ourselves.
462 */
/* Patch the TIM IE of a mac80211-generated beacon with the firmware's
 * tim_bitmap from the SWBA event, expanding the IE in place when the
 * cached bitmap is longer than the partial virtual map mac80211 wrote.
 * See the FIXME block above for why the host does this at all.
 */
static void ath10k_wmi_update_tim(struct ath10k *ar,
				  struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
	struct ieee80211_tim_ie *tim;
	u8 *ies, *ie;
	u8 ie_len, pvm_len;

	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
	 * we must copy the bitmap upon change and reuse it later */
	if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
		int i;

		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
			     sizeof(bcn_info->tim_info.tim_bitmap));

		/* unpack the __le32 words into a byte array */
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
			__le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
			u32 v = __le32_to_cpu(t);
			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
		}

		/* FW reports either length 0 or 16
		 * so we calculate this on our own */
		arvif->u.ap.tim_len = 0;
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
			if (arvif->u.ap.tim_bitmap[i])
				arvif->u.ap.tim_len = i;

		arvif->u.ap.tim_len++;
	}

	/* skip 802.11 header and the fixed beacon fields to reach the IEs */
	ies = bcn->data;
	ies += ieee80211_hdrlen(hdr->frame_control);
	ies += 12; /* fixed parameters */

	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
				    (u8 *)skb_tail_pointer(bcn) - ies);
	if (!ie) {
		/* highly unlikely for mac80211 */
		ath10k_warn("no tim ie found;\n");
		return;
	}

	tim = (void *)ie + 2;
	ie_len = ie[1];
	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */

	if (pvm_len < arvif->u.ap.tim_len) {
		/* grow the TIM IE in place and shift trailing IEs back;
		 * assumes the beacon skb has tailroom - TODO confirm */
		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
		void *next_ie = ie + 2 + ie_len;

		if (skb_put(bcn, expand_size)) {
			memmove(next_ie + expand_size, next_ie, move_size);

			ie[1] += expand_size;
			ie_len += expand_size;
			pvm_len += expand_size;
		} else {
			ath10k_warn("tim expansion failed\n");
		}
	}

	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
		ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
		return;
	}

	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);

	ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
		   tim->dtim_count, tim->dtim_period,
		   tim->bitmap_ctrl, pvm_len);
}
541
/* Serialize the firmware NoA report into a P2P vendor IE (WFA OUI +
 * Notice-of-Absence attribute) at @data. @len must have been computed
 * by ath10k_p2p_calc_noa_ie_len() for the same @noa.
 */
static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
				   struct wmi_p2p_noa_info *noa)
{
	struct ieee80211_p2p_noa_attr *noa_attr;
	u8 ctwindow_oppps = noa->ctwindow_oppps;
	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
	__le16 *noa_attr_len;
	u16 attr_len;
	u8 noa_descriptors = noa->num_descriptors;
	int i;

	/* P2P IE */
	data[0] = WLAN_EID_VENDOR_SPECIFIC;
	data[1] = len - 2;	/* IE length excludes EID + len octets */
	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
	data[5] = WLAN_OUI_TYPE_WFA_P2P;

	/* NOA ATTR */
	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];

	noa_attr->index = noa->index;
	noa_attr->oppps_ctwindow = ctwindow;
	if (oppps)
		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;

	for (i = 0; i < noa_descriptors; i++) {
		noa_attr->desc[i].count =
			__le32_to_cpu(noa->descriptors[i].type_count);
		noa_attr->desc[i].duration = noa->descriptors[i].duration;
		noa_attr->desc[i].interval = noa->descriptors[i].interval;
		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
	}

	attr_len = 2; /* index + oppps_ctwindow */
	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
	*noa_attr_len = __cpu_to_le16(attr_len);
}
584
585static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
586{
587 u32 len = 0;
588 u8 noa_descriptors = noa->num_descriptors;
589 u8 opp_ps_info = noa->ctwindow_oppps;
590 bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
591
592
593 if (!noa_descriptors && !opps_enabled)
594 return len;
595
596 len += 1 + 1 + 4; /* EID + len + OUI */
597 len += 1 + 2; /* noa attr + attr len */
598 len += 1 + 1; /* index + oppps_ctwindow */
599 len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
600
601 return len;
602}
603
/* For P2P GO vdevs: rebuild the cached NoA IE when the firmware flags
 * a change, then append the cached IE to the beacon. On any build
 * failure the cached IE is dropped entirely (cleanup path).
 */
static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
	u8 *new_data, *old_data = arvif->u.ap.noa_data;
	u32 new_len;

	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
		return;

	ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
		new_len = ath10k_p2p_calc_noa_ie_len(noa);
		if (!new_len)
			goto cleanup;

		new_data = kmalloc(new_len, GFP_ATOMIC);
		if (!new_data)
			goto cleanup;

		ath10k_p2p_fill_noa_ie(new_data, new_len, noa);

		/* publish pointer+len atomically w.r.t. data_lock readers;
		 * NOTE(review): old_data is freed right after unlock -
		 * assumes no reader holds it across the lock, confirm */
		spin_lock_bh(&ar->data_lock);
		arvif->u.ap.noa_data = new_data;
		arvif->u.ap.noa_len = new_len;
		spin_unlock_bh(&ar->data_lock);
		kfree(old_data);
	}

	if (arvif->u.ap.noa_data)
		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
			       arvif->u.ap.noa_data,
			       arvif->u.ap.noa_len);
	return;

cleanup:
	spin_lock_bh(&ar->data_lock);
	arvif->u.ap.noa_data = NULL;
	arvif->u.ap.noa_len = 0;
	spin_unlock_bh(&ar->data_lock);
	kfree(old_data);
}
648
649
650static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
651{
652 struct wmi_host_swba_event *ev;
653 u32 map;
654 int i = -1;
655 struct wmi_bcn_info *bcn_info;
656 struct ath10k_vif *arvif;
657 struct wmi_bcn_tx_arg arg;
658 struct sk_buff *bcn;
659 int vdev_id = 0;
660 int ret;
661
662 ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
663
664 ev = (struct wmi_host_swba_event *)skb->data;
665 map = __le32_to_cpu(ev->vdev_map);
666
667 ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
668 "-vdev map 0x%x\n",
669 ev->vdev_map);
670
671 for (; map; map >>= 1, vdev_id++) {
672 if (!(map & 0x1))
673 continue;
674
675 i++;
676
677 if (i >= WMI_MAX_AP_VDEV) {
678 ath10k_warn("swba has corrupted vdev map\n");
679 break;
680 }
681
682 bcn_info = &ev->bcn_info[i];
683
684 ath10k_dbg(ATH10K_DBG_MGMT,
685 "-bcn_info[%d]:\n"
686 "--tim_len %d\n"
687 "--tim_mcast %d\n"
688 "--tim_changed %d\n"
689 "--tim_num_ps_pending %d\n"
690 "--tim_bitmap 0x%08x%08x%08x%08x\n",
691 i,
692 __le32_to_cpu(bcn_info->tim_info.tim_len),
693 __le32_to_cpu(bcn_info->tim_info.tim_mcast),
694 __le32_to_cpu(bcn_info->tim_info.tim_changed),
695 __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
696 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
697 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
698 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
699 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
700
701 arvif = ath10k_get_arvif(ar, vdev_id);
702 if (arvif == NULL) {
703 ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
704 continue;
705 }
706
707 bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
708 if (!bcn) {
709 ath10k_warn("could not get mac80211 beacon\n");
710 continue;
711 }
712
713 ath10k_tx_h_seq_no(bcn);
714 ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
715 ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
716
717 arg.vdev_id = arvif->vdev_id;
718 arg.tx_rate = 0;
719 arg.tx_power = 0;
720 arg.bcn = bcn->data;
721 arg.bcn_len = bcn->len;
722
723 ret = ath10k_wmi_beacon_send(ar, &arg);
724 if (ret)
725 ath10k_warn("could not send beacon (%d)\n", ret);
726
727 dev_kfree_skb_any(bcn);
728 }
729}
730
/* Event stub: WMI_TBTTOFFSET_UPDATE is only logged; payload is ignored. */
static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}
736
/* Event stub: PHY error reports are only logged; payload is ignored. */
static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
}
741
/* Event stub: roam events are only logged; payload is ignored. */
static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
}
746
/* Event stub: profile-match events are only logged; payload is ignored. */
static void ath10k_wmi_event_profile_match(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
}
752
/* Event stub: firmware debug prints are only logged; payload is ignored. */
static void ath10k_wmi_event_debug_print(struct ath10k *ar,
					 struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
}
758
/* Event stub: QVIT events are only logged; payload is ignored. */
static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
}
763
/* Event stub: WLAN profile data is only logged; payload is ignored. */
static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
}
769
/* Event stub: RTT measurement reports are only logged; payload is ignored. */
static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
						    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
}
775
/* Event stub: TSF measurement reports are only logged; payload is ignored. */
static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
						    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
}
781
/* Event stub: RTT error reports are only logged; payload is ignored. */
static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
}
787
/* Event stub: WoW wakeup notifications are only logged; payload is ignored. */
static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
}
793
/* Event stub: DCS interference events are only logged; payload is ignored. */
static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
}
799
/* Event stub: TPC config events are only logged; payload is ignored. */
static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
}
805
/* Event stub: FTM integration events are only logged; payload is ignored. */
static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
}
811
/* Event stub: GTK offload status is only logged; payload is ignored. */
static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
						struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
}
817
/* Event stub: GTK rekey failures are only logged; payload is ignored. */
static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
}
823
/* Event stub: tx DELBA completions are only logged; payload is ignored. */
static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
}
829
/* Event stub: tx ADDBA completions are only logged; payload is ignored. */
static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
}
835
/* Event stub: key-install completions are only logged; payload is ignored. */
static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
						       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}
841
/*
 * Handle WMI_SERVICE_READY: cache firmware capabilities (tx power limits,
 * HT/VHT caps, version fields, regulatory domain, service bitmap) in @ar
 * and signal waiters blocked on ar->wmi.service_ready.
 */
static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
					      struct sk_buff *skb)
{
	struct wmi_service_ready_event *ev = (void *)skb->data;

	/* a short event means the firmware ABI doesn't match ours */
	if (skb->len < sizeof(*ev)) {
		ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
			    skb->len, sizeof(*ev));
		return;
	}

	ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
	ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
	ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
	ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
	/* sw_version packs major in the top byte, minor in the low 24 bits;
	 * sw_version_1 packs release (hi 16) and build (lo 16) */
	ar->fw_version_major =
		(__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
	ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
	ar->fw_version_release =
		(__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
	ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
	ar->phy_capability = __le32_to_cpu(ev->phy_capability);

	ar->ath_common.regulatory.current_rd =
		__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);

	ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
				      sizeof(ev->wmi_service_bitmap));

	/* only fill in wiphy fw_version once */
	if (strlen(ar->hw->wiphy->fw_version) == 0) {
		snprintf(ar->hw->wiphy->fw_version,
			 sizeof(ar->hw->wiphy->fw_version),
			 "%u.%u.%u.%u",
			 ar->fw_version_major,
			 ar->fw_version_minor,
			 ar->fw_version_release,
			 ar->fw_version_build);
	}

	/* FIXME: it probably should be better to support this */
	if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
		ath10k_warn("target requested %d memory chunks; ignoring\n",
			    __le32_to_cpu(ev->num_mem_reqs));
	}

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->sw_version_1),
		   __le32_to_cpu(ev->abi_version),
		   __le32_to_cpu(ev->phy_capability),
		   __le32_to_cpu(ev->ht_cap_info),
		   __le32_to_cpu(ev->vht_cap_info),
		   __le32_to_cpu(ev->vht_supp_mcs),
		   __le32_to_cpu(ev->sys_cap_info),
		   __le32_to_cpu(ev->num_mem_reqs));

	complete(&ar->wmi.service_ready);
}
901
/*
 * Handle WMI_READY: record the device MAC address and signal waiters
 * blocked on ar->wmi.unified_ready.  Returns -EINVAL on a truncated event.
 */
static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;

	if (WARN_ON(skb->len < sizeof(*ev)))
		return -EINVAL;

	memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->abi_version),
		   ev->mac_addr.addr,
		   __le32_to_cpu(ev->status));

	complete(&ar->wmi.unified_ready);
	return 0;
}
921
922static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
923{
924 struct wmi_cmd_hdr *cmd_hdr;
925 enum wmi_event_id id;
926 u16 len;
927
928 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
929 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
930
931 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
932 return;
933
934 len = skb->len;
935
936 trace_ath10k_wmi_event(id, skb->data, skb->len);
937
938 switch (id) {
939 case WMI_MGMT_RX_EVENTID:
940 ath10k_wmi_event_mgmt_rx(ar, skb);
941 /* mgmt_rx() owns the skb now! */
942 return;
943 case WMI_SCAN_EVENTID:
944 ath10k_wmi_event_scan(ar, skb);
945 break;
946 case WMI_CHAN_INFO_EVENTID:
947 ath10k_wmi_event_chan_info(ar, skb);
948 break;
949 case WMI_ECHO_EVENTID:
950 ath10k_wmi_event_echo(ar, skb);
951 break;
952 case WMI_DEBUG_MESG_EVENTID:
953 ath10k_wmi_event_debug_mesg(ar, skb);
954 break;
955 case WMI_UPDATE_STATS_EVENTID:
956 ath10k_wmi_event_update_stats(ar, skb);
957 break;
958 case WMI_VDEV_START_RESP_EVENTID:
959 ath10k_wmi_event_vdev_start_resp(ar, skb);
960 break;
961 case WMI_VDEV_STOPPED_EVENTID:
962 ath10k_wmi_event_vdev_stopped(ar, skb);
963 break;
964 case WMI_PEER_STA_KICKOUT_EVENTID:
965 ath10k_wmi_event_peer_sta_kickout(ar, skb);
966 break;
967 case WMI_HOST_SWBA_EVENTID:
968 ath10k_wmi_event_host_swba(ar, skb);
969 break;
970 case WMI_TBTTOFFSET_UPDATE_EVENTID:
971 ath10k_wmi_event_tbttoffset_update(ar, skb);
972 break;
973 case WMI_PHYERR_EVENTID:
974 ath10k_wmi_event_phyerr(ar, skb);
975 break;
976 case WMI_ROAM_EVENTID:
977 ath10k_wmi_event_roam(ar, skb);
978 break;
979 case WMI_PROFILE_MATCH:
980 ath10k_wmi_event_profile_match(ar, skb);
981 break;
982 case WMI_DEBUG_PRINT_EVENTID:
983 ath10k_wmi_event_debug_print(ar, skb);
984 break;
985 case WMI_PDEV_QVIT_EVENTID:
986 ath10k_wmi_event_pdev_qvit(ar, skb);
987 break;
988 case WMI_WLAN_PROFILE_DATA_EVENTID:
989 ath10k_wmi_event_wlan_profile_data(ar, skb);
990 break;
991 case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
992 ath10k_wmi_event_rtt_measurement_report(ar, skb);
993 break;
994 case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
995 ath10k_wmi_event_tsf_measurement_report(ar, skb);
996 break;
997 case WMI_RTT_ERROR_REPORT_EVENTID:
998 ath10k_wmi_event_rtt_error_report(ar, skb);
999 break;
1000 case WMI_WOW_WAKEUP_HOST_EVENTID:
1001 ath10k_wmi_event_wow_wakeup_host(ar, skb);
1002 break;
1003 case WMI_DCS_INTERFERENCE_EVENTID:
1004 ath10k_wmi_event_dcs_interference(ar, skb);
1005 break;
1006 case WMI_PDEV_TPC_CONFIG_EVENTID:
1007 ath10k_wmi_event_pdev_tpc_config(ar, skb);
1008 break;
1009 case WMI_PDEV_FTM_INTG_EVENTID:
1010 ath10k_wmi_event_pdev_ftm_intg(ar, skb);
1011 break;
1012 case WMI_GTK_OFFLOAD_STATUS_EVENTID:
1013 ath10k_wmi_event_gtk_offload_status(ar, skb);
1014 break;
1015 case WMI_GTK_REKEY_FAIL_EVENTID:
1016 ath10k_wmi_event_gtk_rekey_fail(ar, skb);
1017 break;
1018 case WMI_TX_DELBA_COMPLETE_EVENTID:
1019 ath10k_wmi_event_delba_complete(ar, skb);
1020 break;
1021 case WMI_TX_ADDBA_COMPLETE_EVENTID:
1022 ath10k_wmi_event_addba_complete(ar, skb);
1023 break;
1024 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
1025 ath10k_wmi_event_vdev_install_key_complete(ar, skb);
1026 break;
1027 case WMI_SERVICE_READY_EVENTID:
1028 ath10k_wmi_service_ready_event_rx(ar, skb);
1029 break;
1030 case WMI_READY_EVENTID:
1031 ath10k_wmi_ready_event_rx(ar, skb);
1032 break;
1033 default:
1034 ath10k_warn("Unknown eventid: %d\n", id);
1035 break;
1036 }
1037
1038 dev_kfree_skb(skb);
1039}
1040
1041static void ath10k_wmi_event_work(struct work_struct *work)
1042{
1043 struct ath10k *ar = container_of(work, struct ath10k,
1044 wmi.wmi_event_work);
1045 struct sk_buff *skb;
1046
1047 for (;;) {
1048 skb = skb_dequeue(&ar->wmi.wmi_event_list);
1049 if (!skb)
1050 break;
1051
1052 ath10k_wmi_event_process(ar, skb);
1053 }
1054}
1055
1056static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
1057{
1058 struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
1059 enum wmi_event_id event_id;
1060
1061 event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
1062
1063 /* some events require to be handled ASAP
1064 * thus can't be defered to a worker thread */
1065 switch (event_id) {
1066 case WMI_HOST_SWBA_EVENTID:
1067 case WMI_MGMT_RX_EVENTID:
1068 ath10k_wmi_event_process(ar, skb);
1069 return;
1070 default:
1071 break;
1072 }
1073
1074 skb_queue_tail(&ar->wmi.wmi_event_list, skb);
1075 queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
1076}
1077
1078/* WMI Initialization functions */
1079int ath10k_wmi_attach(struct ath10k *ar)
1080{
1081 init_completion(&ar->wmi.service_ready);
1082 init_completion(&ar->wmi.unified_ready);
1083 init_waitqueue_head(&ar->wmi.wq);
1084
1085 skb_queue_head_init(&ar->wmi.wmi_event_list);
1086 INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
1087
1088 return 0;
1089}
1090
/* Tear down WMI state.  The worker must be cancelled before purging the
 * queue so no event is processed while (or after) being freed. */
void ath10k_wmi_detach(struct ath10k *ar)
{
	/* HTC should've drained the packets already */
	if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
		ath10k_warn("there are still pending packets\n");

	cancel_work_sync(&ar->wmi.wmi_event_work);
	skb_queue_purge(&ar->wmi.wmi_event_list);
}
1100
1101int ath10k_wmi_connect_htc_service(struct ath10k *ar)
1102{
1103 int status;
1104 struct ath10k_htc_svc_conn_req conn_req;
1105 struct ath10k_htc_svc_conn_resp conn_resp;
1106
1107 memset(&conn_req, 0, sizeof(conn_req));
1108 memset(&conn_resp, 0, sizeof(conn_resp));
1109
1110 /* these fields are the same for all service endpoints */
1111 conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
1112 conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
1113
1114 /* connect to control service */
1115 conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
1116
1117 status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp);
1118 if (status) {
1119 ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
1120 status);
1121 return status;
1122 }
1123
1124 ar->wmi.eid = conn_resp.eid;
1125 return 0;
1126}
1127
1128int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
1129 u16 rd5g, u16 ctl2g, u16 ctl5g)
1130{
1131 struct wmi_pdev_set_regdomain_cmd *cmd;
1132 struct sk_buff *skb;
1133
1134 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1135 if (!skb)
1136 return -ENOMEM;
1137
1138 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
1139 cmd->reg_domain = __cpu_to_le32(rd);
1140 cmd->reg_domain_2G = __cpu_to_le32(rd2g);
1141 cmd->reg_domain_5G = __cpu_to_le32(rd5g);
1142 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
1143 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
1144
1145 ath10k_dbg(ATH10K_DBG_WMI,
1146 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
1147 rd, rd2g, rd5g, ctl2g, ctl5g);
1148
1149 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
1150}
1151
1152int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
1153 const struct wmi_channel_arg *arg)
1154{
1155 struct wmi_set_channel_cmd *cmd;
1156 struct sk_buff *skb;
1157
1158 if (arg->passive)
1159 return -EINVAL;
1160
1161 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1162 if (!skb)
1163 return -ENOMEM;
1164
1165 cmd = (struct wmi_set_channel_cmd *)skb->data;
1166 cmd->chan.mhz = __cpu_to_le32(arg->freq);
1167 cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
1168 cmd->chan.mode = arg->mode;
1169 cmd->chan.min_power = arg->min_power;
1170 cmd->chan.max_power = arg->max_power;
1171 cmd->chan.reg_power = arg->max_reg_power;
1172 cmd->chan.reg_classid = arg->reg_class_id;
1173 cmd->chan.antenna_max = arg->max_antenna_gain;
1174
1175 ath10k_dbg(ATH10K_DBG_WMI,
1176 "wmi set channel mode %d freq %d\n",
1177 arg->mode, arg->freq);
1178
1179 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
1180}
1181
1182int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
1183{
1184 struct wmi_pdev_suspend_cmd *cmd;
1185 struct sk_buff *skb;
1186
1187 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1188 if (!skb)
1189 return -ENOMEM;
1190
1191 cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1192 cmd->suspend_opt = WMI_PDEV_SUSPEND;
1193
1194 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
1195}
1196
1197int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
1198{
1199 struct sk_buff *skb;
1200
1201 skb = ath10k_wmi_alloc_skb(0);
1202 if (skb == NULL)
1203 return -ENOMEM;
1204
1205 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
1206}
1207
1208int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
1209 u32 value)
1210{
1211 struct wmi_pdev_set_param_cmd *cmd;
1212 struct sk_buff *skb;
1213
1214 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1215 if (!skb)
1216 return -ENOMEM;
1217
1218 cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
1219 cmd->param_id = __cpu_to_le32(id);
1220 cmd->param_value = __cpu_to_le32(value);
1221
1222 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
1223 id, value);
1224 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
1225}
1226
/*
 * Send WMI_INIT with the full target resource configuration built from
 * the TARGET_* compile-time constants.  No host memory chunks are
 * provided (num_host_mem_chunks = 0).
 */
int ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config config = {};
	u32 val;

	/* vdev/peer/key table sizing */
	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);

	config.num_offload_reorder_bufs =
		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);

	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
	/* per-AC rx timeouts: voice/video/best-effort low, background high */
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);

	/* scan / roam / bmiss offload limits */
	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);

	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);

	config.gtk_offload_max_vdev =
		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);

	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);

	buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)buf->data;
	cmd->num_host_mem_chunks = 0;
	memcpy(&cmd->resource_config, &config, sizeof(config));

	ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
	return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
}
1296
1297static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
1298{
1299 int len;
1300
1301 len = sizeof(struct wmi_start_scan_cmd);
1302
1303 if (arg->ie_len) {
1304 if (!arg->ie)
1305 return -EINVAL;
1306 if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
1307 return -EINVAL;
1308
1309 len += sizeof(struct wmi_ie_data);
1310 len += roundup(arg->ie_len, 4);
1311 }
1312
1313 if (arg->n_channels) {
1314 if (!arg->channels)
1315 return -EINVAL;
1316 if (arg->n_channels > ARRAY_SIZE(arg->channels))
1317 return -EINVAL;
1318
1319 len += sizeof(struct wmi_chan_list);
1320 len += sizeof(__le32) * arg->n_channels;
1321 }
1322
1323 if (arg->n_ssids) {
1324 if (!arg->ssids)
1325 return -EINVAL;
1326 if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
1327 return -EINVAL;
1328
1329 len += sizeof(struct wmi_ssid_list);
1330 len += sizeof(struct wmi_ssid) * arg->n_ssids;
1331 }
1332
1333 if (arg->n_bssids) {
1334 if (!arg->bssids)
1335 return -EINVAL;
1336 if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
1337 return -EINVAL;
1338
1339 len += sizeof(struct wmi_bssid_list);
1340 len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
1341 }
1342
1343 return len;
1344}
1345
/*
 * Build and send a WMI start-scan command: fixed header followed by
 * optional TLVs (channel list, SSID list, BSSID list, extra IEs) in that
 * order.  The running offset is cross-checked against the size computed
 * by ath10k_wmi_start_scan_calc_len() before sending.
 */
int ath10k_wmi_start_scan(struct ath10k *ar,
			  const struct wmi_start_scan_arg *arg)
{
	struct wmi_start_scan_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_ie_data *ie;
	struct wmi_chan_list *channels;
	struct wmi_ssid_list *ssids;
	struct wmi_bssid_list *bssids;
	u32 scan_id;
	u32 scan_req_id;
	int off;
	int len = 0;
	int i;

	len = ath10k_wmi_start_scan_calc_len(arg);
	if (len < 0)
		return len; /* len contains error code here */

	skb = ath10k_wmi_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	/* host-originated ids are tagged with the host prefixes */
	scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
	scan_id |= arg->scan_id;

	scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
	scan_req_id |= arg->scan_req_id;

	cmd = (struct wmi_start_scan_cmd *)skb->data;
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(scan_req_id);
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
	cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
	cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
	cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
	cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
	cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
	cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
	cmd->idle_time = __cpu_to_le32(arg->idle_time);
	cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
	cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
	cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);

	/* TLV list starts after fields included in the struct */
	off = sizeof(*cmd);

	if (arg->n_channels) {
		channels = (void *)skb->data + off;
		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
		channels->num_chan = __cpu_to_le32(arg->n_channels);

		for (i = 0; i < arg->n_channels; i++)
			channels->channel_list[i] =
				__cpu_to_le32(arg->channels[i]);

		off += sizeof(*channels);
		off += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		ssids = (void *)skb->data + off;
		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);

		for (i = 0; i < arg->n_ssids; i++) {
			ssids->ssids[i].ssid_len =
				__cpu_to_le32(arg->ssids[i].len);
			memcpy(&ssids->ssids[i].ssid,
			       arg->ssids[i].ssid,
			       arg->ssids[i].len);
		}

		off += sizeof(*ssids);
		off += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		bssids = (void *)skb->data + off;
		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);

		for (i = 0; i < arg->n_bssids; i++)
			memcpy(&bssids->bssid_list[i],
			       arg->bssids[i].bssid,
			       ETH_ALEN);

		off += sizeof(*bssids);
		off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	if (arg->ie_len) {
		ie = (void *)skb->data + off;
		ie->tag = __cpu_to_le32(WMI_IE_TAG);
		ie->ie_len = __cpu_to_le32(arg->ie_len);
		memcpy(ie->ie_data, arg->ie, arg->ie_len);

		off += sizeof(*ie);
		off += roundup(arg->ie_len, 4);
	}

	/* final offset must match the precomputed buffer size exactly */
	if (off != skb->len) {
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
	return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
}
1457
1458void ath10k_wmi_start_scan_init(struct ath10k *ar,
1459 struct wmi_start_scan_arg *arg)
1460{
1461 /* setup commonly used values */
1462 arg->scan_req_id = 1;
1463 arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
1464 arg->dwell_time_active = 50;
1465 arg->dwell_time_passive = 150;
1466 arg->min_rest_time = 50;
1467 arg->max_rest_time = 500;
1468 arg->repeat_probe_time = 0;
1469 arg->probe_spacing_time = 0;
1470 arg->idle_time = 0;
1471 arg->max_scan_time = 5000;
1472 arg->probe_delay = 5;
1473 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
1474 | WMI_SCAN_EVENT_COMPLETED
1475 | WMI_SCAN_EVENT_BSS_CHANNEL
1476 | WMI_SCAN_EVENT_FOREIGN_CHANNEL
1477 | WMI_SCAN_EVENT_DEQUEUED;
1478 arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
1479 arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
1480 arg->n_bssids = 1;
1481 arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
1482}
1483
/*
 * Send a stop-scan command.  @arg->u is a union: scan_id is meaningful
 * for WMI_SCAN_STOP_ONE, vdev_id for per-vdev stops; both are copied
 * into the command regardless (firmware ignores the irrelevant one —
 * presumably; confirm against the WMI spec).  Host id prefixes are
 * re-applied to match those used in ath10k_wmi_start_scan().
 */
int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	u32 scan_id;
	u32 req_id;

	/* ids above 0xFFF would collide with the host prefix bits */
	if (arg->req_id > 0xFFF)
		return -EINVAL;
	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	scan_id = arg->u.scan_id;
	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;

	req_id = arg->req_id;
	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;
	cmd->req_type = __cpu_to_le32(arg->req_type);
	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(req_id);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
		   arg->req_id, arg->req_type, arg->u.scan_id);
	return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
}
1517
1518int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
1519 enum wmi_vdev_type type,
1520 enum wmi_vdev_subtype subtype,
1521 const u8 macaddr[ETH_ALEN])
1522{
1523 struct wmi_vdev_create_cmd *cmd;
1524 struct sk_buff *skb;
1525
1526 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1527 if (!skb)
1528 return -ENOMEM;
1529
1530 cmd = (struct wmi_vdev_create_cmd *)skb->data;
1531 cmd->vdev_id = __cpu_to_le32(vdev_id);
1532 cmd->vdev_type = __cpu_to_le32(type);
1533 cmd->vdev_subtype = __cpu_to_le32(subtype);
1534 memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
1535
1536 ath10k_dbg(ATH10K_DBG_WMI,
1537 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
1538 vdev_id, type, subtype, macaddr);
1539
1540 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
1541}
1542
1543int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
1544{
1545 struct wmi_vdev_delete_cmd *cmd;
1546 struct sk_buff *skb;
1547
1548 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1549 if (!skb)
1550 return -ENOMEM;
1551
1552 cmd = (struct wmi_vdev_delete_cmd *)skb->data;
1553 cmd->vdev_id = __cpu_to_le32(vdev_id);
1554
1555 ath10k_dbg(ATH10K_DBG_WMI,
1556 "WMI vdev delete id %d\n", vdev_id);
1557
1558 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
1559}
1560
/*
 * Common worker for vdev start and restart: validates the SSID arguments,
 * packs the start-request command (beacon timing, flags, SSID, channel)
 * and sends it under @cmd_id.  Returns -EINVAL for an unknown cmd_id or
 * inconsistent SSID settings, -ENOMEM on allocation failure.
 */
static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
				const struct wmi_vdev_start_request_arg *arg,
				enum wmi_cmd_id cmd_id)
{
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	const char *cmdname;
	u32 flags = 0;

	if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
	    cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
		return -EINVAL;
	/* an SSID, if present, must be non-empty; hidden implies present */
	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
		return -EINVAL;
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return -EINVAL;
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
		cmdname = "start";
	else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
		cmdname = "restart";
	else
		return -EINVAL; /* should not happen, we already check cmd_id */

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);

	cmd->chan.band_center_freq1 =
		__cpu_to_le32(arg->channel.band_center_freq1);

	/* remaining channel fields are single bytes, no byte swapping */
	cmd->chan.mode = arg->channel.mode;
	cmd->chan.min_power = arg->channel.min_power;
	cmd->chan.max_power = arg->channel.max_power;
	cmd->chan.reg_power = arg->channel.max_reg_power;
	cmd->chan.reg_classid = arg->channel.reg_class_id;
	cmd->chan.antenna_max = arg->channel.max_antenna_gain;

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X,"
		   "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq,
		   arg->channel.mode, flags, arg->channel.max_power);

	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
1629
/* Thin wrapper: issue a vdev start request. */
int ath10k_wmi_vdev_start(struct ath10k *ar,
			  const struct wmi_vdev_start_request_arg *arg)
{
	return ath10k_wmi_vdev_start_restart(ar, arg,
					     WMI_VDEV_START_REQUEST_CMDID);
}
1636
/* Thin wrapper: issue a vdev restart request. */
int ath10k_wmi_vdev_restart(struct ath10k *ar,
			    const struct wmi_vdev_start_request_arg *arg)
{
	return ath10k_wmi_vdev_start_restart(ar, arg,
					     WMI_VDEV_RESTART_REQUEST_CMDID);
}
1643
1644int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
1645{
1646 struct wmi_vdev_stop_cmd *cmd;
1647 struct sk_buff *skb;
1648
1649 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1650 if (!skb)
1651 return -ENOMEM;
1652
1653 cmd = (struct wmi_vdev_stop_cmd *)skb->data;
1654 cmd->vdev_id = __cpu_to_le32(vdev_id);
1655
1656 ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
1657
1658 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
1659}
1660
1661int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
1662{
1663 struct wmi_vdev_up_cmd *cmd;
1664 struct sk_buff *skb;
1665
1666 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1667 if (!skb)
1668 return -ENOMEM;
1669
1670 cmd = (struct wmi_vdev_up_cmd *)skb->data;
1671 cmd->vdev_id = __cpu_to_le32(vdev_id);
1672 cmd->vdev_assoc_id = __cpu_to_le32(aid);
1673 memcpy(&cmd->vdev_bssid.addr, bssid, 6);
1674
1675 ath10k_dbg(ATH10K_DBG_WMI,
1676 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
1677 vdev_id, aid, bssid);
1678
1679 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
1680}
1681
1682int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
1683{
1684 struct wmi_vdev_down_cmd *cmd;
1685 struct sk_buff *skb;
1686
1687 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1688 if (!skb)
1689 return -ENOMEM;
1690
1691 cmd = (struct wmi_vdev_down_cmd *)skb->data;
1692 cmd->vdev_id = __cpu_to_le32(vdev_id);
1693
1694 ath10k_dbg(ATH10K_DBG_WMI,
1695 "wmi mgmt vdev down id 0x%x\n", vdev_id);
1696
1697 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
1698}
1699
1700int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
1701 enum wmi_vdev_param param_id, u32 param_value)
1702{
1703 struct wmi_vdev_set_param_cmd *cmd;
1704 struct sk_buff *skb;
1705
1706 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1707 if (!skb)
1708 return -ENOMEM;
1709
1710 cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1711 cmd->vdev_id = __cpu_to_le32(vdev_id);
1712 cmd->param_id = __cpu_to_le32(param_id);
1713 cmd->param_value = __cpu_to_le32(param_value);
1714
1715 ath10k_dbg(ATH10K_DBG_WMI,
1716 "wmi vdev id 0x%x set param %d value %d\n",
1717 vdev_id, param_id, param_value);
1718
1719 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
1720}
1721
1722int ath10k_wmi_vdev_install_key(struct ath10k *ar,
1723 const struct wmi_vdev_install_key_arg *arg)
1724{
1725 struct wmi_vdev_install_key_cmd *cmd;
1726 struct sk_buff *skb;
1727
1728 if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
1729 return -EINVAL;
1730 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
1731 return -EINVAL;
1732
1733 skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
1734 if (!skb)
1735 return -ENOMEM;
1736
1737 cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
1738 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
1739 cmd->key_idx = __cpu_to_le32(arg->key_idx);
1740 cmd->key_flags = __cpu_to_le32(arg->key_flags);
1741 cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
1742 cmd->key_len = __cpu_to_le32(arg->key_len);
1743 cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
1744 cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
1745
1746 if (arg->macaddr)
1747 memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN);
1748 if (arg->key_data)
1749 memcpy(cmd->key_data, arg->key_data, arg->key_len);
1750
1751 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
1752}
1753
1754int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
1755 const u8 peer_addr[ETH_ALEN])
1756{
1757 struct wmi_peer_create_cmd *cmd;
1758 struct sk_buff *skb;
1759
1760 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1761 if (!skb)
1762 return -ENOMEM;
1763
1764 cmd = (struct wmi_peer_create_cmd *)skb->data;
1765 cmd->vdev_id = __cpu_to_le32(vdev_id);
1766 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
1767
1768 ath10k_dbg(ATH10K_DBG_WMI,
1769 "wmi peer create vdev_id %d peer_addr %pM\n",
1770 vdev_id, peer_addr);
1771 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
1772}
1773
1774int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
1775 const u8 peer_addr[ETH_ALEN])
1776{
1777 struct wmi_peer_delete_cmd *cmd;
1778 struct sk_buff *skb;
1779
1780 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1781 if (!skb)
1782 return -ENOMEM;
1783
1784 cmd = (struct wmi_peer_delete_cmd *)skb->data;
1785 cmd->vdev_id = __cpu_to_le32(vdev_id);
1786 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
1787
1788 ath10k_dbg(ATH10K_DBG_WMI,
1789 "wmi peer delete vdev_id %d peer_addr %pM\n",
1790 vdev_id, peer_addr);
1791 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
1792}
1793
1794int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
1795 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
1796{
1797 struct wmi_peer_flush_tids_cmd *cmd;
1798 struct sk_buff *skb;
1799
1800 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1801 if (!skb)
1802 return -ENOMEM;
1803
1804 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
1805 cmd->vdev_id = __cpu_to_le32(vdev_id);
1806 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
1807 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
1808
1809 ath10k_dbg(ATH10K_DBG_WMI,
1810 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
1811 vdev_id, peer_addr, tid_bitmap);
1812 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
1813}
1814
1815int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
1816 const u8 *peer_addr, enum wmi_peer_param param_id,
1817 u32 param_value)
1818{
1819 struct wmi_peer_set_param_cmd *cmd;
1820 struct sk_buff *skb;
1821
1822 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1823 if (!skb)
1824 return -ENOMEM;
1825
1826 cmd = (struct wmi_peer_set_param_cmd *)skb->data;
1827 cmd->vdev_id = __cpu_to_le32(vdev_id);
1828 cmd->param_id = __cpu_to_le32(param_id);
1829 cmd->param_value = __cpu_to_le32(param_value);
1830 memcpy(&cmd->peer_macaddr.addr, peer_addr, 6);
1831
1832 ath10k_dbg(ATH10K_DBG_WMI,
1833 "wmi vdev %d peer 0x%pM set param %d value %d\n",
1834 vdev_id, peer_addr, param_id, param_value);
1835
1836 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
1837}
1838
1839int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
1840 enum wmi_sta_ps_mode psmode)
1841{
1842 struct wmi_sta_powersave_mode_cmd *cmd;
1843 struct sk_buff *skb;
1844
1845 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1846 if (!skb)
1847 return -ENOMEM;
1848
1849 cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
1850 cmd->vdev_id = __cpu_to_le32(vdev_id);
1851 cmd->sta_ps_mode = __cpu_to_le32(psmode);
1852
1853 ath10k_dbg(ATH10K_DBG_WMI,
1854 "wmi set powersave id 0x%x mode %d\n",
1855 vdev_id, psmode);
1856
1857 return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
1858}
1859
1860int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
1861 enum wmi_sta_powersave_param param_id,
1862 u32 value)
1863{
1864 struct wmi_sta_powersave_param_cmd *cmd;
1865 struct sk_buff *skb;
1866
1867 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1868 if (!skb)
1869 return -ENOMEM;
1870
1871 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1872 cmd->vdev_id = __cpu_to_le32(vdev_id);
1873 cmd->param_id = __cpu_to_le32(param_id);
1874 cmd->param_value = __cpu_to_le32(value);
1875
1876 ath10k_dbg(ATH10K_DBG_WMI,
1877 "wmi sta ps param vdev_id 0x%x param %d value %d\n",
1878 vdev_id, param_id, value);
1879 return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1880}
1881
1882int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1883 enum wmi_ap_ps_peer_param param_id, u32 value)
1884{
1885 struct wmi_ap_ps_peer_cmd *cmd;
1886 struct sk_buff *skb;
1887
1888 if (!mac)
1889 return -EINVAL;
1890
1891 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1892 if (!skb)
1893 return -ENOMEM;
1894
1895 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1896 cmd->vdev_id = __cpu_to_le32(vdev_id);
1897 cmd->param_id = __cpu_to_le32(param_id);
1898 cmd->param_value = __cpu_to_le32(value);
1899 memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);
1900
1901 ath10k_dbg(ATH10K_DBG_WMI,
1902 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
1903 vdev_id, param_id, value, mac);
1904
1905 return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1906}
1907
1908int ath10k_wmi_scan_chan_list(struct ath10k *ar,
1909 const struct wmi_scan_chan_list_arg *arg)
1910{
1911 struct wmi_scan_chan_list_cmd *cmd;
1912 struct sk_buff *skb;
1913 struct wmi_channel_arg *ch;
1914 struct wmi_channel *ci;
1915 int len;
1916 int i;
1917
1918 len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
1919
1920 skb = ath10k_wmi_alloc_skb(len);
1921 if (!skb)
1922 return -EINVAL;
1923
1924 cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
1925 cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
1926
1927 for (i = 0; i < arg->n_channels; i++) {
1928 u32 flags = 0;
1929
1930 ch = &arg->channels[i];
1931 ci = &cmd->chan_info[i];
1932
1933 if (ch->passive)
1934 flags |= WMI_CHAN_FLAG_PASSIVE;
1935 if (ch->allow_ibss)
1936 flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
1937 if (ch->allow_ht)
1938 flags |= WMI_CHAN_FLAG_ALLOW_HT;
1939 if (ch->allow_vht)
1940 flags |= WMI_CHAN_FLAG_ALLOW_VHT;
1941 if (ch->ht40plus)
1942 flags |= WMI_CHAN_FLAG_HT40_PLUS;
1943
1944 ci->mhz = __cpu_to_le32(ch->freq);
1945 ci->band_center_freq1 = __cpu_to_le32(ch->freq);
1946 ci->band_center_freq2 = 0;
1947 ci->min_power = ch->min_power;
1948 ci->max_power = ch->max_power;
1949 ci->reg_power = ch->max_reg_power;
1950 ci->antenna_max = ch->max_antenna_gain;
1951 ci->antenna_max = 0;
1952
1953 /* mode & flags share storage */
1954 ci->mode = ch->mode;
1955 ci->flags |= __cpu_to_le32(flags);
1956 }
1957
1958 return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
1959}
1960
/* Send the WMI peer-assoc-complete command, marshalling all of the
 * peer's capabilities (HT/VHT caps, rate sets, power save, phymode)
 * from host-order @arg into the little-endian on-wire command.
 *
 * Returns 0, -EINVAL when @arg exceeds the fixed-size command's
 * limits, -ENOMEM on allocation failure, or the send error.
 */
int ath10k_wmi_peer_assoc(struct ath10k *ar,
			  const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct sk_buff *skb;

	/* reject arguments that would overflow the fixed-size rate arrays
	 * or the mpdu-density field */
	if (arg->peer_mpdu_density > 16)
		return -EINVAL;
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	/* "new assoc" is simply the inverse of a reassociation */
	cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
	cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
	cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);

	memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);

	/* rate entries are single bytes, so only the counts need
	 * endian conversion */
	cmd->peer_legacy_rates.num_rates =
		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	cmd->peer_ht_rates.num_rates =
		__cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	cmd->peer_vht_rates.rx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	cmd->peer_vht_rates.rx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	cmd->peer_vht_rates.tx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	cmd->peer_vht_rates.tx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);

	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
}
2016
2017int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
2018{
2019 struct wmi_bcn_tx_cmd *cmd;
2020 struct sk_buff *skb;
2021
2022 skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
2023 if (!skb)
2024 return -ENOMEM;
2025
2026 cmd = (struct wmi_bcn_tx_cmd *)skb->data;
2027 cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id);
2028 cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate);
2029 cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
2030 cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
2031 memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
2032
2033 return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
2034}
2035
2036static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
2037 const struct wmi_wmm_params_arg *arg)
2038{
2039 params->cwmin = __cpu_to_le32(arg->cwmin);
2040 params->cwmax = __cpu_to_le32(arg->cwmax);
2041 params->aifs = __cpu_to_le32(arg->aifs);
2042 params->txop = __cpu_to_le32(arg->txop);
2043 params->acm = __cpu_to_le32(arg->acm);
2044 params->no_ack = __cpu_to_le32(arg->no_ack);
2045}
2046
2047int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
2048 const struct wmi_pdev_set_wmm_params_arg *arg)
2049{
2050 struct wmi_pdev_set_wmm_params *cmd;
2051 struct sk_buff *skb;
2052
2053 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2054 if (!skb)
2055 return -ENOMEM;
2056
2057 cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
2058 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
2059 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
2060 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
2061 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
2062
2063 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
2064 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
2065}
2066
2067int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
2068{
2069 struct wmi_request_stats_cmd *cmd;
2070 struct sk_buff *skb;
2071
2072 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2073 if (!skb)
2074 return -ENOMEM;
2075
2076 cmd = (struct wmi_request_stats_cmd *)skb->data;
2077 cmd->stats_id = __cpu_to_le32(stats_id);
2078
2079 ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
2080 return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
2081}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
new file mode 100644
index 000000000000..9555f5a0e041
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -0,0 +1,3052 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _WMI_H_
19#define _WMI_H_
20
21#include <linux/types.h>
22#include <net/mac80211.h>
23
24/*
25 * This file specifies the WMI interface for the Unified Software
26 * Architecture.
27 *
28 * It includes definitions of all the commands and events. Commands are
29 * messages from the host to the target. Events and Replies are messages
30 * from the target to the host.
31 *
32 * Ownership of correctness in regards to WMI commands belongs to the host
33 * driver and the target is not required to validate parameters for value,
34 * proper range, or any other checking.
35 *
36 * Guidelines for extending this interface are below.
37 *
38 * 1. Add new WMI commands ONLY within the specified range - 0x9000 - 0x9fff
39 *
40 * 2. Use ONLY u32 type for defining member variables within WMI
41 * command/event structures. Do not use u8, u16, bool or
42 * enum types within these structures.
43 *
44 * 3. DO NOT define bit fields within structures. Implement bit fields
45 * using masks if necessary. Do not use the programming language's bit
46 * field definition.
47 *
48 * 4. Define macros for encode/decode of u8, u16 fields within
49 * the u32 variables. Use these macros for set/get of these fields.
50 * Try to use this to optimize the structure without bloating it with
51 * u32 variables for every lower sized field.
52 *
53 * 5. Do not use PACK/UNPACK attributes for the structures as each member
54 * variable is already 4-byte aligned by virtue of being a u32
55 * type.
56 *
57 * 6. Comment each parameter part of the WMI command/event structure by
58 * using the 2 stars at the beginning of a C comment instead of one star to
59 * enable HTML document generation using Doxygen.
60 *
61 */
62
63/* Control Path */
/* Header prepended to every WMI message; the 32-bit field packs the
 * command id in the low 24 bits (see the CMD_ID/PLT_PRIV masks below).
 */
struct wmi_cmd_hdr {
	__le32 cmd_id;
} __packed;
67
68#define WMI_CMD_HDR_CMD_ID_MASK 0x00FFFFFF
69#define WMI_CMD_HDR_CMD_ID_LSB 0
70#define WMI_CMD_HDR_PLT_PRIV_MASK 0xFF000000
71#define WMI_CMD_HDR_PLT_PRIV_LSB 24
72
73#define HTC_PROTOCOL_VERSION 0x0002
74#define WMI_PROTOCOL_VERSION 0x0002
75
/* Service capabilities the firmware may advertise in its service
 * bitmap. Values are bit positions (up to WMI_MAX_SERVICE) and are
 * part of the host/firmware ABI — do not renumber.
 */
enum wmi_service_id {
	WMI_SERVICE_BEACON_OFFLOAD = 0, /* beacon offload */
	WMI_SERVICE_SCAN_OFFLOAD,	/* scan offload */
	WMI_SERVICE_ROAM_OFFLOAD,	/* roam offload */
	WMI_SERVICE_BCN_MISS_OFFLOAD,	/* beacon miss offload */
	WMI_SERVICE_STA_PWRSAVE,	/* fake sleep + basic power save */
	WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */
	WMI_SERVICE_AP_UAPSD,		/* uapsd on AP */
	WMI_SERVICE_AP_DFS,		/* DFS on AP */
	WMI_SERVICE_11AC,		/* supports 11ac */
	WMI_SERVICE_BLOCKACK,	/* Supports triggering ADDBA/DELBA from host*/
	WMI_SERVICE_PHYERR,		/* PHY error */
	WMI_SERVICE_BCN_FILTER,		/* Beacon filter support */
	WMI_SERVICE_RTT,		/* RTT (round trip time) support */
	WMI_SERVICE_RATECTRL,		/* Rate-control */
	WMI_SERVICE_WOW,		/* WOW Support */
	WMI_SERVICE_RATECTRL_CACHE,	/* Rate-control caching */
	WMI_SERVICE_IRAM_TIDS,		/* TIDs in IRAM */
	WMI_SERVICE_ARPNS_OFFLOAD,	/* ARP NS Offload support */
	WMI_SERVICE_NLO,		/* Network list offload service */
	WMI_SERVICE_GTK_OFFLOAD,	/* GTK offload */
	WMI_SERVICE_SCAN_SCH,		/* Scan Scheduler Service */
	WMI_SERVICE_CSA_OFFLOAD,	/* CSA offload service */
	WMI_SERVICE_CHATTER,		/* Chatter service */
	WMI_SERVICE_COEX_FREQAVOID,	/* FW report freq range to avoid */
	WMI_SERVICE_PACKET_POWER_SAVE,	/* packet power save service */
	WMI_SERVICE_FORCE_FW_HANG,	/* To test fw recovery mechanism */
	WMI_SERVICE_GPIO,		/* GPIO service */
	WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */
	WMI_STA_UAPSD_BASIC_AUTO_TRIG,	/* UAPSD AC Trigger Generation */
	WMI_STA_UAPSD_VAR_AUTO_TRIG,	/* -do- */
	WMI_SERVICE_STA_KEEP_ALIVE,	/* STA keep alive mechanism support */
	WMI_SERVICE_TX_ENCAP,		/* Packet type for TX encapsulation */

	WMI_SERVICE_LAST,
	WMI_MAX_SERVICE = 64		/* max service */
};
113
114static inline char *wmi_service_name(int service_id)
115{
116 switch (service_id) {
117 case WMI_SERVICE_BEACON_OFFLOAD:
118 return "BEACON_OFFLOAD";
119 case WMI_SERVICE_SCAN_OFFLOAD:
120 return "SCAN_OFFLOAD";
121 case WMI_SERVICE_ROAM_OFFLOAD:
122 return "ROAM_OFFLOAD";
123 case WMI_SERVICE_BCN_MISS_OFFLOAD:
124 return "BCN_MISS_OFFLOAD";
125 case WMI_SERVICE_STA_PWRSAVE:
126 return "STA_PWRSAVE";
127 case WMI_SERVICE_STA_ADVANCED_PWRSAVE:
128 return "STA_ADVANCED_PWRSAVE";
129 case WMI_SERVICE_AP_UAPSD:
130 return "AP_UAPSD";
131 case WMI_SERVICE_AP_DFS:
132 return "AP_DFS";
133 case WMI_SERVICE_11AC:
134 return "11AC";
135 case WMI_SERVICE_BLOCKACK:
136 return "BLOCKACK";
137 case WMI_SERVICE_PHYERR:
138 return "PHYERR";
139 case WMI_SERVICE_BCN_FILTER:
140 return "BCN_FILTER";
141 case WMI_SERVICE_RTT:
142 return "RTT";
143 case WMI_SERVICE_RATECTRL:
144 return "RATECTRL";
145 case WMI_SERVICE_WOW:
146 return "WOW";
147 case WMI_SERVICE_RATECTRL_CACHE:
148 return "RATECTRL CACHE";
149 case WMI_SERVICE_IRAM_TIDS:
150 return "IRAM TIDS";
151 case WMI_SERVICE_ARPNS_OFFLOAD:
152 return "ARPNS_OFFLOAD";
153 case WMI_SERVICE_NLO:
154 return "NLO";
155 case WMI_SERVICE_GTK_OFFLOAD:
156 return "GTK_OFFLOAD";
157 case WMI_SERVICE_SCAN_SCH:
158 return "SCAN_SCH";
159 case WMI_SERVICE_CSA_OFFLOAD:
160 return "CSA_OFFLOAD";
161 case WMI_SERVICE_CHATTER:
162 return "CHATTER";
163 case WMI_SERVICE_COEX_FREQAVOID:
164 return "COEX_FREQAVOID";
165 case WMI_SERVICE_PACKET_POWER_SAVE:
166 return "PACKET_POWER_SAVE";
167 case WMI_SERVICE_FORCE_FW_HANG:
168 return "FORCE FW HANG";
169 case WMI_SERVICE_GPIO:
170 return "GPIO";
171 case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM:
172 return "MODULATED DTIM";
173 case WMI_STA_UAPSD_BASIC_AUTO_TRIG:
174 return "BASIC UAPSD";
175 case WMI_STA_UAPSD_VAR_AUTO_TRIG:
176 return "VAR UAPSD";
177 case WMI_SERVICE_STA_KEEP_ALIVE:
178 return "STA KEEP ALIVE";
179 case WMI_SERVICE_TX_ENCAP:
180 return "TX ENCAP";
181 default:
182 return "UNKNOWN SERVICE\n";
183 }
184}
185
186
187#define WMI_SERVICE_BM_SIZE \
188 ((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32))
189
190/* 2 word representation of MAC addr */
/* 2 word representation of MAC addr: the firmware packs the 6 address
 * bytes into word0 (bytes 0-3) and the low half of word1 (bytes 4-5);
 * see WMI_MAC_ADDR_TO_CHAR_ARRAY below for the byte order.
 */
struct wmi_mac_addr {
	union {
		u8 addr[6];
		struct {
			u32 word0;
			u32 word1;
		} __packed;
	} __packed;
} __packed;
200
/* macro to convert MAC address from WMI word format to char array;
 * bytes are extracted little-endian-first from word0, then word1
 */
#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
	(c_macaddr)[0] =  ((pwmi_mac_addr)->word0) & 0xff; \
	(c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
	(c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
	(c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
	(c_macaddr)[4] =  ((pwmi_mac_addr)->word1) & 0xff; \
	(c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
	} while (0)
210
211/*
212 * wmi command groups.
213 */
/*
 * wmi command groups.
 *
 * Each group id is shifted into bits 12+ of a command/event id by
 * WMI_CMD_GRP()/WMI_EVT_GRP_START_ID() below, giving every group a
 * 0x1000-wide id range. Order is ABI — do not reorder.
 */
enum wmi_cmd_group {
	/* 0 to 2 are reserved */
	WMI_GRP_START = 0x3,
	WMI_GRP_SCAN = WMI_GRP_START,
	WMI_GRP_PDEV,
	WMI_GRP_VDEV,
	WMI_GRP_PEER,
	WMI_GRP_MGMT,
	WMI_GRP_BA_NEG,
	WMI_GRP_STA_PS,
	WMI_GRP_DFS,
	WMI_GRP_ROAM,
	WMI_GRP_OFL_SCAN,
	WMI_GRP_P2P,
	WMI_GRP_AP_PS,
	WMI_GRP_RATE_CTRL,
	WMI_GRP_PROFILE,
	WMI_GRP_SUSPEND,
	WMI_GRP_BCN_FILTER,
	WMI_GRP_WOW,
	WMI_GRP_RTT,
	WMI_GRP_SPECTRAL,
	WMI_GRP_STATS,
	WMI_GRP_ARP_NS_OFL,
	WMI_GRP_NLO_OFL,
	WMI_GRP_GTK_OFL,
	WMI_GRP_CSA_OFL,
	WMI_GRP_CHATTER,
	WMI_GRP_TID_ADDBA,
	WMI_GRP_MISC,
	WMI_GRP_GPIO,
};
246
247#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
248#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
249
250/* Command IDs and command events. */
/* Host->target command ids. The first command of each functional
 * group is anchored at WMI_CMD_GRP(group); the rest are sequential
 * within that group's range. Values are firmware ABI — do not
 * reorder or renumber.
 */
enum wmi_cmd_id {
	WMI_INIT_CMDID = 0x1,

	/* Scan specific commands */
	WMI_START_SCAN_CMDID = WMI_CMD_GRP(WMI_GRP_SCAN),
	WMI_STOP_SCAN_CMDID,
	WMI_SCAN_CHAN_LIST_CMDID,
	WMI_SCAN_SCH_PRIO_TBL_CMDID,

	/* PDEV (physical device) specific commands */
	WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_CMD_GRP(WMI_GRP_PDEV),
	WMI_PDEV_SET_CHANNEL_CMDID,
	WMI_PDEV_SET_PARAM_CMDID,
	WMI_PDEV_PKTLOG_ENABLE_CMDID,
	WMI_PDEV_PKTLOG_DISABLE_CMDID,
	WMI_PDEV_SET_WMM_PARAMS_CMDID,
	WMI_PDEV_SET_HT_CAP_IE_CMDID,
	WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	WMI_PDEV_SET_QUIET_MODE_CMDID,
	WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	WMI_PDEV_GET_TPC_CONFIG_CMDID,
	WMI_PDEV_SET_BASE_MACADDR_CMDID,

	/* VDEV (virtual device) specific commands */
	WMI_VDEV_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_VDEV),
	WMI_VDEV_DELETE_CMDID,
	WMI_VDEV_START_REQUEST_CMDID,
	WMI_VDEV_RESTART_REQUEST_CMDID,
	WMI_VDEV_UP_CMDID,
	WMI_VDEV_STOP_CMDID,
	WMI_VDEV_DOWN_CMDID,
	WMI_VDEV_SET_PARAM_CMDID,
	WMI_VDEV_INSTALL_KEY_CMDID,

	/* peer specific commands */
	WMI_PEER_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_PEER),
	WMI_PEER_DELETE_CMDID,
	WMI_PEER_FLUSH_TIDS_CMDID,
	WMI_PEER_SET_PARAM_CMDID,
	WMI_PEER_ASSOC_CMDID,
	WMI_PEER_ADD_WDS_ENTRY_CMDID,
	WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	WMI_PEER_MCAST_GROUP_CMDID,

	/* beacon/management specific commands */
	WMI_BCN_TX_CMDID = WMI_CMD_GRP(WMI_GRP_MGMT),
	WMI_PDEV_SEND_BCN_CMDID,
	WMI_BCN_TMPL_CMDID,
	WMI_BCN_FILTER_RX_CMDID,
	WMI_PRB_REQ_FILTER_RX_CMDID,
	WMI_MGMT_TX_CMDID,
	WMI_PRB_TMPL_CMDID,

	/* commands to directly control BA negotiation directly from host. */
	WMI_ADDBA_CLEAR_RESP_CMDID = WMI_CMD_GRP(WMI_GRP_BA_NEG),
	WMI_ADDBA_SEND_CMDID,
	WMI_ADDBA_STATUS_CMDID,
	WMI_DELBA_SEND_CMDID,
	WMI_ADDBA_SET_RESP_CMDID,
	WMI_SEND_SINGLEAMSDU_CMDID,

	/* Station power save specific config */
	WMI_STA_POWERSAVE_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_STA_PS),
	WMI_STA_POWERSAVE_PARAM_CMDID,
	WMI_STA_MIMO_PS_MODE_CMDID,

	/** DFS-specific commands */
	WMI_PDEV_DFS_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_DFS),
	WMI_PDEV_DFS_DISABLE_CMDID,

	/* Roaming specific commands */
	WMI_ROAM_SCAN_MODE = WMI_CMD_GRP(WMI_GRP_ROAM),
	WMI_ROAM_SCAN_RSSI_THRESHOLD,
	WMI_ROAM_SCAN_PERIOD,
	WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	WMI_ROAM_AP_PROFILE,

	/* offload scan specific commands */
	WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_CMD_GRP(WMI_GRP_OFL_SCAN),
	WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	WMI_OFL_SCAN_PERIOD,

	/* P2P specific commands */
	WMI_P2P_DEV_SET_DEVICE_INFO = WMI_CMD_GRP(WMI_GRP_P2P),
	WMI_P2P_DEV_SET_DISCOVERABILITY,
	WMI_P2P_GO_SET_BEACON_IE,
	WMI_P2P_GO_SET_PROBE_RESP_IE,
	WMI_P2P_SET_VENDOR_IE_DATA_CMDID,

	/* AP power save specific config */
	WMI_AP_PS_PEER_PARAM_CMDID = WMI_CMD_GRP(WMI_GRP_AP_PS),
	WMI_AP_PS_PEER_UAPSD_COEX_CMDID,

	/* Rate-control specific commands */
	WMI_PEER_RATE_RETRY_SCHED_CMDID =
	WMI_CMD_GRP(WMI_GRP_RATE_CTRL),

	/* WLAN Profiling commands. */
	WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_CMD_GRP(WMI_GRP_PROFILE),
	WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,

	/* Suspend resume command Ids */
	WMI_PDEV_SUSPEND_CMDID = WMI_CMD_GRP(WMI_GRP_SUSPEND),
	WMI_PDEV_RESUME_CMDID,

	/* Beacon filter commands */
	WMI_ADD_BCN_FILTER_CMDID = WMI_CMD_GRP(WMI_GRP_BCN_FILTER),
	WMI_RMV_BCN_FILTER_CMDID,

	/* WOW Specific WMI commands*/
	WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_CMD_GRP(WMI_GRP_WOW),
	WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	WMI_WOW_ENABLE_CMDID,
	WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,

	/* RTT measurement related cmd */
	WMI_RTT_MEASREQ_CMDID = WMI_CMD_GRP(WMI_GRP_RTT),
	WMI_RTT_TSF_CMDID,

	/* spectral scan commands */
	WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_CMD_GRP(WMI_GRP_SPECTRAL),
	WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,

	/* F/W stats */
	WMI_REQUEST_STATS_CMDID = WMI_CMD_GRP(WMI_GRP_STATS),

	/* ARP OFFLOAD REQUEST*/
	WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_ARP_NS_OFL),

	/* NS offload config*/
	WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_NLO_OFL),

	/* GTK offload Specific WMI commands*/
	WMI_GTK_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_GTK_OFL),

	/* CSA offload Specific WMI commands*/
	WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_CSA_OFL),
	WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,

	/* Chatter commands*/
	WMI_CHATTER_SET_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_CHATTER),

	/* addba specific commands */
	WMI_PEER_TID_ADDBA_CMDID = WMI_CMD_GRP(WMI_GRP_TID_ADDBA),
	WMI_PEER_TID_DELBA_CMDID,

	/* set station mimo powersave method
	 * (note: these next ids continue sequentially within the
	 * WMI_GRP_TID_ADDBA range — there is no new group anchor) */
	WMI_STA_DTIM_PS_METHOD_CMDID,
	/* Configure the Station UAPSD AC Auto Trigger Parameters */
	WMI_STA_UAPSD_AUTO_TRIG_CMDID,

	/* STA Keep alive parameter configuration,
	   Requires WMI_SERVICE_STA_KEEP_ALIVE */
	WMI_STA_KEEPALIVE_CMD,

	/* misc command group */
	WMI_ECHO_CMDID = WMI_CMD_GRP(WMI_GRP_MISC),
	WMI_PDEV_UTF_CMDID,
	WMI_DBGLOG_CFG_CMDID,
	WMI_PDEV_QVIT_CMDID,
	WMI_PDEV_FTM_INTG_CMDID,
	WMI_VDEV_SET_KEEPALIVE_CMDID,
	WMI_VDEV_GET_KEEPALIVE_CMDID,

	/* GPIO Configuration */
	WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
	WMI_GPIO_OUTPUT_CMDID,
};
424
/* Target->host event ids, grouped the same way as the command ids
 * via WMI_EVT_GRP_START_ID(). Values are firmware ABI — do not
 * reorder or renumber.
 */
enum wmi_event_id {
	WMI_SERVICE_READY_EVENTID = 0x1,
	WMI_READY_EVENTID,

	/* Scan specific events */
	WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),

	/* PDEV specific events */
	WMI_PDEV_TPC_CONFIG_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PDEV),
	WMI_CHAN_INFO_EVENTID,
	WMI_PHYERR_EVENTID,

	/* VDEV specific events */
	WMI_VDEV_START_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_VDEV),
	WMI_VDEV_STOPPED_EVENTID,
	WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,

	/* peer specific events */
	WMI_PEER_STA_KICKOUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PEER),

	/* beacon/mgmt specific events */
	WMI_MGMT_RX_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MGMT),
	WMI_HOST_SWBA_EVENTID,
	WMI_TBTTOFFSET_UPDATE_EVENTID,

	/* ADDBA Related WMI Events*/
	WMI_TX_DELBA_COMPLETE_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_BA_NEG),
	WMI_TX_ADDBA_COMPLETE_EVENTID,

	/* Roam event to trigger roaming on host */
	WMI_ROAM_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_ROAM),
	WMI_PROFILE_MATCH,

	/* WoW */
	WMI_WOW_WAKEUP_HOST_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_WOW),

	/* RTT */
	WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_RTT),
	WMI_TSF_MEASUREMENT_REPORT_EVENTID,
	WMI_RTT_ERROR_REPORT_EVENTID,

	/* GTK offload */
	WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GTK_OFL),
	WMI_GTK_REKEY_FAIL_EVENTID,

	/* CSA IE received event */
	WMI_CSA_HANDLING_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_CSA_OFL),

	/* Misc events */
	WMI_ECHO_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MISC),
	WMI_PDEV_UTF_EVENTID,
	WMI_DEBUG_MESG_EVENTID,
	WMI_UPDATE_STATS_EVENTID,
	WMI_DEBUG_PRINT_EVENTID,
	WMI_DCS_INTERFERENCE_EVENTID,
	WMI_PDEV_QVIT_EVENTID,
	WMI_WLAN_PROFILE_DATA_EVENTID,
	WMI_PDEV_FTM_INTG_EVENTID,
	WMI_WLAN_FREQ_AVOID_EVENTID,
	WMI_VDEV_GET_KEEPALIVE_EVENTID,

	/* GPIO Event */
	WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
};
489
/* PHY operating modes understood by the firmware. Values are
 * firmware ABI — do not renumber. Note MODE_UNKNOWN and MODE_MAX
 * intentionally share the value 14.
 */
enum wmi_phy_mode {
	MODE_11A        = 0,   /* 11a Mode */
	MODE_11G        = 1,   /* 11b/g Mode */
	MODE_11B        = 2,   /* 11b Mode */
	MODE_11GONLY    = 3,   /* 11g only Mode */
	MODE_11NA_HT20   = 4,  /* 11a HT20 mode */
	MODE_11NG_HT20   = 5,  /* 11g HT20 mode */
	MODE_11NA_HT40   = 6,  /* 11a HT40 mode */
	MODE_11NG_HT40   = 7,  /* 11g HT40 mode */
	MODE_11AC_VHT20 = 8,
	MODE_11AC_VHT40 = 9,
	MODE_11AC_VHT80 = 10,
	/*    MODE_11AC_VHT160 = 11, */
	MODE_11AC_VHT20_2G = 11,
	MODE_11AC_VHT40_2G = 12,
	MODE_11AC_VHT80_2G = 13,
	MODE_UNKNOWN    = 14,
	MODE_MAX        = 14
};
509
510#define WMI_CHAN_LIST_TAG 0x1
511#define WMI_SSID_LIST_TAG 0x2
512#define WMI_BSSID_LIST_TAG 0x3
513#define WMI_IE_TAG 0x4
514
/* On-wire channel descriptor. All words are little endian. The
 * anonymous unions let a whole word be written at once or its low
 * bytes be addressed individually — e.g. the low byte of "flags"
 * doubles as "mode" (see the "mode & flags share storage" note in
 * wmi.c's ath10k_wmi_scan_chan_list()).
 */
struct wmi_channel {
	__le32 mhz;
	__le32 band_center_freq1;
	__le32 band_center_freq2; /* valid for 11ac, 80plus80 */
	union {
		__le32 flags; /* WMI_CHAN_FLAG_ */
		struct {
			u8 mode; /* only 6 LSBs */
		} __packed;
	} __packed;
	union {
		__le32 reginfo0;
		struct {
			u8 min_power;
			u8 max_power;
			u8 reg_power;
			u8 reg_classid;
		} __packed;
	} __packed;
	union {
		__le32 reginfo1;
		struct {
			u8 antenna_max;
		} __packed;
	} __packed;
} __packed;
541
/* Host-order channel parameters; translated into the on-wire
 * struct wmi_channel by the wmi.c helpers.
 */
struct wmi_channel_arg {
	u32 freq;		/* channel center frequency in MHz */
	u32 band_center_freq1;
	bool passive;		/* no active probing allowed */
	bool allow_ibss;
	bool allow_ht;
	bool allow_vht;
	bool ht40plus;
	/* note: power unit is 1/4th of dBm */
	u32 min_power;
	u32 max_power;
	u32 max_reg_power;
	u32 max_antenna_gain;
	u32 reg_class_id;
	enum wmi_phy_mode mode;
};
558
/* Reason a channel change happened.
 * NOTE(review): WMI_CHANNEL_CHANGE_CAUSE_CSA is also redefined later
 * in this file as a channel-flag macro "(1 << 13)"; after that
 * #define, uses of the name expand to the macro, not this enumerator
 * — confirm which meaning each call site intends.
 */
enum wmi_channel_change_cause {
	WMI_CHANNEL_CHANGE_CAUSE_NONE = 0,
	WMI_CHANNEL_CHANGE_CAUSE_CSA,
};
563
564#define WMI_CHAN_FLAG_HT40_PLUS (1 << 6)
565#define WMI_CHAN_FLAG_PASSIVE (1 << 7)
566#define WMI_CHAN_FLAG_ADHOC_ALLOWED (1 << 8)
567#define WMI_CHAN_FLAG_AP_DISABLED (1 << 9)
568#define WMI_CHAN_FLAG_DFS (1 << 10)
569#define WMI_CHAN_FLAG_ALLOW_HT (1 << 11)
570#define WMI_CHAN_FLAG_ALLOW_VHT (1 << 12)
571
572/* Indicate reason for channel switch */
573#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
574
575#define WMI_MAX_SPATIAL_STREAM 3
576
/* HT Capabilities (bits of the ht_cap_info field) */
#define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/disabled */
#define WMI_HT_CAP_HT20_SGI 0x0002 /* Short Guard Interval with HT20 */
#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004 /* Dynamic MIMO powersave */
#define WMI_HT_CAP_TX_STBC 0x0008 /* B3 TX STBC */
#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
#define WMI_HT_CAP_RX_STBC 0x0030 /* B4-B5 RX STBC */
#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
#define WMI_HT_CAP_LDPC 0x0040 /* LDPC supported */
#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080 /* L-SIG TXOP Protection */
#define WMI_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */
#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
#define WMI_HT_CAP_HT40_SGI 0x0800 /* Short Guard Interval with HT40 */

/* Default set of HT capabilities advertised by the driver */
#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
				WMI_HT_CAP_HT20_SGI | \
				WMI_HT_CAP_HT40_SGI | \
				WMI_HT_CAP_TX_STBC | \
				WMI_HT_CAP_RX_STBC | \
				WMI_HT_CAP_LDPC)
597
598
/*
 * WMI_VHT_CAP_* these map to the IEEE 802.11ac VHT capability information
 * field. The fields not defined here are not supported, or reserved.
 * Do not change these masks and if you have to add a new one follow the
 * bitmask as specified by the 802.11ac draft.
 */

#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
#define WMI_VHT_CAP_RX_LDPC 0x00000010
#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
#define WMI_VHT_CAP_TX_STBC 0x00000080
#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23
#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000

/* The following also refer to max HT AMSDU */
#define WMI_VHT_CAP_MAX_MPDU_LEN_3839 0x00000000
#define WMI_VHT_CAP_MAX_MPDU_LEN_7935 0x00000001
#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002

/* Default set of VHT capabilities advertised by the driver */
#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
				 WMI_VHT_CAP_RX_LDPC | \
				 WMI_VHT_CAP_SGI_80MHZ | \
				 WMI_VHT_CAP_TX_STBC | \
				 WMI_VHT_CAP_RX_STBC_MASK | \
				 WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
				 WMI_VHT_CAP_RX_FIXED_ANT | \
				 WMI_VHT_CAP_TX_FIXED_ANT)
630
/*
 * Interested readers refer to Rx/Tx MCS Map definition as defined in
 * 802.11ac. Encodes the 2-bit max-MCS value (r) for spatial stream ss
 * (1-based) at bit position 2*(ss-1).
 */
#define WMI_VHT_MAX_MCS_4_SS_MASK(r, ss) ((3 & (r)) << (((ss) - 1) << 1))
#define WMI_VHT_MAX_SUPP_RATE_MASK 0x1fff0000
#define WMI_VHT_MAX_SUPP_RATE_MASK_SHIFT 16
638
/* Regulatory-domain wireless mode bitmask */
enum {
	REGDMN_MODE_11A = 0x00001, /* 11a channels */
	REGDMN_MODE_TURBO = 0x00002, /* 11a turbo-only channels */
	REGDMN_MODE_11B = 0x00004, /* 11b channels */
	REGDMN_MODE_PUREG = 0x00008, /* 11g channels (OFDM only) */
	REGDMN_MODE_11G = 0x00008, /* XXX historical */
	REGDMN_MODE_108G = 0x00020, /* 11g+Turbo channels */
	REGDMN_MODE_108A = 0x00040, /* 11a+Turbo channels */
	REGDMN_MODE_XR = 0x00100, /* XR channels */
	REGDMN_MODE_11A_HALF_RATE = 0x00200, /* 11A half rate channels */
	REGDMN_MODE_11A_QUARTER_RATE = 0x00400, /* 11A quarter rate channels */
	REGDMN_MODE_11NG_HT20 = 0x00800, /* 11N-G HT20 channels */
	REGDMN_MODE_11NA_HT20 = 0x01000, /* 11N-A HT20 channels */
	REGDMN_MODE_11NG_HT40PLUS = 0x02000, /* 11N-G HT40 + channels */
	REGDMN_MODE_11NG_HT40MINUS = 0x04000, /* 11N-G HT40 - channels */
	REGDMN_MODE_11NA_HT40PLUS = 0x08000, /* 11N-A HT40 + channels */
	REGDMN_MODE_11NA_HT40MINUS = 0x10000, /* 11N-A HT40 - channels */
	REGDMN_MODE_11AC_VHT20 = 0x20000, /* 5Ghz, VHT20 */
	REGDMN_MODE_11AC_VHT40PLUS = 0x40000, /* 5Ghz, VHT40 + channels */
	REGDMN_MODE_11AC_VHT40MINUS = 0x80000, /* 5Ghz VHT40 - channels */
	REGDMN_MODE_11AC_VHT80 = 0x100000, /* 5Ghz, VHT80 channels */
	REGDMN_MODE_ALL = 0xffffffff
};
662
/* Regulatory-domain CAP1 channel capability bits */
#define REGDMN_CAP1_CHAN_HALF_RATE 0x00000001
#define REGDMN_CAP1_CHAN_QUARTER_RATE 0x00000002
#define REGDMN_CAP1_CHAN_HAL49GHZ 0x00000004

/* regulatory capabilities (EEPROM) */
#define REGDMN_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
#define REGDMN_EEPROM_EEREGCAP_EN_KK_U2 0x0100
#define REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
#define REGDMN_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
674
/* Regulatory capabilities reported by the target (wire format). */
struct hal_reg_capabilities {
	/* regdomain value specified in EEPROM */
	__le32 eeprom_rd;
	/* regdomain extension */
	__le32 eeprom_rd_ext;
	/* CAP1 capabilities bit map (REGDMN_CAP1_*) */
	__le32 regcap1;
	/* REGDMN EEPROM capabilities (REGDMN_EEPROM_EEREGCAP_*) */
	__le32 regcap2;
	/* REGDMN MODE bitmask (REGDMN_MODE_*) */
	__le32 wireless_modes;
	__le32 low_2ghz_chan;
	__le32 high_2ghz_chan;
	__le32 low_5ghz_chan;
	__le32 high_5ghz_chan;
} __packed;
691
/* Band capability flags (11a, 11g, or both). */
enum wlan_mode_capability {
	WHAL_WLAN_11A_CAPABILITY = 0x1,
	WHAL_WLAN_11G_CAPABILITY = 0x2,
	WHAL_WLAN_11AG_CAPABILITY = 0x3,
};
697
/* structure used by FW for requesting host memory */
struct wlan_host_mem_req {
	/* ID of the request */
	__le32 req_id;
	/* size of each unit */
	__le32 unit_size;
	/* flags to indicate that
	 * the number of units is dependent
	 * on number of resources (num vdevs, num peers, etc.)
	 */
	__le32 num_unit_info;
	/*
	 * actual number of units to allocate. If flags in num_unit_info
	 * indicate that the number of units is tied to the number of a
	 * particular resource to allocate, then the num_units field is set
	 * to 0 and the host will derive the number of units from the number
	 * of the resources it is requesting.
	 */
	__le32 num_units;
} __packed;
718
/* Test whether service @svc_id is set in the service bitmap received in
 * the WMI_SERVICE_READY event.
 *
 * NOTE(review): the word/bit split divides by sizeof(u32) (i.e. 4), not
 * by the number of bits in a u32, so only 4 service bits are used per
 * 32-bit word. This is presumably consistent with how the firmware packs
 * the bitmap (and with WMI_SERVICE_BM_SIZE) - confirm before changing.
 */
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \
	((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
	 (1 << ((svc_id)%(sizeof(u32))))) != 0)
722
/*
 * The following struct holds optional payload for
 * wmi_service_ready_event, e.g. 11ac passes some of the
 * device capability to the host.
 */
struct wmi_service_ready_event {
	__le32 sw_version;
	__le32 sw_version_1;
	__le32 abi_version;
	/* WMI_PHY_CAPABILITY */
	__le32 phy_capability;
	/* Maximum number of frag table entries that SW will populate less 1 */
	__le32 max_frag_entry;
	__le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
	__le32 num_rf_chains;
	/*
	 * The following field is only valid for service type
	 * WMI_SERVICE_11AC
	 */
	__le32 ht_cap_info; /* WMI HT Capability */
	__le32 vht_cap_info; /* VHT capability info field of 802.11ac */
	__le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
	__le32 hw_min_tx_power;
	__le32 hw_max_tx_power;
	struct hal_reg_capabilities hal_reg_capabilities;
	__le32 sys_cap_info;
	__le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
	/*
	 * Max beacon and Probe Response IE offload size
	 * (includes optional P2P IEs)
	 */
	__le32 max_bcn_ie_size;
	/*
	 * request to host to allocate a chunk of memory and pass it down to
	 * FW via WMI_INIT. FW uses this as FW extension memory for saving
	 * its data structures. Only valid for low latency interfaces like
	 * PCIE where FW can access this memory directly (or) by DMA.
	 */
	__le32 num_mem_reqs;
	/* variable-length tail; length given by num_mem_reqs */
	struct wlan_host_mem_req mem_reqs[1];
} __packed;
764
/*
 * status consists of upper 16 bits of init status and lower 16 bits of
 * module ID that returned status
 */
#define WLAN_INIT_STATUS_SUCCESS 0x0
#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff)
#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)

/* How long the driver waits for the ready events, in jiffies */
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
775
/* Payload of the WMI ready event sent by the target after init. */
struct wmi_ready_event {
	__le32 sw_version;
	__le32 abi_version;
	struct wmi_mac_addr mac_addr;
	__le32 status;
} __packed;
782
/* Target resource configuration, sent as part of WMI_INIT. */
struct wmi_resource_config {
	/* number of virtual devices (VAPs) to support */
	__le32 num_vdevs;

	/* number of peer nodes to support */
	__le32 num_peers;

	/*
	 * In offload mode target supports features like WOW, chatter and
	 * other protocol offloads. In order to support them some
	 * functionalities like reorder buffering, PN checking need to be
	 * done in target. This determines maximum number of peers supported
	 * by target in offload mode
	 */
	__le32 num_offload_peers;

	/* For target-based RX reordering */
	__le32 num_offload_reorder_bufs;

	/* number of keys per peer */
	__le32 num_peer_keys;

	/* total number of TX/RX data TIDs */
	__le32 num_tids;

	/*
	 * max skid for resolving hash collisions
	 *
	 * The address search table is sparse, so that if two MAC addresses
	 * result in the same hash value, the second of these conflicting
	 * entries can slide to the next index in the address search table,
	 * and use it, if it is unoccupied. This ast_skid_limit parameter
	 * specifies the upper bound on how many subsequent indices to search
	 * over to find an unoccupied space.
	 */
	__le32 ast_skid_limit;

	/*
	 * the nominal chain mask for transmit
	 *
	 * The chain mask may be modified dynamically, e.g. to operate AP
	 * tx with a reduced number of chains if no clients are associated.
	 * This configuration parameter specifies the nominal chain-mask that
	 * should be used when not operating with a reduced set of tx chains.
	 */
	__le32 tx_chain_mask;

	/*
	 * the nominal chain mask for receive
	 *
	 * The chain mask may be modified dynamically, e.g. for a client
	 * to use a reduced number of chains for receive if the traffic to
	 * the client is low enough that it doesn't require downlink MIMO
	 * or antenna diversity.
	 * This configuration parameter specifies the nominal chain-mask that
	 * should be used when not operating with a reduced set of rx chains.
	 */
	__le32 rx_chain_mask;

	/*
	 * what rx reorder timeout (ms) to use for the AC
	 *
	 * Each WMM access class (voice, video, best-effort, background) will
	 * have its own timeout value to dictate how long to wait for missing
	 * rx MPDUs to arrive before flushing subsequent MPDUs that have
	 * already been received.
	 * This parameter specifies the timeout in milliseconds for each
	 * class.
	 */
	__le32 rx_timeout_pri_vi;
	__le32 rx_timeout_pri_vo;
	__le32 rx_timeout_pri_be;
	__le32 rx_timeout_pri_bk;

	/*
	 * what mode the rx should decap packets to
	 *
	 * MAC can decap to RAW (no decap), native wifi or Ethernet types
	 * This setting also determines the default TX behavior, however TX
	 * behavior can be modified on a per VAP basis during VAP init
	 */
	__le32 rx_decap_mode;

	/* what is the maximum number of scan requests that can be queued */
	__le32 scan_max_pending_reqs;

	/* maximum VDEVs that could use BMISS offload */
	__le32 bmiss_offload_max_vdev;

	/* maximum VDEVs that could use offload roaming */
	__le32 roam_offload_max_vdev;

	/* maximum AP profiles that would push to offload roaming */
	__le32 roam_offload_max_ap_profiles;

	/*
	 * how many groups to use for mcast->ucast conversion
	 *
	 * The target's WAL maintains a table to hold information regarding
	 * which peers belong to a given multicast group, so that if
	 * multicast->unicast conversion is enabled, the target can convert
	 * multicast tx frames to a series of unicast tx frames, to each
	 * peer within the multicast group.
	 * This num_mcast_groups configuration parameter tells the target how
	 * many multicast groups to provide storage for within its multicast
	 * group membership table.
	 */
	__le32 num_mcast_groups;

	/*
	 * size to alloc for the mcast membership table
	 *
	 * This num_mcast_table_elems configuration parameter tells the
	 * target how many peer elements it needs to provide storage for in
	 * its multicast group membership table.
	 * These multicast group membership table elements are shared by the
	 * multicast groups stored within the table.
	 */
	__le32 num_mcast_table_elems;

	/*
	 * whether/how to do multicast->unicast conversion
	 *
	 * This configuration parameter specifies whether the target should
	 * perform multicast --> unicast conversion on transmit, and if so,
	 * what to do if it finds no entries in its multicast group
	 * membership table for the multicast IP address in the tx frame.
	 * Configuration value:
	 * 0 -> Do not perform multicast to unicast conversion.
	 * 1 -> Convert multicast frames to unicast, if the IP multicast
	 *      address from the tx frame is found in the multicast group
	 *      membership table.  If the IP multicast address is not found,
	 *      drop the frame.
	 * 2 -> Convert multicast frames to unicast, if the IP multicast
	 *      address from the tx frame is found in the multicast group
	 *      membership table.  If the IP multicast address is not found,
	 *      transmit the frame as multicast.
	 */
	__le32 mcast2ucast_mode;

	/*
	 * how much memory to allocate for a tx PPDU dbg log
	 *
	 * This parameter controls how much memory the target will allocate
	 * to store a log of tx PPDU meta-information (how large the PPDU
	 * was, when it was sent, whether it was successful, etc.)
	 */
	__le32 tx_dbg_log_size;

	/* how many AST entries to be allocated for WDS */
	__le32 num_wds_entries;

	/*
	 * MAC DMA burst size, e.g., for target PCI limit can be
	 * 0 -default, 1 256B
	 */
	__le32 dma_burst_size;

	/*
	 * Fixed delimiters to be inserted after every MPDU to
	 * account for interface latency to avoid underrun.
	 */
	__le32 mac_aggr_delim;

	/*
	 * determine whether target is responsible for detecting duplicate
	 * non-aggregate MPDU and timing out stale fragments.
	 *
	 * A-MPDU reordering is always performed on the target.
	 *
	 * 0: target responsible for frag timeout and dup checking
	 * 1: host responsible for frag timeout and dup checking
	 */
	__le32 rx_skip_defrag_timeout_dup_detection_check;

	/*
	 * Configuration for VoW :
	 * No of Video Nodes to be supported
	 * and Max no of descriptors for each Video link (node).
	 */
	__le32 vow_config;

	/* maximum VDEVs that could use GTK offload */
	__le32 gtk_offload_max_vdev;

	/* Number of msdu descriptors target should use */
	__le32 num_msdu_desc;

	/*
	 * Max. number of Tx fragments per MSDU
	 * This parameter controls the max number of Tx fragments per MSDU.
	 * This is sent by the target as part of the WMI_SERVICE_READY event
	 * and is overridden by the OS shim as required.
	 */
	__le32 max_frag_entries;
} __packed;
979
/* structure describing a host memory chunk. */
struct host_memory_chunk {
	/* id of the request that is passed up in service ready */
	__le32 req_id;
	/* the physical address of the memory chunk */
	__le32 ptr;
	/* size of the chunk */
	__le32 size;
} __packed;
989
/* WMI_INIT command: resource configuration plus host memory chunks. */
struct wmi_init_cmd {
	struct wmi_resource_config resource_config;
	__le32 num_host_mem_chunks;

	/*
	 * variable number of host memory chunks.
	 * This should be the last element in the structure
	 */
	struct host_memory_chunk host_mem_chunks[1];
} __packed;
1000
/* TLV for channel list */
struct wmi_chan_list {
	__le32 tag; /* WMI_CHAN_LIST_TAG */
	__le32 num_chan;
	__le32 channel_list[0]; /* num_chan entries follow */
} __packed;
1007
/* TLV for BSSID list */
struct wmi_bssid_list {
	__le32 tag; /* WMI_BSSID_LIST_TAG */
	__le32 num_bssid;
	struct wmi_mac_addr bssid_list[0]; /* num_bssid entries follow */
} __packed;
1013
/* TLV for raw information-element data */
struct wmi_ie_data {
	__le32 tag; /* WMI_IE_TAG */
	__le32 ie_len;
	u8 ie_data[0]; /* ie_len bytes follow */
} __packed;
1019
/* SSID as carried over WMI (length-prefixed, max 32 bytes). */
struct wmi_ssid {
	__le32 ssid_len;
	u8 ssid[32];
} __packed;
1024
/* TLV for SSID list */
struct wmi_ssid_list {
	__le32 tag; /* WMI_SSID_LIST_TAG */
	__le32 num_ssids;
	struct wmi_ssid ssids[0]; /* num_ssids entries follow */
} __packed;
1030
/* prefix used by scan requestor ids on the host */
#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000

/* prefix used by scan request ids generated on the host */
/* host cycles through the lower 12 bits to generate ids */
#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000

/* limits on the TLV lists attached to a scan command */
#define WLAN_SCAN_PARAMS_MAX_SSID 16
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
1041
/* Scan priority numbers must be sequential, starting with 0 */
enum wmi_scan_priority {
	WMI_SCAN_PRIORITY_VERY_LOW = 0,
	WMI_SCAN_PRIORITY_LOW,
	WMI_SCAN_PRIORITY_MEDIUM,
	WMI_SCAN_PRIORITY_HIGH,
	WMI_SCAN_PRIORITY_VERY_HIGH,
	WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
};
1051
/* WMI start-scan command (wire format); TLVs may follow the struct. */
struct wmi_start_scan_cmd {
	/* Scan ID */
	__le32 scan_id;
	/* Scan requestor ID */
	__le32 scan_req_id;
	/* VDEV id (interface) that is requesting scan */
	__le32 vdev_id;
	/* Scan Priority, input to scan scheduler */
	__le32 scan_priority;
	/* Scan events subscription */
	__le32 notify_scan_events;
	/* dwell time in msec on active channels */
	__le32 dwell_time_active;
	/* dwell time in msec on passive channels */
	__le32 dwell_time_passive;
	/*
	 * min time in msec on the BSS channel, only valid if at least one
	 * VDEV is active
	 */
	__le32 min_rest_time;
	/*
	 * max rest time in msec on the BSS channel, only valid if at least
	 * one VDEV is active
	 */
	/*
	 * the scanner will rest on the bss channel at least min_rest_time.
	 * after min_rest_time the scanner will start checking for tx/rx
	 * activity on all VDEVs. if there is no activity the scanner will
	 * switch to off channel. if there is activity the scanner will let
	 * the radio on the bss channel until max_rest_time expires. at
	 * max_rest_time scanner will switch to off channel irrespective of
	 * activity. activity is determined by the idle_time parameter.
	 */
	__le32 max_rest_time;
	/*
	 * time before sending next set of probe requests.
	 * The scanner keeps repeating probe requests transmission with
	 * period specified by repeat_probe_time.
	 * The number of probe requests specified depends on the ssid_list
	 * and bssid_list
	 */
	__le32 repeat_probe_time;
	/* time in msec between 2 consecutive probe requests within a set. */
	__le32 probe_spacing_time;
	/*
	 * data inactivity time in msec on bss channel that will be used by
	 * scanner for measuring the inactivity.
	 */
	__le32 idle_time;
	/* maximum time in msec allowed for scan */
	__le32 max_scan_time;
	/*
	 * delay in msec before sending first probe request after switching
	 * to a channel
	 */
	__le32 probe_delay;
	/* Scan control flags (WMI_SCAN_*) */
	__le32 scan_ctrl_flags;

	/* Burst duration time in msecs */
	__le32 burst_duration;
	/*
	 * TLV (tag length value) parameters follow the scan_cmd structure.
	 * TLV can contain channel list, bssid list, ssid list and
	 * ie. the TLV tags are defined above;
	 */
} __packed;
1119
/* Host-side SSID argument (pointer + length, not wire format). */
struct wmi_ssid_arg {
	int len;
	const u8 *ssid;
};
1124
/* Host-side BSSID argument (pointer to 6-byte MAC, not wire format). */
struct wmi_bssid_arg {
	const u8 *bssid;
};
1128
/* Host-order start-scan parameters; converted into wmi_start_scan_cmd
 * plus its trailing TLVs before being sent to the target.
 */
struct wmi_start_scan_arg {
	u32 scan_id;
	u32 scan_req_id;
	u32 vdev_id;
	u32 scan_priority;
	u32 notify_scan_events;
	u32 dwell_time_active;
	u32 dwell_time_passive;
	u32 min_rest_time;
	u32 max_rest_time;
	u32 repeat_probe_time;
	u32 probe_spacing_time;
	u32 idle_time;
	u32 max_scan_time;
	u32 probe_delay;
	u32 scan_ctrl_flags;

	u32 ie_len;
	u32 n_channels;
	u32 n_ssids;
	u32 n_bssids;

	u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
	u32 channels[64];
	struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
	struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
};
1156
/* scan control flags */

/* passively scan all channels including active channels */
#define WMI_SCAN_FLAG_PASSIVE 0x1
/* add wild card ssid probe request even though ssid_list is specified. */
#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
/* add cck rates to rates/xrate ie for the generated probe request */
#define WMI_SCAN_ADD_CCK_RATES 0x4
/* add ofdm rates to rates/xrate ie for the generated probe request */
#define WMI_SCAN_ADD_OFDM_RATES 0x8
/* To enable indication of Chan load and Noise floor to host */
#define WMI_SCAN_CHAN_STAT_EVENT 0x10
/* Filter Probe request frames */
#define WMI_SCAN_FILTER_PROBE_REQ 0x20
/* When set, DFS channels will not be scanned */
#define WMI_SCAN_BYPASS_DFS_CHN 0x40
/* Different FW scan engine may choose to bail out on errors.
 * Allow the driver to have influence over that. */
#define WMI_SCAN_CONTINUE_ON_ERROR 0x80

/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
#define WMI_SCAN_CLASS_MASK 0xFF000000
1179
1180
/* Selects which scans a stop-scan command applies to. */
enum wmi_stop_scan_type {
	WMI_SCAN_STOP_ONE = 0x00000000, /* stop by scan_id */
	WMI_SCAN_STOP_VDEV_ALL = 0x01000000, /* stop by vdev_id */
	WMI_SCAN_STOP_ALL = 0x04000000, /* stop all scans */
};
1186
/* WMI stop-scan command (wire format). */
struct wmi_stop_scan_cmd {
	__le32 scan_req_id;
	__le32 scan_id;
	__le32 req_type; /* enum wmi_stop_scan_type */
	__le32 vdev_id;
} __packed;
1193
/* Host-order stop-scan parameters; which union member is valid depends
 * on req_type (scan_id for STOP_ONE, vdev_id for STOP_VDEV_ALL).
 */
struct wmi_stop_scan_arg {
	u32 req_id;
	enum wmi_stop_scan_type req_type;
	union {
		u32 scan_id;
		u32 vdev_id;
	} u;
};
1202
/* WMI scan channel list command (wire format, variable length). */
struct wmi_scan_chan_list_cmd {
	__le32 num_scan_chans;
	struct wmi_channel chan_info[0]; /* num_scan_chans entries follow */
} __packed;
1207
/* Host-order channel list used to build wmi_scan_chan_list_cmd. */
struct wmi_scan_chan_list_arg {
	u32 n_channels;
	struct wmi_channel_arg *channels;
};
1212
/* Which beacons the target forwards to the host. */
enum wmi_bss_filter {
	WMI_BSS_FILTER_NONE = 0, /* no beacons forwarded */
	WMI_BSS_FILTER_ALL, /* all beacons forwarded */
	WMI_BSS_FILTER_PROFILE, /* only beacons matching profile */
	WMI_BSS_FILTER_ALL_BUT_PROFILE, /* all but beacons matching profile */
	WMI_BSS_FILTER_CURRENT_BSS, /* only beacons matching current BSS */
	WMI_BSS_FILTER_ALL_BUT_BSS, /* all but beacons matching BSS */
	WMI_BSS_FILTER_PROBED_SSID, /* beacons matching probed ssid */
	WMI_BSS_FILTER_LAST_BSS, /* marker only */
};
1223
/* Bit flags for scan events delivered by the target. */
enum wmi_scan_event_type {
	WMI_SCAN_EVENT_STARTED = 0x1,
	WMI_SCAN_EVENT_COMPLETED = 0x2,
	WMI_SCAN_EVENT_BSS_CHANNEL = 0x4,
	WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
	WMI_SCAN_EVENT_DEQUEUED = 0x10,
	WMI_SCAN_EVENT_PREEMPTED = 0x20, /* possibly by high-prio scan */
	WMI_SCAN_EVENT_START_FAILED = 0x40,
	WMI_SCAN_EVENT_RESTARTED = 0x80,
	WMI_SCAN_EVENT_MAX = 0x8000
};
1235
/* Why a scan finished (carried in the scan event's reason field). */
enum wmi_scan_completion_reason {
	WMI_SCAN_REASON_COMPLETED,
	WMI_SCAN_REASON_CANCELLED,
	WMI_SCAN_REASON_PREEMPTED,
	WMI_SCAN_REASON_TIMEDOUT,
	WMI_SCAN_REASON_MAX,
};
1243
/* Scan event payload sent by the target (wire format). */
struct wmi_scan_event {
	__le32 event_type; /* %WMI_SCAN_EVENT_ */
	__le32 reason; /* %WMI_SCAN_REASON_ */
	__le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
	__le32 scan_req_id;
	__le32 scan_id;
	__le32 vdev_id;
} __packed;
1252
/*
 * This defines how much headroom is kept in the
 * receive frame between the descriptor and the
 * payload, in order for the WMI PHY error and
 * management handler to insert header contents.
 *
 * This is in bytes.
 */
#define WMI_MGMT_RX_HDR_HEADROOM 52
1262
/*
 * This event will be used for sending scan results
 * as well as rx mgmt frames to the host. The rx buffer
 * will be sent as part of this WMI event. It would be a
 * good idea to pass all the fields in the RX status
 * descriptor up to the host.
 */
struct wmi_mgmt_rx_hdr {
	__le32 channel;
	__le32 snr;
	__le32 rate;
	__le32 phy_mode;
	__le32 buf_len;
	__le32 status; /* %WMI_RX_STATUS_ */
} __packed;
1278
/* Management-frame RX event: header followed by buf_len frame bytes. */
struct wmi_mgmt_rx_event {
	struct wmi_mgmt_rx_hdr hdr;
	u8 buf[0];
} __packed;
1283
/* Values for wmi_mgmt_rx_hdr.status */
#define WMI_RX_STATUS_OK 0x00
#define WMI_RX_STATUS_ERR_CRC 0x01
#define WMI_RX_STATUS_ERR_DECRYPT 0x08
#define WMI_RX_STATUS_ERR_MIC 0x10
#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
1289
/* Header preceding each PHY-error payload in a phyerr event. */
struct wmi_single_phyerr_rx_hdr {
	/* TSF timestamp */
	__le32 tsf_timestamp;

	/*
	 * Current freq1, freq2
	 *
	 * [7:0]:    freq1[lo]
	 * [15:8] :   freq1[hi]
	 * [23:16]:   freq2[lo]
	 * [31:24]:   freq2[hi]
	 */
	__le16 freq1;
	__le16 freq2;

	/*
	 * Combined RSSI over all chains and channel width for this PHY error
	 *
	 * [7:0]: RSSI combined
	 * [15:8]: Channel width (MHz)
	 * [23:16]: PHY error code
	 * [31:24]: reserved (future use)
	 */
	u8 rssi_combined;
	u8 chan_width_mhz;
	u8 phy_err_code;
	u8 rsvd0;

	/*
	 * RSSI on chain 0 through 3
	 *
	 * This is formatted the same as the PPDU_START RX descriptor
	 * field:
	 *
	 * [7:0]:   pri20
	 * [15:8]:  sec20
	 * [23:16]: sec40
	 * [31:24]: sec80
	 */

	__le32 rssi_chain0;
	__le32 rssi_chain1;
	__le32 rssi_chain2;
	__le32 rssi_chain3;

	/*
	 * Last calibrated NF value for chain 0 through 3
	 *
	 * nf_list_1:
	 *
	 * + [15:0] - chain 0
	 * + [31:16] - chain 1
	 *
	 * nf_list_2:
	 *
	 * + [15:0] - chain 2
	 * + [31:16] - chain 3
	 */
	__le32 nf_list_1;
	__le32 nf_list_2;


	/* Length of the frame */
	__le32 buf_len;
} __packed;
1355
/* One PHY-error record: header plus buf_len frame bytes. */
struct wmi_single_phyerr_rx_event {
	/* Phy error event header */
	struct wmi_single_phyerr_rx_hdr hdr;
	/* frame buffer */
	u8 bufp[0];
} __packed;
1362
/* Header of a combined phyerr event carrying multiple records. */
struct wmi_comb_phyerr_rx_hdr {
	/* Phy error count */
	__le32 num_phyerr_events;
	__le32 tsf_l32;
	__le32 tsf_u32;
} __packed;
1369
/* Combined phyerr event: header plus a packed sequence of records. */
struct wmi_comb_phyerr_rx_event {
	/* Phy error count */
	struct wmi_comb_phyerr_rx_hdr hdr;
	/*
	 * frame buffer - contains multiple payloads in the order:
	 * header - payload, header - payload...
	 * (The header is of type: wmi_single_phyerr_rx_hdr)
	 */
	u8 bufp[0];
} __packed;
1380
/* Header of a management-frame TX command (wire format). */
struct wmi_mgmt_tx_hdr {
	__le32 vdev_id;
	struct wmi_mac_addr peer_macaddr;
	__le32 tx_rate;
	__le32 tx_power;
	__le32 buf_len;
} __packed;
1388
/* Management-frame TX command: header followed by buf_len frame bytes. */
struct wmi_mgmt_tx_cmd {
	struct wmi_mgmt_tx_hdr hdr;
	u8 buf[0];
} __packed;
1393
/* Echo event: target returns the value sent in wmi_echo_cmd. */
struct wmi_echo_event {
	__le32 value;
} __packed;
1397
/* Echo command: value to be echoed back by the target. */
struct wmi_echo_cmd {
	__le32 value;
} __packed;
1401
1402
/* Set the regulatory domain and conformance test limits per band. */
struct wmi_pdev_set_regdomain_cmd {
	__le32 reg_domain;
	__le32 reg_domain_2G;
	__le32 reg_domain_5G;
	__le32 conformance_test_limit_2G;
	__le32 conformance_test_limit_5G;
} __packed;
1410
/* Command to set/unset chip in quiet mode */
struct wmi_pdev_set_quiet_cmd {
	/* period in TUs */
	__le32 period;

	/* duration in TUs */
	__le32 duration;

	/* offset in TUs */
	__le32 next_start;

	/* enable/disable */
	__le32 enabled;
} __packed;
1425
1426
/*
 * 802.11g protection mode.
 */
enum ath10k_protmode {
	ATH10K_PROT_NONE = 0, /* no protection */
	ATH10K_PROT_CTSONLY = 1, /* CTS to self */
	ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
};
1435
/* Beacon transmission mode for multi-VDEV operation. */
enum wmi_beacon_gen_mode {
	WMI_BEACON_STAGGERED_MODE = 0,
	WMI_BEACON_BURST_MODE = 1
};
1440
/* Bits of wmi_csa_event.ies_present_flag marking which IEs are valid. */
enum wmi_csa_event_ies_present_flag {
	WMI_CSA_IE_PRESENT = 0x00000001,
	WMI_XCSA_IE_PRESENT = 0x00000002,
	WMI_WBW_IE_PRESENT = 0x00000004,
	WMI_CSWARP_IE_PRESENT = 0x00000008,
};
1447
/* wmi CSA receive event from beacon frame */
struct wmi_csa_event {
	__le32 i_fc_dur;
	/* Bit 0-15: FC */
	/* Bit 16-31: DUR */
	struct wmi_mac_addr i_addr1;
	struct wmi_mac_addr i_addr2;
	__le32 csa_ie[2];
	__le32 xcsa_ie[2];
	__le32 wb_ie[2];
	__le32 cswarp_ie;
	__le32 ies_present_flag; /* wmi_csa_event_ies_present_flag */
} __packed;
1461
/* the definition of different PDEV parameters */
/* default stats update periods, in ms */
#define PDEV_DEFAULT_STATS_UPDATE_PERIOD 500
#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500
#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500
1466
/* Parameter IDs for the pdev set-param command. */
enum wmi_pdev_param {
	/* TX chain mask */
	WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
	/* RX chain mask */
	WMI_PDEV_PARAM_RX_CHAIN_MASK,
	/* TX power limit for 2G Radio */
	WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
	/* TX power limit for 5G Radio */
	WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
	/* TX power scale */
	WMI_PDEV_PARAM_TXPOWER_SCALE,
	/* Beacon generation mode . 0: host, 1: target */
	WMI_PDEV_PARAM_BEACON_GEN_MODE,
	/* Beacon generation mode . 0: staggered 1: bursted */
	WMI_PDEV_PARAM_BEACON_TX_MODE,
	/*
	 * Resource manager off chan mode .
	 * 0: turn off off chan mode. 1: turn on offchan mode
	 */
	WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	/*
	 * Protection mode:
	 * 0: no protection 1: use CTS-to-self 2: use RTS/CTS
	 */
	WMI_PDEV_PARAM_PROTECTION_MODE,
	/* Dynamic bandwidth 0: disable 1: enable */
	WMI_PDEV_PARAM_DYNAMIC_BW,
	/* Non aggregate/ 11g sw retry threshold. 0-disable */
	WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	/* aggregate sw retry threshold. 0-disable*/
	WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
	/* Station kickout threshold (number of consecutive failures). 0-disable */
	WMI_PDEV_PARAM_STA_KICKOUT_TH,
	/* Aggregate size scaling configuration per AC */
	WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	/* LTR enable */
	WMI_PDEV_PARAM_LTR_ENABLE,
	/* LTR latency for BE, in us */
	WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
	/* LTR latency for BK, in us */
	WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
	/* LTR latency for VI, in us */
	WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
	/* LTR latency for VO, in us */
	WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
	/* LTR AC latency timeout, in ms */
	WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	/* LTR platform latency override, in us */
	WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	/* LTR-RX override, in us */
	WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
	/* Tx activity timeout for LTR, in us */
	WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	/* L1SS state machine enable */
	WMI_PDEV_PARAM_L1SS_ENABLE,
	/* Deep sleep state machine enable */
	WMI_PDEV_PARAM_DSLEEP_ENABLE,
	/* RX buffering flush enable */
	WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	/* RX buffering watermark */
	WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
	/* RX buffering timeout enable */
	WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	/* RX buffering timeout value */
	WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	/* pdev level stats update period in ms */
	WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	/* vdev level stats update period in ms */
	WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	/* peer level stats update period in ms */
	WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	/* beacon filter status update period */
	WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	/* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
	WMI_PDEV_PARAM_PMF_QOS,
	/* Access category on which ARP frames are sent */
	WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
	/* DCS configuration */
	WMI_PDEV_PARAM_DCS,
	/* Enable/Disable ANI on target */
	WMI_PDEV_PARAM_ANI_ENABLE,
	/* configure the ANI polling period */
	WMI_PDEV_PARAM_ANI_POLL_PERIOD,
	/* configure the ANI listening period */
	WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
	/* configure OFDM immunity level */
	WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
	/* configure CCK immunity level */
	WMI_PDEV_PARAM_ANI_CCK_LEVEL,
	/* Enable/Disable CDD for 1x1 STAs in rate control module */
	WMI_PDEV_PARAM_DYNTXCHAIN,
	/* Enable/Disable proxy STA */
	WMI_PDEV_PARAM_PROXY_STA,
	/* Enable/Disable low power state when all VDEVs are inactive/idle. */
	WMI_PDEV_PARAM_IDLE_PS_CONFIG,
	/* Enable/Disable power gating sleep */
	WMI_PDEV_PARAM_POWER_GATING_SLEEP,
};
1565
/* Set one pdev parameter (id from enum wmi_pdev_param). */
struct wmi_pdev_set_param_cmd {
	__le32 param_id;
	__le32 param_value;
} __packed;
1570
/* Request the TX power control (TPC) configuration from the target. */
struct wmi_pdev_get_tpc_config_cmd {
	/* parameter */
	__le32 param;
} __packed;
1575
/* Dimensions of the TPC tables reported by the target */
#define WMI_TPC_RATE_MAX 160
#define WMI_TPC_TX_N_CHAIN 4

/* Which per-mode power tables are present in the TPC config event */
enum wmi_tpc_config_event_flag {
	WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1,
	WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC = 0x2,
	WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF = 0x4,
};
1584
1585struct wmi_pdev_tpc_config_event {
1586 __le32 reg_domain;
1587 __le32 chan_freq;
1588 __le32 phy_mode;
1589 __le32 twice_antenna_reduction;
1590 __le32 twice_max_rd_power;
1591 s32 twice_antenna_gain;
1592 __le32 power_limit;
1593 __le32 rate_max;
1594 __le32 num_tx_chain;
1595 __le32 ctl;
1596 __le32 flags;
1597 s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN];
1598 s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
1599 s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
1600 s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
1601 u8 rates_array[WMI_TPC_RATE_MAX];
1602} __packed;
1603
/* Transmit power scale factor. */
enum wmi_tp_scale {
	WMI_TP_SCALE_MAX    = 0,	/* no scaling (default) */
	WMI_TP_SCALE_50     = 1,	/* 50% of max (-3 dBm) */
	WMI_TP_SCALE_25     = 2,	/* 25% of max (-6 dBm) */
	WMI_TP_SCALE_12     = 3,	/* 12% of max (-9 dBm) */
	WMI_TP_SCALE_MIN    = 4,	/* min, but still on */
	WMI_TP_SCALE_SIZE   = 5,	/* max num of enum */
};

/* WMI_SET_CHANNEL command payload. */
struct wmi_set_channel_cmd {
	/* channel (only frequency and mode info are used) */
	struct wmi_channel chan;
} __packed;

/* Channel list update event from firmware. */
struct wmi_pdev_chanlist_update_event {
	/* number of channels */
	__le32 num_chan;
	/* array of channels; [1] is the pre-C99 variable-length-array idiom,
	 * actual element count is num_chan */
	struct wmi_channel channel_list[1];
} __packed;

#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)

/* Debug message event carrying a firmware log string. */
struct wmi_debug_mesg_event {
	/* message buffer, NULL terminated */
	char bufp[WMI_MAX_DEBUG_MESG];
} __packed;
1632
/* Legacy vdev subtype values; see also enum wmi_vdev_subtype. */
enum {
	/* P2P device */
	VDEV_SUBTYPE_P2PDEV = 0,
	/* P2P client */
	VDEV_SUBTYPE_P2PCLI,
	/* P2P GO */
	VDEV_SUBTYPE_P2PGO,
	/* BT3.0 HS */
	VDEV_SUBTYPE_BT,
};

struct wmi_pdev_set_channel_cmd {
	/* ignore power; only flags, mode and freq are used */
	struct wmi_channel chan;
} __packed;

/* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
#define WMI_DSCP_MAP_MAX	(64)
struct wmi_pdev_set_dscp_tid_map_cmd {
	/* map indicating DSCP to TID conversion */
	__le32 dscp_to_tid_map[WMI_DSCP_MAP_MAX];
} __packed;

/* Selects which rate a mcast_bcast_rate entry configures. */
enum mcast_bcast_rate_id {
	WMI_SET_MCAST_RATE,
	WMI_SET_BCAST_RATE
};

struct mcast_bcast_rate {
	/* NOTE(review): enum field inside a __packed wire struct -- enum size
	 * is compiler-dependent; confirm intended on-wire width. */
	enum mcast_bcast_rate_id rate_id;
	__le32 rate;
} __packed;
1665
/* Per-AC WMM parameters in wire format. */
struct wmi_wmm_params {
	__le32 cwmin;	/* minimum contention window */
	__le32 cwmax;	/* maximum contention window */
	__le32 aifs;	/* arbitration inter-frame spacing */
	__le32 txop;	/* transmit opportunity limit */
	__le32 acm;	/* admission control mandatory */
	__le32 no_ack;	/* no-ack policy */
} __packed;

/* WMM parameters for all four access categories (wire format). */
struct wmi_pdev_set_wmm_params {
	struct wmi_wmm_params ac_be;	/* best effort */
	struct wmi_wmm_params ac_bk;	/* background */
	struct wmi_wmm_params ac_vi;	/* video */
	struct wmi_wmm_params ac_vo;	/* voice */
} __packed;

/* Host-side (CPU-endian) mirror of wmi_wmm_params. */
struct wmi_wmm_params_arg {
	u32 cwmin;
	u32 cwmax;
	u32 aifs;
	u32 txop;
	u32 acm;
	u32 no_ack;
};

/* Host-side mirror of wmi_pdev_set_wmm_params. */
struct wmi_pdev_set_wmm_params_arg {
	struct wmi_wmm_params_arg ac_be;
	struct wmi_wmm_params_arg ac_bk;
	struct wmi_wmm_params_arg ac_vi;
	struct wmi_wmm_params_arg ac_vo;
};
1697
/* Firmware WAL debug TX statistics; all counters are 32-bit LE. */
struct wal_dbg_tx_stats {
	/* Num HTT cookies queued to dispatch list */
	__le32 comp_queued;

	/* Num HTT cookies dispatched */
	__le32 comp_delivered;

	/* Num MSDU queued to WAL */
	__le32 msdu_enqued;

	/* Num MPDU queue to WAL */
	__le32 mpdu_enqued;

	/* Num MSDUs dropped by WMM limit */
	__le32 wmm_drop;

	/* Num Local frames queued */
	__le32 local_enqued;

	/* Num Local frames done */
	__le32 local_freed;

	/* Num queued to HW */
	__le32 hw_queued;

	/* Num PPDU reaped from HW */
	__le32 hw_reaped;

	/* Num underruns */
	__le32 underrun;

	/* Num PPDUs cleaned up in TX abort */
	__le32 tx_abort;

	/* Num MPDUs re-queued by SW */
	__le32 mpdus_requed;

	/* excessive retries */
	__le32 tx_ko;

	/* data hw rate code */
	__le32 data_rc;

	/* Scheduler self triggers */
	__le32 self_triggers;

	/* frames dropped due to excessive sw retries */
	__le32 sw_retry_failure;

	/* illegal rate phy errors */
	__le32 illgl_rate_phy_err;

	/* wal pdev continuous xretry */
	__le32 pdev_cont_xretry;

	/* wal pdev tx timeouts (vendor comment duplicated the xretry text) */
	__le32 pdev_tx_timeout;

	/* wal pdev resets */
	__le32 pdev_resets;

	/* presumably PHY-level underrun count -- no vendor comment; confirm */
	__le32 phy_underrun;

	/* MPDU is more than txop limit */
	__le32 txop_ovf;
} __packed;
1764
/* Firmware WAL debug RX statistics; all counters are 32-bit LE. */
struct wal_dbg_rx_stats {
	/* Cnts any change in ring routing mid-ppdu */
	__le32 mid_ppdu_route_change;

	/* Total number of statuses processed */
	__le32 status_rcvd;

	/* Extra frags on rings 0-3 */
	__le32 r0_frags;
	__le32 r1_frags;
	__le32 r2_frags;
	__le32 r3_frags;

	/* MSDUs / MPDUs delivered to HTT */
	__le32 htt_msdus;
	__le32 htt_mpdus;

	/* MSDUs / MPDUs delivered to local stack */
	__le32 loc_msdus;
	__le32 loc_mpdus;

	/* AMSDUs that have more MSDUs than the status ring size */
	__le32 oversize_amsdu;

	/* Number of PHY errors */
	__le32 phy_errs;

	/* Number of PHY errors drops */
	__le32 phy_err_drop;

	/* Number of mpdu errors - FCS, MIC, ENC etc. */
	__le32 mpdu_errs;
} __packed;

/* Placeholder peer statistics. */
struct wal_dbg_peer_stats {
	/* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
	__le32 dummy;
} __packed;

/* Aggregate of all WAL debug statistics. */
struct wal_dbg_stats {
	struct wal_dbg_tx_stats tx;
	struct wal_dbg_rx_stats rx;
	struct wal_dbg_peer_stats peer;
} __packed;
1809
/* Bitmask of statistics categories for WMI_REQUEST_STATS. */
enum wmi_stats_id {
	WMI_REQUEST_PEER_STAT	= 0x01,
	WMI_REQUEST_AP_STAT	= 0x02
};

/* WMI_REQUEST_STATS command payload. */
struct wmi_request_stats_cmd {
	__le32 stats_id;	/* %WMI_REQUEST_ */

	/*
	 * Space to add parameters like
	 * peer mac addr
	 */
} __packed;

/* Suspend option */
enum {
	/* suspend */
	WMI_PDEV_SUSPEND,

	/* suspend and disable all interrupts */
	WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
};

/* WMI_PDEV_SUSPEND command payload. */
struct wmi_pdev_suspend_cmd {
	/* suspend option sent to target */
	__le32 suspend_opt;
} __packed;
1837
1838struct wmi_stats_event {
1839 __le32 stats_id; /* %WMI_REQUEST_ */
1840 /*
1841 * number of pdev stats event structures
1842 * (wmi_pdev_stats) 0 or 1
1843 */
1844 __le32 num_pdev_stats;
1845 /*
1846 * number of vdev stats event structures
1847 * (wmi_vdev_stats) 0 or max vdevs
1848 */
1849 __le32 num_vdev_stats;
1850 /*
1851 * number of peer stats event structures
1852 * (wmi_peer_stats) 0 or max peers
1853 */
1854 __le32 num_peer_stats;
1855 __le32 num_bcnflt_stats;
1856 /*
1857 * followed by
1858 * num_pdev_stats * size of(struct wmi_pdev_stats)
1859 * num_vdev_stats * size of(struct wmi_vdev_stats)
1860 * num_peer_stats * size of(struct wmi_peer_stats)
1861 *
1862 * By having a zero sized array, the pointer to data area
1863 * becomes available without increasing the struct size
1864 */
1865 u8 data[0];
1866} __packed;
1867
1868/*
1869 * PDEV statistics
1870 * TODO: add all PDEV stats here
1871 */
1872struct wmi_pdev_stats {
1873 __le32 chan_nf; /* Channel noise floor */
1874 __le32 tx_frame_count; /* TX frame count */
1875 __le32 rx_frame_count; /* RX frame count */
1876 __le32 rx_clear_count; /* rx clear count */
1877 __le32 cycle_count; /* cycle count */
1878 __le32 phy_err_count; /* Phy error count */
1879 __le32 chan_tx_pwr; /* channel tx power */
1880 struct wal_dbg_stats wal; /* WAL dbg stats */
1881} __packed;
1882
1883/*
1884 * VDEV statistics
1885 * TODO: add all VDEV stats here
1886 */
1887struct wmi_vdev_stats {
1888 __le32 vdev_id;
1889} __packed;
1890
1891/*
1892 * peer statistics.
1893 * TODO: add more stats
1894 */
1895struct wmi_peer_stats {
1896 struct wmi_mac_addr peer_macaddr;
1897 __le32 peer_rssi;
1898 __le32 peer_tx_rate;
1899} __packed;
1900
/* WMI_VDEV_CREATE command payload. */
struct wmi_vdev_create_cmd {
	__le32 vdev_id;		/* unique id chosen by the host */
	__le32 vdev_type;	/* %WMI_VDEV_TYPE_ */
	__le32 vdev_subtype;	/* %WMI_VDEV_SUBTYPE_ */
	struct wmi_mac_addr vdev_macaddr;
} __packed;

enum wmi_vdev_type {
	WMI_VDEV_TYPE_AP      = 1,
	WMI_VDEV_TYPE_STA     = 2,
	WMI_VDEV_TYPE_IBSS    = 3,
	WMI_VDEV_TYPE_MONITOR = 4,
};

enum wmi_vdev_subtype {
	WMI_VDEV_SUBTYPE_NONE       = 0,
	WMI_VDEV_SUBTYPE_P2P_DEVICE = 1,
	WMI_VDEV_SUBTYPE_P2P_CLIENT = 2,
	WMI_VDEV_SUBTYPE_P2P_GO     = 3,
};

/* values for vdev_subtype */

/* values for vdev_start_request flags */
/*
 * Indicates that AP VDEV uses hidden ssid. only valid for
 * AP/GO */
#define WMI_VDEV_START_HIDDEN_SSID  (1<<0)
/*
 * Indicates if robust management frame/management frame
 * protection is enabled. For GO/AP vdevs, it indicates that
 * it may support station/client associations with RMF enabled.
 * For STA/client vdevs, it indicates that sta will
 * associate with AP with RMF enabled. */
#define WMI_VDEV_START_PMF_ENABLED  (1<<1)

/* P2P notice-of-absence schedule descriptor. */
struct wmi_p2p_noa_descriptor {
	__le32 type_count; /* 255: continuous schedule, 0: reserved */
	__le32 duration;  /* Absent period duration in micro seconds */
	__le32 interval;   /* Absent period interval in micro seconds */
	__le32 start_time; /* 32 bit tsf time when in starts */
} __packed;
1943
/* WMI_VDEV_START_REQUEST command payload. */
struct wmi_vdev_start_request_cmd {
	/* WMI channel */
	struct wmi_channel chan;
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
	/* requestor id identifying the caller module */
	__le32 requestor_id;
	/* beacon interval from received beacon */
	__le32 beacon_interval;
	/* DTIM Period from the received beacon */
	__le32 dtim_period;
	/* Flags (%WMI_VDEV_START_) */
	__le32 flags;
	/* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
	struct wmi_ssid ssid;
	/* beacon/probe response xmit rate. Applicable for SoftAP. */
	__le32 bcn_tx_rate;
	/* beacon/probe response xmit power. Applicable for SoftAP. */
	__le32 bcn_tx_power;
	/* number of p2p NOA descriptor(s) from scan entry */
	__le32 num_noa_descriptors;
	/*
	 * Disable H/W ack. This used by WMI_VDEV_RESTART_REQUEST_CMDID.
	 * During CAC, Our HW shouldn't ack detected frames
	 */
	__le32 disable_hw_ack;
	/* actual p2p NOA descriptor from scan entry */
	struct wmi_p2p_noa_descriptor noa_descriptors[2];
} __packed;

/* WMI_VDEV_RESTART_REQUEST reuses the start-request layout. */
struct wmi_vdev_restart_request_cmd {
	struct wmi_vdev_start_request_cmd vdev_start_request_cmd;
} __packed;
1977
/* Host-side (CPU-endian) arguments for building a vdev start request. */
struct wmi_vdev_start_request_arg {
	u32 vdev_id;
	struct wmi_channel_arg channel;
	u32 bcn_intval;		/* beacon interval */
	u32 dtim_period;
	u8 *ssid;		/* not owned; caller retains the buffer */
	u32 ssid_len;
	u32 bcn_tx_rate;
	u32 bcn_tx_power;
	bool disable_hw_ack;
	bool hidden_ssid;	/* maps to %WMI_VDEV_START_HIDDEN_SSID */
	bool pmf_enabled;	/* maps to %WMI_VDEV_START_PMF_ENABLED */
};

struct wmi_vdev_delete_cmd {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
} __packed;

/* WMI_VDEV_UP command payload. */
struct wmi_vdev_up_cmd {
	__le32 vdev_id;
	__le32 vdev_assoc_id;
	struct wmi_mac_addr vdev_bssid;
} __packed;

struct wmi_vdev_stop_cmd {
	__le32 vdev_id;
} __packed;

struct wmi_vdev_down_cmd {
	__le32 vdev_id;
} __packed;

struct wmi_vdev_standby_response_cmd {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
} __packed;

struct wmi_vdev_resume_response_cmd {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
} __packed;

/* WMI_VDEV_SET_PARAM command payload; param_id from enum wmi_vdev_param. */
struct wmi_vdev_set_param_cmd {
	__le32 vdev_id;
	__le32 param_id;
	__le32 param_value;
} __packed;
2026
#define WMI_MAX_KEY_INDEX   3
#define WMI_MAX_KEY_LEN     32

/* Key flags for wmi_vdev_install_key_cmd.key_flags. */
#define WMI_KEY_PAIRWISE 0x00
#define WMI_KEY_GROUP    0x01
#define WMI_KEY_TX_USAGE 0x02 /* default tx key - static wep */

/* 64-bit key sequence counter split into two LE 32-bit halves. */
struct wmi_key_seq_counter {
	__le32 key_seq_counter_l;
	__le32 key_seq_counter_h;
} __packed;

/* Cipher ids for wmi_vdev_install_key_cmd.key_cipher. */
#define WMI_CIPHER_NONE     0x0 /* clear key */
#define WMI_CIPHER_WEP      0x1
#define WMI_CIPHER_TKIP     0x2
#define WMI_CIPHER_AES_OCB  0x3
#define WMI_CIPHER_AES_CCM  0x4
#define WMI_CIPHER_WAPI     0x5
#define WMI_CIPHER_CKIP     0x6
#define WMI_CIPHER_AES_CMAC 0x7
2047
2048struct wmi_vdev_install_key_cmd {
2049 __le32 vdev_id;
2050 struct wmi_mac_addr peer_macaddr;
2051 __le32 key_idx;
2052 __le32 key_flags;
2053 __le32 key_cipher; /* %WMI_CIPHER_ */
2054 struct wmi_key_seq_counter key_rsc_counter;
2055 struct wmi_key_seq_counter key_global_rsc_counter;
2056 struct wmi_key_seq_counter key_tsc_counter;
2057 u8 wpi_key_rsc_counter[16];
2058 u8 wpi_key_tsc_counter[16];
2059 __le32 key_len;
2060 __le32 key_txmic_len;
2061 __le32 key_rxmic_len;
2062
2063 /* contains key followed by tx mic followed by rx mic */
2064 u8 key_data[0];
2065} __packed;
2066
/* Host-side (CPU-endian) arguments for building an install-key command. */
struct wmi_vdev_install_key_arg {
	u32 vdev_id;
	const u8 *macaddr;	/* peer MAC; not owned by this struct */
	u32 key_idx;
	u32 key_flags;
	u32 key_cipher;
	u32 key_len;
	u32 key_txmic_len;
	u32 key_rxmic_len;
	const void *key_data;	/* key material; not owned by this struct */
};

/* Preamble types to be used with VDEV fixed rate configuration */
enum wmi_rate_preamble {
	WMI_RATE_PREAMBLE_OFDM,
	WMI_RATE_PREAMBLE_CCK,
	WMI_RATE_PREAMBLE_HT,
	WMI_RATE_PREAMBLE_VHT,
};

/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE    (0xff)
2089
/* the definition of different VDEV parameters */
enum wmi_vdev_param {
	/* RTS Threshold */
	WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
	/* Fragmentation threshold */
	WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	/* beacon interval in TUs */
	WMI_VDEV_PARAM_BEACON_INTERVAL,
	/* Listen interval in TUs */
	WMI_VDEV_PARAM_LISTEN_INTERVAL,
	/* multicast rate in Mbps */
	WMI_VDEV_PARAM_MULTICAST_RATE,
	/* management frame rate in Mbps */
	WMI_VDEV_PARAM_MGMT_TX_RATE,
	/* slot time (long vs short) */
	WMI_VDEV_PARAM_SLOT_TIME,
	/* preamble (long vs short) */
	WMI_VDEV_PARAM_PREAMBLE,
	/* SWBA time (time before tbtt in msec) */
	WMI_VDEV_PARAM_SWBA_TIME,
	/* time period for updating VDEV stats */
	WMI_VDEV_STATS_UPDATE_PERIOD,
	/* age out time in msec for frames queued for station in power save */
	WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	/*
	 * Host SWBA interval (time in msec before tbtt for SWBA event
	 * generation).
	 */
	WMI_VDEV_HOST_SWBA_INTERVAL,
	/* DTIM period (specified in units of num beacon intervals) */
	WMI_VDEV_PARAM_DTIM_PERIOD,
	/*
	 * scheduler air time limit for this VDEV. used by off chan
	 * scheduler.
	 */
	WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	/* enable/disable WDS for this VDEV */
	WMI_VDEV_PARAM_WDS,
	/* ATIM Window */
	WMI_VDEV_PARAM_ATIM_WINDOW,
	/* BMISS max */
	WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	/* BMISS first time */
	WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	/* BMISS final time */
	WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	/* WMM enable/disable */
	WMI_VDEV_PARAM_FEATURE_WMM,
	/* Channel width */
	WMI_VDEV_PARAM_CHWIDTH,
	/* Channel Offset */
	WMI_VDEV_PARAM_CHEXTOFFSET,
	/* Disable HT Protection */
	WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	/* Quick STA Kickout */
	WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	/* Rate to be used with Management frames */
	WMI_VDEV_PARAM_MGMT_RATE,
	/* Protection Mode */
	WMI_VDEV_PARAM_PROTECTION_MODE,
	/* Fixed rate setting */
	WMI_VDEV_PARAM_FIXED_RATE,
	/* Short GI Enable/Disable */
	WMI_VDEV_PARAM_SGI,
	/* Enable LDPC */
	WMI_VDEV_PARAM_LDPC,
	/* Enable Tx STBC */
	WMI_VDEV_PARAM_TX_STBC,
	/* Enable Rx STBC */
	WMI_VDEV_PARAM_RX_STBC,
	/* Intra BSS forwarding  */
	WMI_VDEV_PARAM_INTRA_BSS_FWD,
	/* Setting Default xmit key for Vdev */
	WMI_VDEV_PARAM_DEF_KEYID,
	/* NSS width */
	WMI_VDEV_PARAM_NSS,
	/* Set the custom rate for the broadcast data frames */
	WMI_VDEV_PARAM_BCAST_DATA_RATE,
	/* Set the custom rate (rate-code) for multicast data frames */
	WMI_VDEV_PARAM_MCAST_DATA_RATE,
	/* Tx multicast packet indicate Enable/Disable */
	WMI_VDEV_PARAM_MCAST_INDICATE,
	/* Tx DHCP packet indicate Enable/Disable */
	WMI_VDEV_PARAM_DHCP_INDICATE,
	/* Enable host inspection of Tx unicast packet to unknown destination */
	WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,

	/* The minimum amount of time AP begins to consider STA inactive */
	WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,

	/*
	 * An associated STA is considered inactive when there is no recent
	 * TX/RX activity and no downlink frames are buffered for it. Once a
	 * STA exceeds the maximum idle inactive time, the AP will send an
	 * 802.11 data-null as a keep alive to verify the STA is still
	 * associated. If the STA does ACK the data-null, or if the data-null
	 * is buffered and the STA does not retrieve it, the STA will be
	 * considered unresponsive
	 * (see WMI_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
	 */
	WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,

	/*
	 * An associated STA is considered unresponsive if there is no recent
	 * TX/RX activity and downlink frames are buffered for it. Once a STA
	 * exceeds the maximum unresponsive time, the AP will send a
	 * WMI_STA_KICKOUT event to the host so the STA can be deleted. */
	WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,

	/* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
	WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	/* Enable TX beamformee/beamformer (TXBF) */
	WMI_VDEV_PARAM_TXBF,

	/* Set packet power save */
	WMI_VDEV_PARAM_PACKET_POWERSAVE,

	/*
	 * Drops un-encrypted packets if received in an encrypted connection
	 * otherwise forwards to host.
	 */
	WMI_VDEV_PARAM_DROP_UNENCRY,

	/*
	 * Set the encapsulation type for frames.
	 */
	WMI_VDEV_PARAM_TX_ENCAP_TYPE,
};
2220
/* slot time long */
#define WMI_VDEV_SLOT_TIME_LONG		0x1
/* slot time short */
#define WMI_VDEV_SLOT_TIME_SHORT	0x2
/* preamble long */
#define WMI_VDEV_PREAMBLE_LONG		0x1
/* preamble short */
#define WMI_VDEV_PREAMBLE_SHORT		0x2

/* Distinguishes start vs restart in wmi_vdev_start_response_event. */
enum wmi_start_event_param {
	WMI_VDEV_RESP_START_EVENT = 0,
	WMI_VDEV_RESP_RESTART_EVENT,
};

/* Firmware response to a vdev start/restart request. */
struct wmi_vdev_start_response_event {
	__le32 vdev_id;
	__le32 req_id;
	__le32 resp_type; /* %WMI_VDEV_RESP_ */
	__le32 status;
} __packed;

struct wmi_vdev_standby_req_event {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
} __packed;

struct wmi_vdev_resume_req_event {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
} __packed;

struct wmi_vdev_stopped_event {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
} __packed;

/*
 * common structure used for simple events
 * (stopped, resume_req, standby response)
 */
struct wmi_vdev_simple_event {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
} __packed;
2265
/* VDEV start response status codes
 * NOTE(review): "INIFIED" looks like a vendor typo (for "UNIFIED");
 * names kept as-is since they are part of the source interface. */
/* VDEV successfully started */
#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS	0x0

/* requested VDEV not found */
#define WMI_INIFIED_VDEV_START_RESPONSE_INVALID_VDEVID	0x1

/* unsupported VDEV combination */
#define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED	0x2

/* Beacon processing related command and event structures */
struct wmi_bcn_tx_hdr {
	__le32 vdev_id;
	__le32 tx_rate;
	__le32 tx_power;
	__le32 bcn_len;
} __packed;

struct wmi_bcn_tx_cmd {
	struct wmi_bcn_tx_hdr hdr;
	/* variable-length beacon frame follows the header.
	 * NOTE(review): declared as a zero-length array of POINTERS
	 * (u8 *bcn[0]); likely intended as u8 bcn[0] -- confirm against
	 * callers before changing. */
	u8 *bcn[0];
} __packed;

/* Host-side (CPU-endian) arguments for sending a beacon. */
struct wmi_bcn_tx_arg {
	u32 vdev_id;
	u32 tx_rate;
	u32 tx_power;
	u32 bcn_len;
	const void *bcn;	/* beacon frame; not owned by this struct */
};
2296
/* Beacon filter */
#define WMI_BCN_FILTER_ALL   0 /* Filter all beacons */
#define WMI_BCN_FILTER_NONE  1 /* Pass all beacons */
#define WMI_BCN_FILTER_RSSI  2 /* Pass Beacons RSSI >= RSSI threshold */
#define WMI_BCN_FILTER_BSSID 3 /* Pass Beacons with matching BSSID */
#define WMI_BCN_FILTER_SSID  4 /* Pass Beacons with matching SSID */

struct wmi_bcn_filter_rx_cmd {
	/* Filter ID */
	__le32 bcn_filter_id;
	/* Filter type - wmi_bcn_filter */
	__le32 bcn_filter;
	/* Buffer len */
	__le32 bcn_filter_len;
	/* Filter info (threshold, BSSID, RSSI)
	 * NOTE(review): a host pointer inside a __packed wire struct is
	 * suspicious -- confirm how this field is marshalled to firmware. */
	u8 *bcn_filter_buf;
} __packed;

/* Capabilities and IEs to be passed to firmware */
struct wmi_bcn_prb_info {
	/* Capabilities */
	__le32 caps;
	/* ERP info */
	__le32 erp;
	/* Advanced capabilities */
	/* HT capabilities */
	/* HT Info */
	/* ibss_dfs */
	/* wpa Info */
	/* rsn Info */
	/* rrm info */
	/* ath_ext */
	/* app IE */
} __packed;

/* Beacon template command; template bytes follow in data[]. */
struct wmi_bcn_tmpl_cmd {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
	/* TIM IE offset from the beginning of the template. */
	__le32 tim_ie_offset;
	/* beacon probe capabilities and IEs */
	struct wmi_bcn_prb_info bcn_prb_info;
	/* beacon buffer length */
	__le32 buf_len;
	/* variable length data ([1] is the pre-C99 trailing-array idiom) */
	u8 data[1];
} __packed;

/* Probe response template command; template bytes follow in data[]. */
struct wmi_prb_tmpl_cmd {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
	/* beacon probe capabilities and IEs */
	struct wmi_bcn_prb_info bcn_prb_info;
	/* beacon buffer length */
	__le32 buf_len;
	/* Variable length data ([1] is the pre-C99 trailing-array idiom) */
	u8 data[1];
} __packed;
2355
enum wmi_sta_ps_mode {
	/* disable power save for the given STA VDEV */
	WMI_STA_PS_MODE_DISABLED = 0,
	/* enable power save for a given STA VDEV */
	WMI_STA_PS_MODE_ENABLED = 1,
};
2362
/* WMI_STA_POWERSAVE_MODE command payload. */
struct wmi_sta_powersave_mode_cmd {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;

	/*
	 * Power save mode
	 * (see enum wmi_sta_ps_mode)
	 */
	__le32 sta_ps_mode;
} __packed;

enum wmi_csa_offload_en {
	WMI_CSA_OFFLOAD_DISABLE = 0,
	WMI_CSA_OFFLOAD_ENABLE = 1,
};

/* Enable/disable channel-switch-announcement offload for a vdev. */
struct wmi_csa_offload_enable_cmd {
	__le32 vdev_id;
	__le32 csa_offload_enable;	/* %WMI_CSA_OFFLOAD_ */
} __packed;

/* Perform an offloaded CSA channel switch on a vdev. */
struct wmi_csa_offload_chanswitch_cmd {
	__le32 vdev_id;
	struct wmi_channel chan;
} __packed;
2388
2389/*
2390 * This parameter controls the policy for retrieving frames from AP while the
2391 * STA is in sleep state.
2392 *
2393 * Only takes affect if the sta_ps_mode is enabled
2394 */
2395enum wmi_sta_ps_param_rx_wake_policy {
2396 /*
2397 * Wake up when ever there is an RX activity on the VDEV. In this mode
2398 * the Power save SM(state machine) will come out of sleep by either
2399 * sending null frame (or) a data frame (with PS==0) in response to TIM
2400 * bit set in the received beacon frame from AP.
2401 */
2402 WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
2403
2404 /*
2405 * Here the power save state machine will not wakeup in response to TIM
2406 * bit, instead it will send a PSPOLL (or) UASPD trigger based on UAPSD
2407 * configuration setup by WMISET_PS_SET_UAPSD WMI command. When all
2408 * access categories are delivery-enabled, the station will send a
2409 * UAPSD trigger frame, otherwise it will send a PS-Poll.
2410 */
2411 WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
2412};
2413
2414/*
2415 * Number of tx frames/beacon that cause the power save SM to wake up.
2416 *
2417 * Value 1 causes the SM to wake up for every TX. Value 0 has a special
2418 * meaning, It will cause the SM to never wake up. This is useful if you want
2419 * to keep the system to sleep all the time for some kind of test mode . host
2420 * can change this parameter any time. It will affect at the next tx frame.
2421 */
2422enum wmi_sta_ps_param_tx_wake_threshold {
2423 WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
2424 WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
2425
2426 /*
2427 * Values greater than one indicate that many TX attempts per beacon
2428 * interval before the STA will wake up
2429 */
2430};
2431
2432/*
2433 * The maximum number of PS-Poll frames the FW will send in response to
2434 * traffic advertised in TIM before waking up (by sending a null frame with PS
2435 * = 0). Value 0 has a special meaning: there is no maximum count and the FW
2436 * will send as many PS-Poll as are necessary to retrieve buffered BU. This
2437 * parameter is used when the RX wake policy is
2438 * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
2439 * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
2440 */
2441enum wmi_sta_ps_param_pspoll_count {
2442 WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
2443 /*
2444 * Values greater than 0 indicate the maximum numer of PS-Poll frames
2445 * FW will send before waking up.
2446 */
2447};
2448
2449/*
2450 * This will include the delivery and trigger enabled state for every AC.
2451 * This is the negotiated state with AP. The host MLME needs to set this based
2452 * on AP capability and the state Set in the association request by the
2453 * station MLME.Lower 8 bits of the value specify the UAPSD configuration.
2454 */
2455#define WMI_UAPSD_AC_TYPE_DELI 0
2456#define WMI_UAPSD_AC_TYPE_TRIG 1
2457
2458#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
2459 ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
2460
2461enum wmi_sta_ps_param_uapsd {
2462 WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
2463 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
2464 WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
2465 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
2466 WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
2467 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
2468 WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
2469 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
2470};
2471
2472enum wmi_sta_powersave_param {
2473 /*
2474 * Controls how frames are retrievd from AP while STA is sleeping
2475 *
2476 * (see enum wmi_sta_ps_param_rx_wake_policy)
2477 */
2478 WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
2479
2480 /*
2481 * The STA will go active after this many TX
2482 *
2483 * (see enum wmi_sta_ps_param_tx_wake_threshold)
2484 */
2485 WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
2486
2487 /*
2488 * Number of PS-Poll to send before STA wakes up
2489 *
2490 * (see enum wmi_sta_ps_param_pspoll_count)
2491 *
2492 */
2493 WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
2494
2495 /*
2496 * TX/RX inactivity time in msec before going to sleep.
2497 *
2498 * The power save SM will monitor tx/rx activity on the VDEV, if no
2499 * activity for the specified msec of the parameter the Power save
2500 * SM will go to sleep.
2501 */
2502 WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
2503
2504 /*
2505 * Set uapsd configuration.
2506 *
2507 * (see enum wmi_sta_ps_param_uapsd)
2508 */
2509 WMI_STA_PS_PARAM_UAPSD = 4,
2510};
2511
2512struct wmi_sta_powersave_param_cmd {
2513 __le32 vdev_id;
2514 __le32 param_id; /* %WMI_STA_PS_PARAM_ */
2515 __le32 param_value;
2516} __packed;
2517
/* No MIMO power save */
#define WMI_STA_MIMO_PS_MODE_DISABLE
/* mimo powersave mode static*/
#define WMI_STA_MIMO_PS_MODE_STATIC
/* mimo powersave mode dynamic */
#define WMI_STA_MIMO_PS_MODE_DYNAMIC
/* NOTE(review): the three macros above expand to nothing -- their numeric
 * values appear to be missing from the vendor header; confirm against the
 * firmware interface before using them. */

struct wmi_sta_mimo_ps_mode_cmd {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
	/* mimo powersave mode as defined above */
	__le32 mimo_pwrsave_mode;
} __packed;
2531
/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
enum wmi_ap_ps_param_uapsd {
	WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
	WMI_AP_PS_UAPSD_AC0_TRIGGER_EN  = (1 << 1),
	WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
	WMI_AP_PS_UAPSD_AC1_TRIGGER_EN  = (1 << 3),
	WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
	WMI_AP_PS_UAPSD_AC2_TRIGGER_EN  = (1 << 5),
	WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
	WMI_AP_PS_UAPSD_AC3_TRIGGER_EN  = (1 << 7),
};

/* U-APSD maximum service period of peer station */
enum wmi_ap_ps_peer_param_max_sp {
	WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
	WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
	WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
	WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
	MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
};

/*
 * AP power save parameter
 * Set a power save specific parameter for a peer station
 */
enum wmi_ap_ps_peer_param {
	/* Set uapsd configuration for a given peer.
	 *
	 * Include the delivery and trigger enabled state for every AC.
	 * The host MLME needs to set this based on AP capability and stations
	 * request Set in the association request received from the station.
	 *
	 * Lower 8 bits of the value specify the UAPSD configuration.
	 *
	 * (see enum wmi_ap_ps_param_uapsd)
	 * The default value is 0.
	 */
	WMI_AP_PS_PEER_PARAM_UAPSD = 0,

	/*
	 * Set the service period for a UAPSD capable station
	 *
	 * The service period from wme ie in the (re)assoc request frame.
	 *
	 * (see enum wmi_ap_ps_peer_param_max_sp)
	 */
	WMI_AP_PS_PEER_PARAM_MAX_SP = 1,

	/* Time in seconds for aging out buffered frames for STA in PS */
	WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
};

struct wmi_ap_ps_peer_cmd {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;

	/* peer MAC address */
	struct wmi_mac_addr peer_macaddr;

	/* AP powersave param (see enum wmi_ap_ps_peer_param) */
	__le32 param_id;

	/* AP powersave param value */
	__le32 param_value;
} __packed;
2597
/* 128 clients = 4 words */
#define WMI_TIM_BITMAP_ARRAY_SIZE 4

/* TIM IE contents delivered with the software beacon alert. */
struct wmi_tim_info {
	__le32 tim_len;
	__le32 tim_mcast;
	__le32 tim_bitmap[WMI_TIM_BITMAP_ARRAY_SIZE];
	__le32 tim_changed;
	__le32 tim_num_ps_pending;
} __packed;

/* Maximum number of NOA Descriptors supported */
#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
#define WMI_P2P_OPPPS_ENABLE_BIT	BIT(0)
#define WMI_P2P_OPPPS_CTWINDOW_OFFSET	1
#define WMI_P2P_NOA_CHANGED_BIT	BIT(0)

/* P2P notice-of-absence info delivered with the software beacon alert. */
struct wmi_p2p_noa_info {
	/* Bit 0 - Flag to indicate an update in NOA schedule
	   Bits 7-1 - Reserved */
	u8 changed;
	/* NOA index */
	u8 index;
	/* Bit 0 - Opp PS state of the AP
	   Bits 1-7 - Ctwindow in TUs */
	u8 ctwindow_oppps;
	/* Number of NOA descriptors */
	u8 num_descriptors;

	struct wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
} __packed;

/* Per-vdev beacon info in the software beacon alert event. */
struct wmi_bcn_info {
	struct wmi_tim_info tim_info;
	struct wmi_p2p_noa_info p2p_noa_info;
} __packed;

/* Software beacon alert (SWBA) event; one wmi_bcn_info per bit in vdev_map.
 * [1] is the pre-C99 trailing-array idiom. */
struct wmi_host_swba_event {
	__le32 vdev_map;
	struct wmi_bcn_info bcn_info[1];
} __packed;
2639
#define WMI_MAX_AP_VDEV	16

/* Per-vdev TBTT offsets; valid entries indicated by bits in vdev_map. */
struct wmi_tbtt_offset_event {
	__le32 vdev_map;
	__le32 tbttoffset_list[WMI_MAX_AP_VDEV];
} __packed;


/* WMI_PEER_CREATE command payload. */
struct wmi_peer_create_cmd {
	__le32 vdev_id;
	struct wmi_mac_addr peer_macaddr;
} __packed;

/* WMI_PEER_DELETE command payload. */
struct wmi_peer_delete_cmd {
	__le32 vdev_id;
	struct wmi_mac_addr peer_macaddr;
} __packed;

/* Flush the firmware TX queues of a peer for the TIDs set in the bitmap. */
struct wmi_peer_flush_tids_cmd {
	__le32 vdev_id;
	struct wmi_mac_addr peer_macaddr;
	__le32 peer_tid_bitmap;
} __packed;
2663
/* Fixed-rate configuration for a peer. */
struct wmi_fixed_rate {
	/*
	 * rate mode . 0: disable fixed rate (auto rate)
	 *   1: legacy (non 11n) rate specified as ieee rate 2*Mbps
	 *   2: ht20 11n rate specified as mcs index
	 *   3: ht40 11n rate specified as mcs index
	 */
	__le32  rate_mode;
	/*
	 * 4 rate values for 4 rate series. series 0 is stored in byte 0 (LSB)
	 * and series 3 is stored at byte 3 (MSB)
	 */
	__le32  rate_series;
	/*
	 * 4 retry counts for 4 rate series. retry count for rate 0 is stored
	 * in byte 0 (LSB) and retry count for rate 3 is stored at byte 3
	 * (MSB)
	 */
	__le32  rate_retries;
} __packed;

struct wmi_peer_fixed_rate_cmd {
	/* unique id identifying the VDEV, generated by the caller */
	__le32 vdev_id;
	/* peer MAC address */
	struct wmi_mac_addr peer_macaddr;
	/* fixed rate */
	struct wmi_fixed_rate peer_fixed_rate;
} __packed;
2693
2694#define WMI_MGMT_TID 17
2695
2696struct wmi_addba_clear_resp_cmd {
2697 /* unique id identifying the VDEV, generated by the caller */
2698 __le32 vdev_id;
2699 /* peer MAC address */
2700 struct wmi_mac_addr peer_macaddr;
2701} __packed;
2702
2703struct wmi_addba_send_cmd {
2704 /* unique id identifying the VDEV, generated by the caller */
2705 __le32 vdev_id;
2706 /* peer MAC address */
2707 struct wmi_mac_addr peer_macaddr;
2708 /* Tid number */
2709 __le32 tid;
2710 /* Buffer/Window size*/
2711 __le32 buffersize;
2712} __packed;
2713
2714struct wmi_delba_send_cmd {
2715 /* unique id identifying the VDEV, generated by the caller */
2716 __le32 vdev_id;
2717 /* peer MAC address */
2718 struct wmi_mac_addr peer_macaddr;
2719 /* Tid number */
2720 __le32 tid;
2721 /* Is Initiator */
2722 __le32 initiator;
2723 /* Reason code */
2724 __le32 reasoncode;
2725} __packed;
2726
2727struct wmi_addba_setresponse_cmd {
2728 /* unique id identifying the vdev, generated by the caller */
2729 __le32 vdev_id;
2730 /* peer mac address */
2731 struct wmi_mac_addr peer_macaddr;
2732 /* Tid number */
2733 __le32 tid;
2734 /* status code */
2735 __le32 statuscode;
2736} __packed;
2737
2738struct wmi_send_singleamsdu_cmd {
2739 /* unique id identifying the vdev, generated by the caller */
2740 __le32 vdev_id;
2741 /* peer mac address */
2742 struct wmi_mac_addr peer_macaddr;
2743 /* Tid number */
2744 __le32 tid;
2745} __packed;
2746
2747enum wmi_peer_smps_state {
2748 WMI_PEER_SMPS_PS_NONE = 0x0,
2749 WMI_PEER_SMPS_STATIC = 0x1,
2750 WMI_PEER_SMPS_DYNAMIC = 0x2
2751};
2752
2753enum wmi_peer_param {
2754 WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
2755 WMI_PEER_AMPDU = 0x2,
2756 WMI_PEER_AUTHORIZE = 0x3,
2757 WMI_PEER_CHAN_WIDTH = 0x4,
2758 WMI_PEER_NSS = 0x5,
2759 WMI_PEER_USE_4ADDR = 0x6
2760};
2761
2762struct wmi_peer_set_param_cmd {
2763 __le32 vdev_id;
2764 struct wmi_mac_addr peer_macaddr;
2765 __le32 param_id;
2766 __le32 param_value;
2767} __packed;
2768
2769#define MAX_SUPPORTED_RATES 128
2770
2771struct wmi_rate_set {
2772 /* total number of rates */
2773 __le32 num_rates;
2774 /*
2775 * rates (each 8bit value) packed into a 32 bit word.
2776 * the rates are filled from least significant byte to most
2777 * significant byte.
2778 */
2779 __le32 rates[(MAX_SUPPORTED_RATES/4)+1];
2780} __packed;
2781
2782struct wmi_rate_set_arg {
2783 unsigned int num_rates;
2784 u8 rates[MAX_SUPPORTED_RATES];
2785};
2786
2787/*
2788 * NOTE: It would bea good idea to represent the Tx MCS
2789 * info in one word and Rx in another word. This is split
2790 * into multiple words for convenience
2791 */
2792struct wmi_vht_rate_set {
2793 __le32 rx_max_rate; /* Max Rx data rate */
2794 __le32 rx_mcs_set; /* Negotiated RX VHT rates */
2795 __le32 tx_max_rate; /* Max Tx data rate */
2796 __le32 tx_mcs_set; /* Negotiated TX VHT rates */
2797} __packed;
2798
2799struct wmi_vht_rate_set_arg {
2800 u32 rx_max_rate;
2801 u32 rx_mcs_set;
2802 u32 tx_max_rate;
2803 u32 tx_mcs_set;
2804};
2805
2806struct wmi_peer_set_rates_cmd {
2807 /* peer MAC address */
2808 struct wmi_mac_addr peer_macaddr;
2809 /* legacy rate set */
2810 struct wmi_rate_set peer_legacy_rates;
2811 /* ht rate set */
2812 struct wmi_rate_set peer_ht_rates;
2813} __packed;
2814
2815struct wmi_peer_set_q_empty_callback_cmd {
2816 /* unique id identifying the VDEV, generated by the caller */
2817 __le32 vdev_id;
2818 /* peer MAC address */
2819 struct wmi_mac_addr peer_macaddr;
2820 __le32 callback_enable;
2821} __packed;
2822
2823#define WMI_PEER_AUTH 0x00000001
2824#define WMI_PEER_QOS 0x00000002
2825#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
2826#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
2827#define WMI_PEER_APSD 0x00000800
2828#define WMI_PEER_HT 0x00001000
2829#define WMI_PEER_40MHZ 0x00002000
2830#define WMI_PEER_STBC 0x00008000
2831#define WMI_PEER_LDPC 0x00010000
2832#define WMI_PEER_DYN_MIMOPS 0x00020000
2833#define WMI_PEER_STATIC_MIMOPS 0x00040000
2834#define WMI_PEER_SPATIAL_MUX 0x00200000
2835#define WMI_PEER_VHT 0x02000000
2836#define WMI_PEER_80MHZ 0x04000000
2837#define WMI_PEER_PMF 0x08000000
2838
2839/*
2840 * Peer rate capabilities.
2841 *
2842 * This is of interest to the ratecontrol
2843 * module which resides in the firmware. The bit definitions are
2844 * consistent with that defined in if_athrate.c.
2845 */
2846#define WMI_RC_DS_FLAG 0x01
2847#define WMI_RC_CW40_FLAG 0x02
2848#define WMI_RC_SGI_FLAG 0x04
2849#define WMI_RC_HT_FLAG 0x08
2850#define WMI_RC_RTSCTS_FLAG 0x10
2851#define WMI_RC_TX_STBC_FLAG 0x20
2852#define WMI_RC_RX_STBC_FLAG 0xC0
2853#define WMI_RC_RX_STBC_FLAG_S 6
2854#define WMI_RC_WEP_TKIP_FLAG 0x100
2855#define WMI_RC_TS_FLAG 0x200
2856#define WMI_RC_UAPSD_FLAG 0x400
2857
2858/* Maximum listen interval supported by hw in units of beacon interval */
2859#define ATH10K_MAX_HW_LISTEN_INTERVAL 5
2860
2861struct wmi_peer_assoc_complete_cmd {
2862 struct wmi_mac_addr peer_macaddr;
2863 __le32 vdev_id;
2864 __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
2865 __le32 peer_associd; /* 16 LSBs */
2866 __le32 peer_flags;
2867 __le32 peer_caps; /* 16 LSBs */
2868 __le32 peer_listen_intval;
2869 __le32 peer_ht_caps;
2870 __le32 peer_max_mpdu;
2871 __le32 peer_mpdu_density; /* 0..16 */
2872 __le32 peer_rate_caps;
2873 struct wmi_rate_set peer_legacy_rates;
2874 struct wmi_rate_set peer_ht_rates;
2875 __le32 peer_nss; /* num of spatial streams */
2876 __le32 peer_vht_caps;
2877 __le32 peer_phymode;
2878 struct wmi_vht_rate_set peer_vht_rates;
2879 /* HT Operation Element of the peer. Five bytes packed in 2
2880 * INT32 array and filled from lsb to msb. */
2881 __le32 peer_ht_info[2];
2882} __packed;
2883
2884struct wmi_peer_assoc_complete_arg {
2885 u8 addr[ETH_ALEN];
2886 u32 vdev_id;
2887 bool peer_reassoc;
2888 u16 peer_aid;
2889 u32 peer_flags; /* see %WMI_PEER_ */
2890 u16 peer_caps;
2891 u32 peer_listen_intval;
2892 u32 peer_ht_caps;
2893 u32 peer_max_mpdu;
2894 u32 peer_mpdu_density; /* 0..16 */
2895 u32 peer_rate_caps; /* see %WMI_RC_ */
2896 struct wmi_rate_set_arg peer_legacy_rates;
2897 struct wmi_rate_set_arg peer_ht_rates;
2898 u32 peer_num_spatial_streams;
2899 u32 peer_vht_caps;
2900 enum wmi_phy_mode peer_phymode;
2901 struct wmi_vht_rate_set_arg peer_vht_rates;
2902};
2903
2904struct wmi_peer_add_wds_entry_cmd {
2905 /* peer MAC address */
2906 struct wmi_mac_addr peer_macaddr;
2907 /* wds MAC addr */
2908 struct wmi_mac_addr wds_macaddr;
2909} __packed;
2910
2911struct wmi_peer_remove_wds_entry_cmd {
2912 /* wds MAC addr */
2913 struct wmi_mac_addr wds_macaddr;
2914} __packed;
2915
2916struct wmi_peer_q_empty_callback_event {
2917 /* peer MAC address */
2918 struct wmi_mac_addr peer_macaddr;
2919} __packed;
2920
2921/*
2922 * Channel info WMI event
2923 */
2924struct wmi_chan_info_event {
2925 __le32 err_code;
2926 __le32 freq;
2927 __le32 cmd_flags;
2928 __le32 noise_floor;
2929 __le32 rx_clear_count;
2930 __le32 cycle_count;
2931} __packed;
2932
2933/* Beacon filter wmi command info */
2934#define BCN_FLT_MAX_SUPPORTED_IES 256
2935#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32)
2936
2937struct bss_bcn_stats {
2938 __le32 vdev_id;
2939 __le32 bss_bcnsdropped;
2940 __le32 bss_bcnsdelivered;
2941} __packed;
2942
2943struct bcn_filter_stats {
2944 __le32 bcns_dropped;
2945 __le32 bcns_delivered;
2946 __le32 activefilters;
2947 struct bss_bcn_stats bss_stats;
2948} __packed;
2949
2950struct wmi_add_bcn_filter_cmd {
2951 u32 vdev_id;
2952 u32 ie_map[BCN_FLT_MAX_ELEMS_IE_LIST];
2953} __packed;
2954
2955enum wmi_sta_keepalive_method {
2956 WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
2957 WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
2958};
2959
2960/* note: ip4 addresses are in network byte order, i.e. big endian */
2961struct wmi_sta_keepalive_arp_resp {
2962 __be32 src_ip4_addr;
2963 __be32 dest_ip4_addr;
2964 struct wmi_mac_addr dest_mac_addr;
2965} __packed;
2966
2967struct wmi_sta_keepalive_cmd {
2968 __le32 vdev_id;
2969 __le32 enabled;
2970 __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
2971 __le32 interval; /* in seconds */
2972 struct wmi_sta_keepalive_arp_resp arp_resp;
2973} __packed;
2974
2975#define ATH10K_RTS_MAX 2347
2976#define ATH10K_FRAGMT_THRESHOLD_MIN 540
2977#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
2978
2979#define WMI_MAX_EVENT 0x1000
2980/* Maximum number of pending TXed WMI packets */
2981#define WMI_MAX_PENDING_TX_COUNT 128
2982#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
2983
2984/* By default disable power save for IBSS */
2985#define ATH10K_DEFAULT_ATIM 0
2986
2987struct ath10k;
2988struct ath10k_vif;
2989
2990int ath10k_wmi_attach(struct ath10k *ar);
2991void ath10k_wmi_detach(struct ath10k *ar);
2992int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
2993int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
2994void ath10k_wmi_flush_tx(struct ath10k *ar);
2995
2996int ath10k_wmi_connect_htc_service(struct ath10k *ar);
2997int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
2998 const struct wmi_channel_arg *);
2999int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
3000int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
3001int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
3002 u16 rd5g, u16 ctl2g, u16 ctl5g);
3003int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
3004 u32 value);
3005int ath10k_wmi_cmd_init(struct ath10k *ar);
3006int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
3007void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
3008int ath10k_wmi_stop_scan(struct ath10k *ar,
3009 const struct wmi_stop_scan_arg *arg);
3010int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
3011 enum wmi_vdev_type type,
3012 enum wmi_vdev_subtype subtype,
3013 const u8 macaddr[ETH_ALEN]);
3014int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id);
3015int ath10k_wmi_vdev_start(struct ath10k *ar,
3016 const struct wmi_vdev_start_request_arg *);
3017int ath10k_wmi_vdev_restart(struct ath10k *ar,
3018 const struct wmi_vdev_start_request_arg *);
3019int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id);
3020int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
3021 const u8 *bssid);
3022int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
3023int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
3024 enum wmi_vdev_param param_id, u32 param_value);
3025int ath10k_wmi_vdev_install_key(struct ath10k *ar,
3026 const struct wmi_vdev_install_key_arg *arg);
3027int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
3028 const u8 peer_addr[ETH_ALEN]);
3029int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
3030 const u8 peer_addr[ETH_ALEN]);
3031int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
3032 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap);
3033int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
3034 const u8 *peer_addr,
3035 enum wmi_peer_param param_id, u32 param_value);
3036int ath10k_wmi_peer_assoc(struct ath10k *ar,
3037 const struct wmi_peer_assoc_complete_arg *arg);
3038int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
3039 enum wmi_sta_ps_mode psmode);
3040int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
3041 enum wmi_sta_powersave_param param_id,
3042 u32 value);
3043int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
3044 enum wmi_ap_ps_peer_param param_id, u32 value);
3045int ath10k_wmi_scan_chan_list(struct ath10k *ar,
3046 const struct wmi_scan_chan_list_arg *arg);
3047int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
3048int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
3049 const struct wmi_pdev_set_wmm_params_arg *arg);
3050int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
3051
3052#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 8e8bcc7a4805..e9bc9e616b69 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -185,7 +185,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
185 185
186 err_free_hw: 186 err_free_hw:
187 ieee80211_free_hw(hw); 187 ieee80211_free_hw(hw);
188 platform_set_drvdata(pdev, NULL);
189 err_iounmap: 188 err_iounmap:
190 iounmap(mem); 189 iounmap(mem);
191 err_out: 190 err_out:
@@ -221,7 +220,6 @@ static int ath_ahb_remove(struct platform_device *pdev)
221 220
222 ath5k_deinit_ah(ah); 221 ath5k_deinit_ah(ah);
223 iounmap(ah->iobase); 222 iounmap(ah->iobase);
224 platform_set_drvdata(pdev, NULL);
225 ieee80211_free_hw(hw); 223 ieee80211_free_hw(hw);
226 224
227 return 0; 225 return 0;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 7f702fe3ecc2..ce67ab791eae 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -60,6 +60,7 @@
60 60
61#include <asm/unaligned.h> 61#include <asm/unaligned.h>
62 62
63#include <net/mac80211.h>
63#include "base.h" 64#include "base.h"
64#include "reg.h" 65#include "reg.h"
65#include "debug.h" 66#include "debug.h"
@@ -666,9 +667,46 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
666 return htype; 667 return htype;
667} 668}
668 669
670static struct ieee80211_rate *
671ath5k_get_rate(const struct ieee80211_hw *hw,
672 const struct ieee80211_tx_info *info,
673 struct ath5k_buf *bf, int idx)
674{
675 /*
676 * convert a ieee80211_tx_rate RC-table entry to
677 * the respective ieee80211_rate struct
678 */
679 if (bf->rates[idx].idx < 0) {
680 return NULL;
681 }
682
683 return &hw->wiphy->bands[info->band]->bitrates[ bf->rates[idx].idx ];
684}
685
686static u16
687ath5k_get_rate_hw_value(const struct ieee80211_hw *hw,
688 const struct ieee80211_tx_info *info,
689 struct ath5k_buf *bf, int idx)
690{
691 struct ieee80211_rate *rate;
692 u16 hw_rate;
693 u8 rc_flags;
694
695 rate = ath5k_get_rate(hw, info, bf, idx);
696 if (!rate)
697 return 0;
698
699 rc_flags = bf->rates[idx].flags;
700 hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
701 rate->hw_value_short : rate->hw_value;
702
703 return hw_rate;
704}
705
669static int 706static int
670ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, 707ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
671 struct ath5k_txq *txq, int padsize) 708 struct ath5k_txq *txq, int padsize,
709 struct ieee80211_tx_control *control)
672{ 710{
673 struct ath5k_desc *ds = bf->desc; 711 struct ath5k_desc *ds = bf->desc;
674 struct sk_buff *skb = bf->skb; 712 struct sk_buff *skb = bf->skb;
@@ -688,7 +726,11 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
688 bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len, 726 bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
689 DMA_TO_DEVICE); 727 DMA_TO_DEVICE);
690 728
691 rate = ieee80211_get_tx_rate(ah->hw, info); 729 ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates,
730 ARRAY_SIZE(bf->rates));
731
732 rate = ath5k_get_rate(ah->hw, info, bf, 0);
733
692 if (!rate) { 734 if (!rate) {
693 ret = -EINVAL; 735 ret = -EINVAL;
694 goto err_unmap; 736 goto err_unmap;
@@ -698,8 +740,8 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
698 flags |= AR5K_TXDESC_NOACK; 740 flags |= AR5K_TXDESC_NOACK;
699 741
700 rc_flags = info->control.rates[0].flags; 742 rc_flags = info->control.rates[0].flags;
701 hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ? 743
702 rate->hw_value_short : rate->hw_value; 744 hw_rate = ath5k_get_rate_hw_value(ah->hw, info, bf, 0);
703 745
704 pktlen = skb->len; 746 pktlen = skb->len;
705 747
@@ -722,12 +764,13 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
722 duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw, 764 duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
723 info->control.vif, pktlen, info)); 765 info->control.vif, pktlen, info));
724 } 766 }
767
725 ret = ah->ah_setup_tx_desc(ah, ds, pktlen, 768 ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
726 ieee80211_get_hdrlen_from_skb(skb), padsize, 769 ieee80211_get_hdrlen_from_skb(skb), padsize,
727 get_hw_packet_type(skb), 770 get_hw_packet_type(skb),
728 (ah->ah_txpower.txp_requested * 2), 771 (ah->ah_txpower.txp_requested * 2),
729 hw_rate, 772 hw_rate,
730 info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags, 773 bf->rates[0].count, keyidx, ah->ah_tx_ant, flags,
731 cts_rate, duration); 774 cts_rate, duration);
732 if (ret) 775 if (ret)
733 goto err_unmap; 776 goto err_unmap;
@@ -736,13 +779,15 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
736 if (ah->ah_capabilities.cap_has_mrr_support) { 779 if (ah->ah_capabilities.cap_has_mrr_support) {
737 memset(mrr_rate, 0, sizeof(mrr_rate)); 780 memset(mrr_rate, 0, sizeof(mrr_rate));
738 memset(mrr_tries, 0, sizeof(mrr_tries)); 781 memset(mrr_tries, 0, sizeof(mrr_tries));
782
739 for (i = 0; i < 3; i++) { 783 for (i = 0; i < 3; i++) {
740 rate = ieee80211_get_alt_retry_rate(ah->hw, info, i); 784
785 rate = ath5k_get_rate(ah->hw, info, bf, i);
741 if (!rate) 786 if (!rate)
742 break; 787 break;
743 788
744 mrr_rate[i] = rate->hw_value; 789 mrr_rate[i] = ath5k_get_rate_hw_value(ah->hw, info, bf, i);
745 mrr_tries[i] = info->control.rates[i + 1].count; 790 mrr_tries[i] = bf->rates[i].count;
746 } 791 }
747 792
748 ath5k_hw_setup_mrr_tx_desc(ah, ds, 793 ath5k_hw_setup_mrr_tx_desc(ah, ds,
@@ -1515,7 +1560,7 @@ unlock:
1515 1560
1516void 1561void
1517ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 1562ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1518 struct ath5k_txq *txq) 1563 struct ath5k_txq *txq, struct ieee80211_tx_control *control)
1519{ 1564{
1520 struct ath5k_hw *ah = hw->priv; 1565 struct ath5k_hw *ah = hw->priv;
1521 struct ath5k_buf *bf; 1566 struct ath5k_buf *bf;
@@ -1555,7 +1600,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1555 1600
1556 bf->skb = skb; 1601 bf->skb = skb;
1557 1602
1558 if (ath5k_txbuf_setup(ah, bf, txq, padsize)) { 1603 if (ath5k_txbuf_setup(ah, bf, txq, padsize, control)) {
1559 bf->skb = NULL; 1604 bf->skb = NULL;
1560 spin_lock_irqsave(&ah->txbuflock, flags); 1605 spin_lock_irqsave(&ah->txbuflock, flags);
1561 list_add_tail(&bf->list, &ah->txbuf); 1606 list_add_tail(&bf->list, &ah->txbuf);
@@ -1571,11 +1616,13 @@ drop_packet:
1571 1616
1572static void 1617static void
1573ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb, 1618ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
1574 struct ath5k_txq *txq, struct ath5k_tx_status *ts) 1619 struct ath5k_txq *txq, struct ath5k_tx_status *ts,
1620 struct ath5k_buf *bf)
1575{ 1621{
1576 struct ieee80211_tx_info *info; 1622 struct ieee80211_tx_info *info;
1577 u8 tries[3]; 1623 u8 tries[3];
1578 int i; 1624 int i;
1625 int size = 0;
1579 1626
1580 ah->stats.tx_all_count++; 1627 ah->stats.tx_all_count++;
1581 ah->stats.tx_bytes_count += skb->len; 1628 ah->stats.tx_bytes_count += skb->len;
@@ -1587,6 +1634,9 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
1587 1634
1588 ieee80211_tx_info_clear_status(info); 1635 ieee80211_tx_info_clear_status(info);
1589 1636
1637 size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
1638 memcpy(info->status.rates, bf->rates, size);
1639
1590 for (i = 0; i < ts->ts_final_idx; i++) { 1640 for (i = 0; i < ts->ts_final_idx; i++) {
1591 struct ieee80211_tx_rate *r = 1641 struct ieee80211_tx_rate *r =
1592 &info->status.rates[i]; 1642 &info->status.rates[i];
@@ -1663,7 +1713,7 @@ ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
1663 1713
1664 dma_unmap_single(ah->dev, bf->skbaddr, skb->len, 1714 dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
1665 DMA_TO_DEVICE); 1715 DMA_TO_DEVICE);
1666 ath5k_tx_frame_completed(ah, skb, txq, &ts); 1716 ath5k_tx_frame_completed(ah, skb, txq, &ts, bf);
1667 } 1717 }
1668 1718
1669 /* 1719 /*
@@ -1917,7 +1967,7 @@ ath5k_beacon_send(struct ath5k_hw *ah)
1917 1967
1918 skb = ieee80211_get_buffered_bc(ah->hw, vif); 1968 skb = ieee80211_get_buffered_bc(ah->hw, vif);
1919 while (skb) { 1969 while (skb) {
1920 ath5k_tx_queue(ah->hw, skb, ah->cabq); 1970 ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL);
1921 1971
1922 if (ah->cabq->txq_len >= ah->cabq->txq_max) 1972 if (ah->cabq->txq_len >= ah->cabq->txq_max)
1923 break; 1973 break;
@@ -2442,7 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2442 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2492 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2443 IEEE80211_HW_SIGNAL_DBM | 2493 IEEE80211_HW_SIGNAL_DBM |
2444 IEEE80211_HW_MFP_CAPABLE | 2494 IEEE80211_HW_MFP_CAPABLE |
2445 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 2495 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
2496 IEEE80211_HW_SUPPORTS_RC_TABLE;
2446 2497
2447 hw->wiphy->interface_modes = 2498 hw->wiphy->interface_modes =
2448 BIT(NL80211_IFTYPE_AP) | 2499 BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 6c94c7ff2350..ca9a83ceeee1 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -47,6 +47,7 @@ struct ath5k_hw;
47struct ath5k_txq; 47struct ath5k_txq;
48struct ieee80211_channel; 48struct ieee80211_channel;
49struct ath_bus_ops; 49struct ath_bus_ops;
50struct ieee80211_tx_control;
50enum nl80211_iftype; 51enum nl80211_iftype;
51 52
52enum ath5k_srev_type { 53enum ath5k_srev_type {
@@ -61,11 +62,12 @@ struct ath5k_srev_name {
61}; 62};
62 63
63struct ath5k_buf { 64struct ath5k_buf {
64 struct list_head list; 65 struct list_head list;
65 struct ath5k_desc *desc; /* virtual addr of desc */ 66 struct ath5k_desc *desc; /* virtual addr of desc */
66 dma_addr_t daddr; /* physical addr of desc */ 67 dma_addr_t daddr; /* physical addr of desc */
67 struct sk_buff *skb; /* skbuff for buf */ 68 struct sk_buff *skb; /* skbuff for buf */
68 dma_addr_t skbaddr;/* physical addr of skb data */ 69 dma_addr_t skbaddr; /* physical addr of skb data */
70 struct ieee80211_tx_rate rates[4]; /* number of multi-rate stages */
69}; 71};
70 72
71struct ath5k_vif { 73struct ath5k_vif {
@@ -103,7 +105,7 @@ int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
103void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); 105void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
104void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); 106void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
105void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 107void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
106 struct ath5k_txq *txq); 108 struct ath5k_txq *txq, struct ieee80211_tx_control *control);
107 109
108const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val); 110const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
109 111
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 06f86f435711..81b686c6a376 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -66,7 +66,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
66 return; 66 return;
67 } 67 }
68 68
69 ath5k_tx_queue(hw, skb, &ah->txqs[qnum]); 69 ath5k_tx_queue(hw, skb, &ah->txqs[qnum], control);
70} 70}
71 71
72 72
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 5c9736a94e54..2437ad26949d 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3175,10 +3175,21 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3175{ 3175{
3176 struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev); 3176 struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
3177 struct ath6kl *ar = ath6kl_priv(vif->ndev); 3177 struct ath6kl *ar = ath6kl_priv(vif->ndev);
3178 u32 id; 3178 u32 id, freq;
3179 const struct ieee80211_mgmt *mgmt; 3179 const struct ieee80211_mgmt *mgmt;
3180 bool more_data, queued; 3180 bool more_data, queued;
3181 3181
3182 /* default to the current channel, but use the one specified as argument
3183 * if any
3184 */
3185 freq = vif->ch_hint;
3186 if (chan)
3187 freq = chan->center_freq;
3188
3189 /* never send freq zero to the firmware */
3190 if (WARN_ON(freq == 0))
3191 return -EINVAL;
3192
3182 mgmt = (const struct ieee80211_mgmt *) buf; 3193 mgmt = (const struct ieee80211_mgmt *) buf;
3183 if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && 3194 if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
3184 ieee80211_is_probe_resp(mgmt->frame_control) && 3195 ieee80211_is_probe_resp(mgmt->frame_control) &&
@@ -3188,8 +3199,7 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3188 * command to allow the target to fill in the generic IEs. 3199 * command to allow the target to fill in the generic IEs.
3189 */ 3200 */
3190 *cookie = 0; /* TX status not supported */ 3201 *cookie = 0; /* TX status not supported */
3191 return ath6kl_send_go_probe_resp(vif, buf, len, 3202 return ath6kl_send_go_probe_resp(vif, buf, len, freq);
3192 chan->center_freq);
3193 } 3203 }
3194 3204
3195 id = vif->send_action_id++; 3205 id = vif->send_action_id++;
@@ -3205,17 +3215,14 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3205 3215
3206 /* AP mode Power saving processing */ 3216 /* AP mode Power saving processing */
3207 if (vif->nw_type == AP_NETWORK) { 3217 if (vif->nw_type == AP_NETWORK) {
3208 queued = ath6kl_mgmt_powersave_ap(vif, 3218 queued = ath6kl_mgmt_powersave_ap(vif, id, freq, wait, buf, len,
3209 id, chan->center_freq, 3219 &more_data, no_cck);
3210 wait, buf,
3211 len, &more_data, no_cck);
3212 if (queued) 3220 if (queued)
3213 return 0; 3221 return 0;
3214 } 3222 }
3215 3223
3216 return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, 3224 return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, freq,
3217 chan->center_freq, wait, 3225 wait, buf, len, no_cck);
3218 buf, len, no_cck);
3219} 3226}
3220 3227
3221static void ath6kl_mgmt_frame_register(struct wiphy *wiphy, 3228static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
@@ -3679,6 +3686,20 @@ err:
3679 return NULL; 3686 return NULL;
3680} 3687}
3681 3688
3689#ifdef CONFIG_PM
3690static const struct wiphy_wowlan_support ath6kl_wowlan_support = {
3691 .flags = WIPHY_WOWLAN_MAGIC_PKT |
3692 WIPHY_WOWLAN_DISCONNECT |
3693 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
3694 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
3695 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
3696 WIPHY_WOWLAN_4WAY_HANDSHAKE,
3697 .n_patterns = WOW_MAX_FILTERS_PER_LIST,
3698 .pattern_min_len = 1,
3699 .pattern_max_len = WOW_PATTERN_SIZE,
3700};
3701#endif
3702
3682int ath6kl_cfg80211_init(struct ath6kl *ar) 3703int ath6kl_cfg80211_init(struct ath6kl *ar)
3683{ 3704{
3684 struct wiphy *wiphy = ar->wiphy; 3705 struct wiphy *wiphy = ar->wiphy;
@@ -3772,15 +3793,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3772 wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 3793 wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
3773 3794
3774#ifdef CONFIG_PM 3795#ifdef CONFIG_PM
3775 wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | 3796 wiphy->wowlan = &ath6kl_wowlan_support;
3776 WIPHY_WOWLAN_DISCONNECT |
3777 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
3778 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
3779 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
3780 WIPHY_WOWLAN_4WAY_HANDSHAKE;
3781 wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
3782 wiphy->wowlan.pattern_min_len = 1;
3783 wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
3784#endif 3797#endif
3785 3798
3786 wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS; 3799 wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS;
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index fe38b836cb26..dbfd17d0a5fa 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -1240,20 +1240,14 @@ static ssize_t ath6kl_force_roam_write(struct file *file,
1240 char buf[20]; 1240 char buf[20];
1241 size_t len; 1241 size_t len;
1242 u8 bssid[ETH_ALEN]; 1242 u8 bssid[ETH_ALEN];
1243 int i;
1244 int addr[ETH_ALEN];
1245 1243
1246 len = min(count, sizeof(buf) - 1); 1244 len = min(count, sizeof(buf) - 1);
1247 if (copy_from_user(buf, user_buf, len)) 1245 if (copy_from_user(buf, user_buf, len))
1248 return -EFAULT; 1246 return -EFAULT;
1249 buf[len] = '\0'; 1247 buf[len] = '\0';
1250 1248
1251 if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x", 1249 if (!mac_pton(buf, bssid))
1252 &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
1253 != ETH_ALEN)
1254 return -EINVAL; 1250 return -EINVAL;
1255 for (i = 0; i < ETH_ALEN; i++)
1256 bssid[i] = addr[i];
1257 1251
1258 ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid); 1252 ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
1259 if (ret) 1253 if (ret)
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 40ffee6184fd..6a67881f94d6 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1696,10 +1696,16 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
1696 test_bit(WMI_READY, 1696 test_bit(WMI_READY,
1697 &ar->flag), 1697 &ar->flag),
1698 WMI_TIMEOUT); 1698 WMI_TIMEOUT);
1699 if (timeleft <= 0) {
1700 clear_bit(WMI_READY, &ar->flag);
1701 ath6kl_err("wmi is not ready or wait was interrupted: %ld\n",
1702 timeleft);
1703 ret = -EIO;
1704 goto err_htc_stop;
1705 }
1699 1706
1700 ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n"); 1707 ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");
1701 1708
1702
1703 if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) { 1709 if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
1704 ath6kl_info("%s %s fw %s api %d%s\n", 1710 ath6kl_info("%s %s fw %s api %d%s\n",
1705 ar->hw.name, 1711 ar->hw.name,
@@ -1718,12 +1724,6 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
1718 goto err_htc_stop; 1724 goto err_htc_stop;
1719 } 1725 }
1720 1726
1721 if (!timeleft || signal_pending(current)) {
1722 ath6kl_err("wmi is not ready or wait was interrupted\n");
1723 ret = -EIO;
1724 goto err_htc_stop;
1725 }
1726
1727 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__); 1727 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
1728 1728
1729 /* communicate the wmi protocol verision to the target */ 1729 /* communicate the wmi protocol verision to the target */
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index fb141454c6d2..7126bdd4236c 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -345,17 +345,17 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
345{ 345{
346 struct hif_scatter_req *s_req; 346 struct hif_scatter_req *s_req;
347 struct bus_request *bus_req; 347 struct bus_request *bus_req;
348 int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz; 348 int i, scat_req_sz, scat_list_sz, size;
349 u8 *virt_buf; 349 u8 *virt_buf;
350 350
351 scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); 351 scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
352 scat_req_sz = sizeof(*s_req) + scat_list_sz; 352 scat_req_sz = sizeof(*s_req) + scat_list_sz;
353 353
354 if (!virt_scat) 354 if (!virt_scat)
355 sg_sz = sizeof(struct scatterlist) * n_scat_entry; 355 size = sizeof(struct scatterlist) * n_scat_entry;
356 else 356 else
357 buf_sz = 2 * L1_CACHE_BYTES + 357 size = 2 * L1_CACHE_BYTES +
358 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; 358 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
359 359
360 for (i = 0; i < n_scat_req; i++) { 360 for (i = 0; i < n_scat_req; i++) {
361 /* allocate the scatter request */ 361 /* allocate the scatter request */
@@ -364,7 +364,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
364 return -ENOMEM; 364 return -ENOMEM;
365 365
366 if (virt_scat) { 366 if (virt_scat) {
367 virt_buf = kzalloc(buf_sz, GFP_KERNEL); 367 virt_buf = kzalloc(size, GFP_KERNEL);
368 if (!virt_buf) { 368 if (!virt_buf) {
369 kfree(s_req); 369 kfree(s_req);
370 return -ENOMEM; 370 return -ENOMEM;
@@ -374,7 +374,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
374 (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf); 374 (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
375 } else { 375 } else {
376 /* allocate sglist */ 376 /* allocate sglist */
377 s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL); 377 s_req->sgentries = kzalloc(size, GFP_KERNEL);
378 378
379 if (!s_req->sgentries) { 379 if (!s_req->sgentries) {
380 kfree(s_req); 380 kfree(s_req);
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index bed0d337712d..f38ff6a6255e 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -1061,6 +1061,22 @@ static void ath6kl_usb_cleanup_scatter(struct ath6kl *ar)
1061 return; 1061 return;
1062} 1062}
1063 1063
1064static int ath6kl_usb_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
1065{
1066 /*
1067 * cfg80211 suspend/WOW currently not supported for USB.
1068 */
1069 return 0;
1070}
1071
1072static int ath6kl_usb_resume(struct ath6kl *ar)
1073{
1074 /*
1075 * cfg80211 resume currently not supported for USB.
1076 */
1077 return 0;
1078}
1079
1064static const struct ath6kl_hif_ops ath6kl_usb_ops = { 1080static const struct ath6kl_hif_ops ath6kl_usb_ops = {
1065 .diag_read32 = ath6kl_usb_diag_read32, 1081 .diag_read32 = ath6kl_usb_diag_read32,
1066 .diag_write32 = ath6kl_usb_diag_write32, 1082 .diag_write32 = ath6kl_usb_diag_write32,
@@ -1074,6 +1090,8 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = {
1074 .pipe_map_service = ath6kl_usb_map_service_pipe, 1090 .pipe_map_service = ath6kl_usb_map_service_pipe,
1075 .pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number, 1091 .pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number,
1076 .cleanup_scatter = ath6kl_usb_cleanup_scatter, 1092 .cleanup_scatter = ath6kl_usb_cleanup_scatter,
1093 .suspend = ath6kl_usb_suspend,
1094 .resume = ath6kl_usb_resume,
1077}; 1095};
1078 1096
1079/* ath6kl usb driver registered functions */ 1097/* ath6kl usb driver registered functions */
@@ -1152,7 +1170,7 @@ static void ath6kl_usb_remove(struct usb_interface *interface)
1152 1170
1153#ifdef CONFIG_PM 1171#ifdef CONFIG_PM
1154 1172
1155static int ath6kl_usb_suspend(struct usb_interface *interface, 1173static int ath6kl_usb_pm_suspend(struct usb_interface *interface,
1156 pm_message_t message) 1174 pm_message_t message)
1157{ 1175{
1158 struct ath6kl_usb *device; 1176 struct ath6kl_usb *device;
@@ -1162,7 +1180,7 @@ static int ath6kl_usb_suspend(struct usb_interface *interface,
1162 return 0; 1180 return 0;
1163} 1181}
1164 1182
1165static int ath6kl_usb_resume(struct usb_interface *interface) 1183static int ath6kl_usb_pm_resume(struct usb_interface *interface)
1166{ 1184{
1167 struct ath6kl_usb *device; 1185 struct ath6kl_usb *device;
1168 device = usb_get_intfdata(interface); 1186 device = usb_get_intfdata(interface);
@@ -1175,7 +1193,7 @@ static int ath6kl_usb_resume(struct usb_interface *interface)
1175 return 0; 1193 return 0;
1176} 1194}
1177 1195
1178static int ath6kl_usb_reset_resume(struct usb_interface *intf) 1196static int ath6kl_usb_pm_reset_resume(struct usb_interface *intf)
1179{ 1197{
1180 if (usb_get_intfdata(intf)) 1198 if (usb_get_intfdata(intf))
1181 ath6kl_usb_remove(intf); 1199 ath6kl_usb_remove(intf);
@@ -1184,9 +1202,9 @@ static int ath6kl_usb_reset_resume(struct usb_interface *intf)
1184 1202
1185#else 1203#else
1186 1204
1187#define ath6kl_usb_suspend NULL 1205#define ath6kl_usb_pm_suspend NULL
1188#define ath6kl_usb_resume NULL 1206#define ath6kl_usb_pm_resume NULL
1189#define ath6kl_usb_reset_resume NULL 1207#define ath6kl_usb_pm_reset_resume NULL
1190 1208
1191#endif 1209#endif
1192 1210
@@ -1201,9 +1219,9 @@ MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids);
1201static struct usb_driver ath6kl_usb_driver = { 1219static struct usb_driver ath6kl_usb_driver = {
1202 .name = "ath6kl_usb", 1220 .name = "ath6kl_usb",
1203 .probe = ath6kl_usb_probe, 1221 .probe = ath6kl_usb_probe,
1204 .suspend = ath6kl_usb_suspend, 1222 .suspend = ath6kl_usb_pm_suspend,
1205 .resume = ath6kl_usb_resume, 1223 .resume = ath6kl_usb_pm_resume,
1206 .reset_resume = ath6kl_usb_reset_resume, 1224 .reset_resume = ath6kl_usb_pm_reset_resume,
1207 .disconnect = ath6kl_usb_remove, 1225 .disconnect = ath6kl_usb_remove,
1208 .id_table = ath6kl_usb_ids, 1226 .id_table = ath6kl_usb_ids,
1209 .supports_autosuspend = true, 1227 .supports_autosuspend = true,
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 3c2cbc9d6295..d491a3178986 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -28,7 +28,7 @@ config ATH9K
28 Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family 28 Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
29 of chipsets. For a specific list of supported external 29 of chipsets. For a specific list of supported external
30 cards, laptops that already ship with these cards and 30 cards, laptops that already ship with these cards and
31 APs that come with these cards refer to to ath9k wiki 31 APs that come with these cards refer to ath9k wiki
32 products page: 32 products page:
33 33
34 http://wireless.kernel.org/en/users/Drivers/ath9k/products 34 http://wireless.kernel.org/en/users/Drivers/ath9k/products
@@ -84,14 +84,6 @@ config ATH9K_DFS_CERTIFIED
84 developed. At this point enabling this option won't do anything 84 developed. At this point enabling this option won't do anything
85 except increase code size. 85 except increase code size.
86 86
87config ATH9K_MAC_DEBUG
88 bool "Atheros MAC statistics"
89 depends on ATH9K_DEBUGFS
90 default y
91 ---help---
92 This option enables collection of statistics for Rx/Tx status
93 data and some other MAC related statistics
94
95config ATH9K_LEGACY_RATE_CONTROL 87config ATH9K_LEGACY_RATE_CONTROL
96 bool "Atheros ath9k rate control" 88 bool "Atheros ath9k rate control"
97 depends on ATH9K 89 depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index d1ff3c246a12..072e4b531067 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -150,7 +150,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
150 free_irq(irq, sc); 150 free_irq(irq, sc);
151 err_free_hw: 151 err_free_hw:
152 ieee80211_free_hw(hw); 152 ieee80211_free_hw(hw);
153 platform_set_drvdata(pdev, NULL);
154 return ret; 153 return ret;
155} 154}
156 155
@@ -164,7 +163,6 @@ static int ath_ahb_remove(struct platform_device *pdev)
164 ath9k_deinit_device(sc); 163 ath9k_deinit_device(sc);
165 free_irq(sc->irq, sc); 164 free_irq(sc->irq, sc);
166 ieee80211_free_hw(sc->hw); 165 ieee80211_free_hw(sc->hw);
167 platform_set_drvdata(pdev, NULL);
168 } 166 }
169 167
170 return 0; 168 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 7ecd40f07a74..4994bea809eb 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -46,8 +46,8 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = {
46 { 5, 4, 1 }, /* lvl 5 */ 46 { 5, 4, 1 }, /* lvl 5 */
47 { 6, 5, 1 }, /* lvl 6 */ 47 { 6, 5, 1 }, /* lvl 6 */
48 { 7, 6, 1 }, /* lvl 7 */ 48 { 7, 6, 1 }, /* lvl 7 */
49 { 7, 6, 0 }, /* lvl 8 */ 49 { 7, 7, 1 }, /* lvl 8 */
50 { 7, 7, 0 } /* lvl 9 */ 50 { 7, 8, 0 } /* lvl 9 */
51}; 51};
52#define ATH9K_ANI_OFDM_NUM_LEVEL \ 52#define ATH9K_ANI_OFDM_NUM_LEVEL \
53 ARRAY_SIZE(ofdm_level_table) 53 ARRAY_SIZE(ofdm_level_table)
@@ -91,8 +91,8 @@ static const struct ani_cck_level_entry cck_level_table[] = {
91 { 4, 0 }, /* lvl 4 */ 91 { 4, 0 }, /* lvl 4 */
92 { 5, 0 }, /* lvl 5 */ 92 { 5, 0 }, /* lvl 5 */
93 { 6, 0 }, /* lvl 6 */ 93 { 6, 0 }, /* lvl 6 */
94 { 6, 0 }, /* lvl 7 (only for high rssi) */ 94 { 7, 0 }, /* lvl 7 (only for high rssi) */
95 { 7, 0 } /* lvl 8 (only for high rssi) */ 95 { 8, 0 } /* lvl 8 (only for high rssi) */
96}; 96};
97 97
98#define ATH9K_ANI_CCK_NUM_LEVEL \ 98#define ATH9K_ANI_CCK_NUM_LEVEL \
@@ -118,10 +118,10 @@ static void ath9k_ani_restart(struct ath_hw *ah)
118{ 118{
119 struct ar5416AniState *aniState; 119 struct ar5416AniState *aniState;
120 120
121 if (!DO_ANI(ah)) 121 if (!ah->curchan)
122 return; 122 return;
123 123
124 aniState = &ah->curchan->ani; 124 aniState = &ah->ani;
125 aniState->listenTime = 0; 125 aniState->listenTime = 0;
126 126
127 ENABLE_REGWRITE_BUFFER(ah); 127 ENABLE_REGWRITE_BUFFER(ah);
@@ -143,7 +143,7 @@ static void ath9k_ani_restart(struct ath_hw *ah)
143static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel, 143static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
144 bool scan) 144 bool scan)
145{ 145{
146 struct ar5416AniState *aniState = &ah->curchan->ani; 146 struct ar5416AniState *aniState = &ah->ani;
147 struct ath_common *common = ath9k_hw_common(ah); 147 struct ath_common *common = ath9k_hw_common(ah);
148 const struct ani_ofdm_level_entry *entry_ofdm; 148 const struct ani_ofdm_level_entry *entry_ofdm;
149 const struct ani_cck_level_entry *entry_cck; 149 const struct ani_cck_level_entry *entry_cck;
@@ -177,10 +177,15 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
177 BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH) 177 BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH)
178 weak_sig = true; 178 weak_sig = true;
179 179
180 if (aniState->ofdmWeakSigDetect != weak_sig) 180 /*
181 ath9k_hw_ani_control(ah, 181 * OFDM Weak signal detection is always enabled for AP mode.
182 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, 182 */
183 entry_ofdm->ofdm_weak_signal_on); 183 if (ah->opmode != NL80211_IFTYPE_AP &&
184 aniState->ofdmWeakSigDetect != weak_sig) {
185 ath9k_hw_ani_control(ah,
186 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
187 entry_ofdm->ofdm_weak_signal_on);
188 }
184 189
185 if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) { 190 if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
186 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH; 191 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
@@ -195,10 +200,10 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
195{ 200{
196 struct ar5416AniState *aniState; 201 struct ar5416AniState *aniState;
197 202
198 if (!DO_ANI(ah)) 203 if (!ah->curchan)
199 return; 204 return;
200 205
201 aniState = &ah->curchan->ani; 206 aniState = &ah->ani;
202 207
203 if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL) 208 if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
204 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false); 209 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false);
@@ -210,7 +215,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
210static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel, 215static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
211 bool scan) 216 bool scan)
212{ 217{
213 struct ar5416AniState *aniState = &ah->curchan->ani; 218 struct ar5416AniState *aniState = &ah->ani;
214 struct ath_common *common = ath9k_hw_common(ah); 219 struct ath_common *common = ath9k_hw_common(ah);
215 const struct ani_ofdm_level_entry *entry_ofdm; 220 const struct ani_ofdm_level_entry *entry_ofdm;
216 const struct ani_cck_level_entry *entry_cck; 221 const struct ani_cck_level_entry *entry_cck;
@@ -251,10 +256,10 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
251{ 256{
252 struct ar5416AniState *aniState; 257 struct ar5416AniState *aniState;
253 258
254 if (!DO_ANI(ah)) 259 if (!ah->curchan)
255 return; 260 return;
256 261
257 aniState = &ah->curchan->ani; 262 aniState = &ah->ani;
258 263
259 if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL) 264 if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
260 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1, 265 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1,
@@ -269,7 +274,7 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
269{ 274{
270 struct ar5416AniState *aniState; 275 struct ar5416AniState *aniState;
271 276
272 aniState = &ah->curchan->ani; 277 aniState = &ah->ani;
273 278
274 /* lower OFDM noise immunity */ 279 /* lower OFDM noise immunity */
275 if (aniState->ofdmNoiseImmunityLevel > 0 && 280 if (aniState->ofdmNoiseImmunityLevel > 0 &&
@@ -292,12 +297,12 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
292 */ 297 */
293void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) 298void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
294{ 299{
295 struct ar5416AniState *aniState = &ah->curchan->ani; 300 struct ar5416AniState *aniState = &ah->ani;
296 struct ath9k_channel *chan = ah->curchan; 301 struct ath9k_channel *chan = ah->curchan;
297 struct ath_common *common = ath9k_hw_common(ah); 302 struct ath_common *common = ath9k_hw_common(ah);
298 int ofdm_nil, cck_nil; 303 int ofdm_nil, cck_nil;
299 304
300 if (!DO_ANI(ah)) 305 if (!ah->curchan)
301 return; 306 return;
302 307
303 BUG_ON(aniState == NULL); 308 BUG_ON(aniState == NULL);
@@ -363,24 +368,13 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
363 ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning); 368 ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning);
364 ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning); 369 ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning);
365 370
366 /*
367 * enable phy counters if hw supports or if not, enable phy
368 * interrupts (so we can count each one)
369 */
370 ath9k_ani_restart(ah); 371 ath9k_ani_restart(ah);
371
372 ENABLE_REGWRITE_BUFFER(ah);
373
374 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
375 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
376
377 REGWRITE_BUFFER_FLUSH(ah);
378} 372}
379 373
380static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) 374static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
381{ 375{
382 struct ath_common *common = ath9k_hw_common(ah); 376 struct ath_common *common = ath9k_hw_common(ah);
383 struct ar5416AniState *aniState = &ah->curchan->ani; 377 struct ar5416AniState *aniState = &ah->ani;
384 u32 phyCnt1, phyCnt2; 378 u32 phyCnt1, phyCnt2;
385 int32_t listenTime; 379 int32_t listenTime;
386 380
@@ -415,10 +409,10 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
415 struct ath_common *common = ath9k_hw_common(ah); 409 struct ath_common *common = ath9k_hw_common(ah);
416 u32 ofdmPhyErrRate, cckPhyErrRate; 410 u32 ofdmPhyErrRate, cckPhyErrRate;
417 411
418 if (!DO_ANI(ah)) 412 if (!ah->curchan)
419 return; 413 return;
420 414
421 aniState = &ah->curchan->ani; 415 aniState = &ah->ani;
422 if (!ath9k_hw_ani_read_counters(ah)) 416 if (!ath9k_hw_ani_read_counters(ah))
423 return; 417 return;
424 418
@@ -490,32 +484,22 @@ EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
490void ath9k_hw_ani_init(struct ath_hw *ah) 484void ath9k_hw_ani_init(struct ath_hw *ah)
491{ 485{
492 struct ath_common *common = ath9k_hw_common(ah); 486 struct ath_common *common = ath9k_hw_common(ah);
493 int i; 487 struct ar5416AniState *ani = &ah->ani;
494 488
495 ath_dbg(common, ANI, "Initialize ANI\n"); 489 ath_dbg(common, ANI, "Initialize ANI\n");
496 490
497 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH; 491 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
498 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW; 492 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
499
500 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH; 493 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
501 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW; 494 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
502 495
503 for (i = 0; i < ARRAY_SIZE(ah->channels); i++) { 496 ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
504 struct ath9k_channel *chan = &ah->channels[i]; 497 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
505 struct ar5416AniState *ani = &chan->ani; 498 ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false;
506 499 ani->ofdmsTurn = true;
507 ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; 500 ani->ofdmWeakSigDetect = true;
508 501 ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
509 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; 502 ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
510
511 ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false;
512
513 ani->ofdmsTurn = true;
514
515 ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
516 ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
517 ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
518 }
519 503
520 /* 504 /*
521 * since we expect some ongoing maintenance on the tables, let's sanity 505 * since we expect some ongoing maintenance on the tables, let's sanity
@@ -524,9 +508,6 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
524 ah->aniperiod = ATH9K_ANI_PERIOD; 508 ah->aniperiod = ATH9K_ANI_PERIOD;
525 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL; 509 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL;
526 510
527 if (ah->config.enable_ani)
528 ah->proc_phyerr |= HAL_PROCESS_ANI;
529
530 ath9k_ani_restart(ah); 511 ath9k_ani_restart(ah);
531 ath9k_enable_mib_counters(ah); 512 ath9k_enable_mib_counters(ah);
532} 513}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index dddb1361039a..b54a3fb01883 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -17,32 +17,19 @@
17#ifndef ANI_H 17#ifndef ANI_H
18#define ANI_H 18#define ANI_H
19 19
20#define HAL_PROCESS_ANI 0x00000001
21
22#define DO_ANI(ah) (((ah)->proc_phyerr & HAL_PROCESS_ANI) && ah->curchan)
23
24#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi) 20#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
25 21
26/* units are errors per second */ 22/* units are errors per second */
27#define ATH9K_ANI_OFDM_TRIG_HIGH 3500 23#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
28#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000 24#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
29 25
30/* units are errors per second */
31#define ATH9K_ANI_OFDM_TRIG_LOW 400 26#define ATH9K_ANI_OFDM_TRIG_LOW 400
32#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900 27#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
33 28
34/* units are errors per second */
35#define ATH9K_ANI_CCK_TRIG_HIGH 600 29#define ATH9K_ANI_CCK_TRIG_HIGH 600
36
37/* units are errors per second */
38#define ATH9K_ANI_CCK_TRIG_LOW 300 30#define ATH9K_ANI_CCK_TRIG_LOW 300
39 31
40#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
41#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
42#define ATH9K_ANI_CCK_WEAK_SIG_THR false
43
44#define ATH9K_ANI_SPUR_IMMUNE_LVL 3 32#define ATH9K_ANI_SPUR_IMMUNE_LVL 3
45
46#define ATH9K_ANI_FIRSTEP_LVL 2 33#define ATH9K_ANI_FIRSTEP_LVL 2
47 34
48#define ATH9K_ANI_RSSI_THR_HIGH 40 35#define ATH9K_ANI_RSSI_THR_HIGH 40
@@ -53,10 +40,6 @@
53/* in ms */ 40/* in ms */
54#define ATH9K_ANI_POLLINTERVAL 1000 41#define ATH9K_ANI_POLLINTERVAL 1000
55 42
56#define HAL_NOISE_IMMUNE_MAX 4
57#define HAL_SPUR_IMMUNE_MAX 7
58#define HAL_FIRST_STEP_MAX 2
59
60#define ATH9K_SIG_FIRSTEP_SETTING_MIN 0 43#define ATH9K_SIG_FIRSTEP_SETTING_MIN 0
61#define ATH9K_SIG_FIRSTEP_SETTING_MAX 20 44#define ATH9K_SIG_FIRSTEP_SETTING_MAX 20
62#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0 45#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0
@@ -111,7 +94,7 @@ struct ar5416AniState {
111 u8 mrcCCK; 94 u8 mrcCCK;
112 u8 spurImmunityLevel; 95 u8 spurImmunityLevel;
113 u8 firstepLevel; 96 u8 firstepLevel;
114 u8 ofdmWeakSigDetect; 97 bool ofdmWeakSigDetect;
115 u32 listenTime; 98 u32 listenTime;
116 u32 ofdmPhyErrCount; 99 u32 ofdmPhyErrCount;
117 u32 cckPhyErrCount; 100 u32 cckPhyErrCount;
@@ -119,8 +102,6 @@ struct ar5416AniState {
119}; 102};
120 103
121struct ar5416Stats { 104struct ar5416Stats {
122 u32 ast_ani_niup;
123 u32 ast_ani_nidown;
124 u32 ast_ani_spurup; 105 u32 ast_ani_spurup;
125 u32 ast_ani_spurdown; 106 u32 ast_ani_spurdown;
126 u32 ast_ani_ofdmon; 107 u32 ast_ani_ofdmon;
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 391da5ad6a99..d1acfe98918a 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -931,7 +931,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
931{ 931{
932 struct ath_common *common = ath9k_hw_common(ah); 932 struct ath_common *common = ath9k_hw_common(ah);
933 struct ath9k_channel *chan = ah->curchan; 933 struct ath9k_channel *chan = ah->curchan;
934 struct ar5416AniState *aniState = &chan->ani; 934 struct ar5416AniState *aniState = &ah->ani;
935 s32 value, value2; 935 s32 value, value2;
936 936
937 switch (cmd & ah->ani_function) { 937 switch (cmd & ah->ani_function) {
@@ -1207,7 +1207,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
1207{ 1207{
1208 struct ath_common *common = ath9k_hw_common(ah); 1208 struct ath_common *common = ath9k_hw_common(ah);
1209 struct ath9k_channel *chan = ah->curchan; 1209 struct ath9k_channel *chan = ah->curchan;
1210 struct ar5416AniState *aniState = &chan->ani; 1210 struct ar5416AniState *aniState = &ah->ani;
1211 struct ath9k_ani_default *iniDef; 1211 struct ath9k_ani_default *iniDef;
1212 u32 val; 1212 u32 val;
1213 1213
@@ -1251,7 +1251,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
1251 /* these levels just got reset to defaults by the INI */ 1251 /* these levels just got reset to defaults by the INI */
1252 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; 1252 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
1253 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; 1253 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
1254 aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG; 1254 aniState->ofdmWeakSigDetect = true;
1255 aniState->mrcCCK = false; /* not available on pre AR9003 */ 1255 aniState->mrcCCK = false; /* not available on pre AR9003 */
1256} 1256}
1257 1257
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 830daa12feb6..8dc2d089cdef 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -38,10 +38,6 @@ static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
38 else 38 else
39 INIT_INI_ARRAY(&ah->iniPcieSerdes, 39 INIT_INI_ARRAY(&ah->iniPcieSerdes,
40 ar9280PciePhy_clkreq_always_on_L1_9280); 40 ar9280PciePhy_clkreq_always_on_L1_9280);
41#ifdef CONFIG_PM_SLEEP
42 INIT_INI_ARRAY(&ah->iniPcieSerdesWow,
43 ar9280PciePhy_awow);
44#endif
45 41
46 if (AR_SREV_9287_11_OR_LATER(ah)) { 42 if (AR_SREV_9287_11_OR_LATER(ah)) {
47 INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1); 43 INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index beb6162cf97c..4d18c66a6790 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -925,20 +925,6 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
925 {0x00004044, 0x00000000}, 925 {0x00004044, 0x00000000},
926}; 926};
927 927
928static const u32 ar9280PciePhy_awow[][2] = {
929 /* Addr allmodes */
930 {0x00004040, 0x9248fd00},
931 {0x00004040, 0x24924924},
932 {0x00004040, 0xa8000019},
933 {0x00004040, 0x13160820},
934 {0x00004040, 0xe5980560},
935 {0x00004040, 0xc01dcffd},
936 {0x00004040, 0x1aaabe41},
937 {0x00004040, 0xbe105554},
938 {0x00004040, 0x00043007},
939 {0x00004044, 0x00000000},
940};
941
942static const u32 ar9285Modes_9285_1_2[][5] = { 928static const u32 ar9285Modes_9285_1_2[][5] = {
943 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 929 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
944 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160}, 930 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index e6b92ff265fd..d105e43d22e1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3563,14 +3563,24 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3563{ 3563{
3564 struct ath9k_hw_capabilities *pCap = &ah->caps; 3564 struct ath9k_hw_capabilities *pCap = &ah->caps;
3565 int chain; 3565 int chain;
3566 u32 regval; 3566 u32 regval, value, gpio;
3567 static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = { 3567 static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
3568 AR_PHY_SWITCH_CHAIN_0, 3568 AR_PHY_SWITCH_CHAIN_0,
3569 AR_PHY_SWITCH_CHAIN_1, 3569 AR_PHY_SWITCH_CHAIN_1,
3570 AR_PHY_SWITCH_CHAIN_2, 3570 AR_PHY_SWITCH_CHAIN_2,
3571 }; 3571 };
3572 3572
3573 u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz); 3573 if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0)) {
3574 if (ah->config.xlna_gpio)
3575 gpio = ah->config.xlna_gpio;
3576 else
3577 gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485;
3578
3579 ath9k_hw_cfg_output(ah, gpio,
3580 AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
3581 }
3582
3583 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
3574 3584
3575 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { 3585 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
3576 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, 3586 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
@@ -3596,7 +3606,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3596 * 7:4 R/W SWITCH_TABLE_COM_SPDT_WLAN_IDLE 3606 * 7:4 R/W SWITCH_TABLE_COM_SPDT_WLAN_IDLE
3597 * SWITCH_TABLE_COM_SPDT_WLAN_IDLE 3607 * SWITCH_TABLE_COM_SPDT_WLAN_IDLE
3598 */ 3608 */
3599 if (AR_SREV_9462_20(ah) || AR_SREV_9565(ah)) { 3609 if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
3600 value = ar9003_switch_com_spdt_get(ah, is2ghz); 3610 value = ar9003_switch_com_spdt_get(ah, is2ghz);
3601 REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, 3611 REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
3602 AR_SWITCH_TABLE_COM_SPDT_ALL, value); 3612 AR_SWITCH_TABLE_COM_SPDT_ALL, value);
@@ -3796,7 +3806,13 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
3796 REG_RMW_FIELD(ah, ext_atten_reg[i], 3806 REG_RMW_FIELD(ah, ext_atten_reg[i],
3797 AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value); 3807 AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
3798 3808
3799 value = ar9003_hw_atten_chain_get_margin(ah, i, chan); 3809 if (AR_SREV_9485(ah) &&
3810 (ar9003_hw_get_rx_gain_idx(ah) == 0) &&
3811 ah->config.xatten_margin_cfg)
3812 value = 5;
3813 else
3814 value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
3815
3800 REG_RMW_FIELD(ah, ext_atten_reg[i], 3816 REG_RMW_FIELD(ah, ext_atten_reg[i],
3801 AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, 3817 AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
3802 value); 3818 value);
@@ -4043,8 +4059,9 @@ static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
4043{ 4059{
4044 u32 data, ko, kg; 4060 u32 data, ko, kg;
4045 4061
4046 if (!AR_SREV_9462_20(ah)) 4062 if (!AR_SREV_9462_20_OR_LATER(ah))
4047 return; 4063 return;
4064
4048 ar9300_otp_read_word(ah, 1, &data); 4065 ar9300_otp_read_word(ah, 1, &data);
4049 ko = data & 0xff; 4066 ko = data & 0xff;
4050 kg = (data >> 8) & 0xff; 4067 kg = (data >> 8) & 0xff;
@@ -4546,7 +4563,7 @@ static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah,
4546 is2GHz); 4563 is2GHz);
4547 4564
4548 for (i = 0; i < ar9300RateSize; i++) { 4565 for (i = 0; i < ar9300RateSize; i++) {
4549 ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", 4566 ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n",
4550 i, targetPowerValT2[i]); 4567 i, targetPowerValT2[i]);
4551 } 4568 }
4552} 4569}
@@ -4736,7 +4753,7 @@ tempslope:
4736 AR_PHY_TPC_19_ALPHA_THERM, temp_slope); 4753 AR_PHY_TPC_19_ALPHA_THERM, temp_slope);
4737 } 4754 }
4738 4755
4739 if (AR_SREV_9462_20(ah)) 4756 if (AR_SREV_9462_20_OR_LATER(ah))
4740 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1, 4757 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
4741 AR_PHY_TPC_19_B1_ALPHA_THERM, temp_slope); 4758 AR_PHY_TPC_19_B1_ALPHA_THERM, temp_slope);
4742 4759
@@ -5272,7 +5289,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
5272 return; 5289 return;
5273 5290
5274 for (i = 0; i < ar9300RateSize; i++) { 5291 for (i = 0; i < ar9300RateSize; i++) {
5275 ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", 5292 ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n",
5276 i, targetPowerValT2[i]); 5293 i, targetPowerValT2[i]);
5277 } 5294 }
5278 5295
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index a3523c969a3a..d402cb32283f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -24,6 +24,7 @@
24#include "ar955x_1p0_initvals.h" 24#include "ar955x_1p0_initvals.h"
25#include "ar9580_1p0_initvals.h" 25#include "ar9580_1p0_initvals.h"
26#include "ar9462_2p0_initvals.h" 26#include "ar9462_2p0_initvals.h"
27#include "ar9462_2p1_initvals.h"
27#include "ar9565_1p0_initvals.h" 28#include "ar9565_1p0_initvals.h"
28 29
29/* General hardware code for the AR9003 hadware family */ 30/* General hardware code for the AR9003 hadware family */
@@ -197,6 +198,31 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
197 198
198 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 199 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
199 ar9485_1_1_pcie_phy_clkreq_disable_L1); 200 ar9485_1_1_pcie_phy_clkreq_disable_L1);
201 } else if (AR_SREV_9462_21(ah)) {
202 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
203 ar9462_2p1_mac_core);
204 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
205 ar9462_2p1_mac_postamble);
206 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
207 ar9462_2p1_baseband_core);
208 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
209 ar9462_2p1_baseband_postamble);
210 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
211 ar9462_2p1_radio_core);
212 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
213 ar9462_2p1_radio_postamble);
214 INIT_INI_ARRAY(&ah->ini_radio_post_sys2ant,
215 ar9462_2p1_radio_postamble_sys2ant);
216 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
217 ar9462_2p1_soc_preamble);
218 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
219 ar9462_2p1_soc_postamble);
220 INIT_INI_ARRAY(&ah->iniModesRxGain,
221 ar9462_2p1_common_rx_gain);
222 INIT_INI_ARRAY(&ah->iniModesFastClock,
223 ar9462_2p1_modes_fast_clock);
224 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
225 ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
200 } else if (AR_SREV_9462_20(ah)) { 226 } else if (AR_SREV_9462_20(ah)) {
201 227
202 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core); 228 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
@@ -407,6 +433,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
407 else if (AR_SREV_9580(ah)) 433 else if (AR_SREV_9580(ah))
408 INIT_INI_ARRAY(&ah->iniModesTxGain, 434 INIT_INI_ARRAY(&ah->iniModesTxGain,
409 ar9580_1p0_lowest_ob_db_tx_gain_table); 435 ar9580_1p0_lowest_ob_db_tx_gain_table);
436 else if (AR_SREV_9462_21(ah))
437 INIT_INI_ARRAY(&ah->iniModesTxGain,
438 ar9462_2p1_modes_low_ob_db_tx_gain);
410 else if (AR_SREV_9462_20(ah)) 439 else if (AR_SREV_9462_20(ah))
411 INIT_INI_ARRAY(&ah->iniModesTxGain, 440 INIT_INI_ARRAY(&ah->iniModesTxGain,
412 ar9462_modes_low_ob_db_tx_gain_table_2p0); 441 ar9462_modes_low_ob_db_tx_gain_table_2p0);
@@ -438,6 +467,9 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
438 else if (AR_SREV_9550(ah)) 467 else if (AR_SREV_9550(ah))
439 INIT_INI_ARRAY(&ah->iniModesTxGain, 468 INIT_INI_ARRAY(&ah->iniModesTxGain,
440 ar955x_1p0_modes_no_xpa_tx_gain_table); 469 ar955x_1p0_modes_no_xpa_tx_gain_table);
470 else if (AR_SREV_9462_21(ah))
471 INIT_INI_ARRAY(&ah->iniModesTxGain,
472 ar9462_2p1_modes_high_ob_db_tx_gain);
441 else if (AR_SREV_9462_20(ah)) 473 else if (AR_SREV_9462_20(ah))
442 INIT_INI_ARRAY(&ah->iniModesTxGain, 474 INIT_INI_ARRAY(&ah->iniModesTxGain,
443 ar9462_modes_high_ob_db_tx_gain_table_2p0); 475 ar9462_modes_high_ob_db_tx_gain_table_2p0);
@@ -507,6 +539,12 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
507 else if (AR_SREV_9580(ah)) 539 else if (AR_SREV_9580(ah))
508 INIT_INI_ARRAY(&ah->iniModesTxGain, 540 INIT_INI_ARRAY(&ah->iniModesTxGain,
509 ar9580_1p0_mixed_ob_db_tx_gain_table); 541 ar9580_1p0_mixed_ob_db_tx_gain_table);
542 else if (AR_SREV_9462_21(ah))
543 INIT_INI_ARRAY(&ah->iniModesTxGain,
544 ar9462_2p1_modes_mix_ob_db_tx_gain);
545 else if (AR_SREV_9462_20(ah))
546 INIT_INI_ARRAY(&ah->iniModesTxGain,
547 ar9462_modes_mix_ob_db_tx_gain_table_2p0);
510 else 548 else
511 INIT_INI_ARRAY(&ah->iniModesTxGain, 549 INIT_INI_ARRAY(&ah->iniModesTxGain,
512 ar9300Modes_mixed_ob_db_tx_gain_table_2p2); 550 ar9300Modes_mixed_ob_db_tx_gain_table_2p2);
@@ -584,6 +622,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
584 } else if (AR_SREV_9580(ah)) 622 } else if (AR_SREV_9580(ah))
585 INIT_INI_ARRAY(&ah->iniModesRxGain, 623 INIT_INI_ARRAY(&ah->iniModesRxGain,
586 ar9580_1p0_rx_gain_table); 624 ar9580_1p0_rx_gain_table);
625 else if (AR_SREV_9462_21(ah))
626 INIT_INI_ARRAY(&ah->iniModesRxGain,
627 ar9462_2p1_common_rx_gain);
587 else if (AR_SREV_9462_20(ah)) 628 else if (AR_SREV_9462_20(ah))
588 INIT_INI_ARRAY(&ah->iniModesRxGain, 629 INIT_INI_ARRAY(&ah->iniModesRxGain,
589 ar9462_common_rx_gain_table_2p0); 630 ar9462_common_rx_gain_table_2p0);
@@ -606,6 +647,9 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
606 else if (AR_SREV_9485_11(ah)) 647 else if (AR_SREV_9485_11(ah))
607 INIT_INI_ARRAY(&ah->iniModesRxGain, 648 INIT_INI_ARRAY(&ah->iniModesRxGain,
608 ar9485Common_wo_xlna_rx_gain_1_1); 649 ar9485Common_wo_xlna_rx_gain_1_1);
650 else if (AR_SREV_9462_21(ah))
651 INIT_INI_ARRAY(&ah->iniModesRxGain,
652 ar9462_2p1_common_wo_xlna_rx_gain);
609 else if (AR_SREV_9462_20(ah)) 653 else if (AR_SREV_9462_20(ah))
610 INIT_INI_ARRAY(&ah->iniModesRxGain, 654 INIT_INI_ARRAY(&ah->iniModesRxGain,
611 ar9462_common_wo_xlna_rx_gain_table_2p0); 655 ar9462_common_wo_xlna_rx_gain_table_2p0);
@@ -627,9 +671,40 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
627 671
628static void ar9003_rx_gain_table_mode2(struct ath_hw *ah) 672static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
629{ 673{
630 if (AR_SREV_9462_20(ah)) 674 if (AR_SREV_9462_21(ah)) {
675 INIT_INI_ARRAY(&ah->iniModesRxGain,
676 ar9462_2p1_common_mixed_rx_gain);
677 INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
678 ar9462_2p1_baseband_core_mix_rxgain);
679 INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
680 ar9462_2p1_baseband_postamble_mix_rxgain);
681 INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
682 ar9462_2p1_baseband_postamble_5g_xlna);
683 } else if (AR_SREV_9462_20(ah)) {
631 INIT_INI_ARRAY(&ah->iniModesRxGain, 684 INIT_INI_ARRAY(&ah->iniModesRxGain,
632 ar9462_common_mixed_rx_gain_table_2p0); 685 ar9462_common_mixed_rx_gain_table_2p0);
686 INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
687 ar9462_2p0_baseband_core_mix_rxgain);
688 INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
689 ar9462_2p0_baseband_postamble_mix_rxgain);
690 INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
691 ar9462_2p0_baseband_postamble_5g_xlna);
692 }
693}
694
695static void ar9003_rx_gain_table_mode3(struct ath_hw *ah)
696{
697 if (AR_SREV_9462_21(ah)) {
698 INIT_INI_ARRAY(&ah->iniModesRxGain,
699 ar9462_2p1_common_5g_xlna_only_rx_gain);
700 INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
701 ar9462_2p1_baseband_postamble_5g_xlna);
702 } else if (AR_SREV_9462_20(ah)) {
703 INIT_INI_ARRAY(&ah->iniModesRxGain,
704 ar9462_2p0_5g_xlna_only_rxgain);
705 INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
706 ar9462_2p0_baseband_postamble_5g_xlna);
707 }
633} 708}
634 709
635static void ar9003_rx_gain_table_apply(struct ath_hw *ah) 710static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
@@ -645,6 +720,9 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
645 case 2: 720 case 2:
646 ar9003_rx_gain_table_mode2(ah); 721 ar9003_rx_gain_table_mode2(ah);
647 break; 722 break;
723 case 3:
724 ar9003_rx_gain_table_mode3(ah);
725 break;
648 } 726 }
649} 727}
650 728
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 301bf72c53bf..5163abd3937c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -469,6 +469,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
469 469
470 rxs->rs_status = 0; 470 rxs->rs_status = 0;
471 rxs->rs_flags = 0; 471 rxs->rs_flags = 0;
472 rxs->flag = 0;
472 473
473 rxs->rs_datalen = rxsp->status2 & AR_DataLen; 474 rxs->rs_datalen = rxsp->status2 & AR_DataLen;
474 rxs->rs_tstamp = rxsp->status3; 475 rxs->rs_tstamp = rxsp->status3;
@@ -493,8 +494,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
493 rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0; 494 rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
494 rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0; 495 rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
495 rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7); 496 rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
496 rxs->rs_flags = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0; 497 rxs->flag |= (rxsp->status4 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
497 rxs->rs_flags |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0; 498 rxs->flag |= (rxsp->status4 & AR_2040) ? RX_FLAG_40MHZ : 0;
498 499
499 rxs->evm0 = rxsp->status6; 500 rxs->evm0 = rxsp->status6;
500 rxs->evm1 = rxsp->status7; 501 rxs->evm1 = rxsp->status7;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 09c1f9da67a0..6343cc91953e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -454,6 +454,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
454 if (accum_cnt <= thresh_accum_cnt) 454 if (accum_cnt <= thresh_accum_cnt)
455 continue; 455 continue;
456 456
457 max_index++;
458
457 /* sum(tx amplitude) */ 459 /* sum(tx amplitude) */
458 accum_tx = ((data_L[i] >> 16) & 0xffff) | 460 accum_tx = ((data_L[i] >> 16) & 0xffff) |
459 ((data_U[i] & 0x7ff) << 16); 461 ((data_U[i] & 0x7ff) << 16);
@@ -468,20 +470,21 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
468 470
469 accum_tx <<= scale_factor; 471 accum_tx <<= scale_factor;
470 accum_rx <<= scale_factor; 472 accum_rx <<= scale_factor;
471 x_est[i + 1] = (((accum_tx + accum_cnt) / accum_cnt) + 32) >> 473 x_est[max_index] =
472 scale_factor; 474 (((accum_tx + accum_cnt) / accum_cnt) + 32) >>
475 scale_factor;
473 476
474 Y[i + 1] = ((((accum_rx + accum_cnt) / accum_cnt) + 32) >> 477 Y[max_index] =
478 ((((accum_rx + accum_cnt) / accum_cnt) + 32) >>
475 scale_factor) + 479 scale_factor) +
476 (1 << scale_factor) * max_index + 16; 480 (1 << scale_factor) * i + 16;
477 481
478 if (accum_ang >= (1 << 26)) 482 if (accum_ang >= (1 << 26))
479 accum_ang -= 1 << 27; 483 accum_ang -= 1 << 27;
480 484
481 theta[i + 1] = ((accum_ang * (1 << scale_factor)) + accum_cnt) / 485 theta[max_index] =
482 accum_cnt; 486 ((accum_ang * (1 << scale_factor)) + accum_cnt) /
483 487 accum_cnt;
484 max_index++;
485 } 488 }
486 489
487 /* 490 /*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e1714d7c9eeb..1f694ab3cc78 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -735,22 +735,53 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
735 return -EINVAL; 735 return -EINVAL;
736 } 736 }
737 737
738 /*
739 * SOC, MAC, BB, RADIO initvals.
740 */
738 for (i = 0; i < ATH_INI_NUM_SPLIT; i++) { 741 for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
739 ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex); 742 ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
740 ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex); 743 ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
741 ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex); 744 ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
742 ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex); 745 ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
743 if (i == ATH_INI_POST && AR_SREV_9462_20(ah)) 746 if (i == ATH_INI_POST && AR_SREV_9462_20_OR_LATER(ah))
744 ar9003_hw_prog_ini(ah, 747 ar9003_hw_prog_ini(ah,
745 &ah->ini_radio_post_sys2ant, 748 &ah->ini_radio_post_sys2ant,
746 modesIndex); 749 modesIndex);
747 } 750 }
748 751
752 /*
753 * RXGAIN initvals.
754 */
749 REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites); 755 REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
756
757 if (AR_SREV_9462_20_OR_LATER(ah)) {
758 /*
759 * CUS217 mix LNA mode.
760 */
761 if (ar9003_hw_get_rx_gain_idx(ah) == 2) {
762 REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core,
763 1, regWrites);
764 REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
765 modesIndex, regWrites);
766 }
767
768 /*
769 * 5G-XLNA
770 */
771 if ((ar9003_hw_get_rx_gain_idx(ah) == 2) ||
772 (ar9003_hw_get_rx_gain_idx(ah) == 3)) {
773 REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
774 modesIndex, regWrites);
775 }
776 }
777
750 if (AR_SREV_9550(ah)) 778 if (AR_SREV_9550(ah))
751 REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex, 779 REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
752 regWrites); 780 regWrites);
753 781
782 /*
783 * TXGAIN initvals.
784 */
754 if (AR_SREV_9550(ah)) { 785 if (AR_SREV_9550(ah)) {
755 int modes_txgain_index; 786 int modes_txgain_index;
756 787
@@ -772,8 +803,14 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
772 REG_WRITE_ARRAY(&ah->iniModesFastClock, 803 REG_WRITE_ARRAY(&ah->iniModesFastClock,
773 modesIndex, regWrites); 804 modesIndex, regWrites);
774 805
806 /*
807 * Clock frequency initvals.
808 */
775 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); 809 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
776 810
811 /*
812 * JAPAN regulatory.
813 */
777 if (chan->channel == 2484) 814 if (chan->channel == 2484)
778 ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1); 815 ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
779 816
@@ -905,7 +942,12 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
905{ 942{
906 struct ath_common *common = ath9k_hw_common(ah); 943 struct ath_common *common = ath9k_hw_common(ah);
907 struct ath9k_channel *chan = ah->curchan; 944 struct ath9k_channel *chan = ah->curchan;
908 struct ar5416AniState *aniState = &chan->ani; 945 struct ar5416AniState *aniState = &ah->ani;
946 int m1ThreshLow, m2ThreshLow;
947 int m1Thresh, m2Thresh;
948 int m2CountThr, m2CountThrLow;
949 int m1ThreshLowExt, m2ThreshLowExt;
950 int m1ThreshExt, m2ThreshExt;
909 s32 value, value2; 951 s32 value, value2;
910 952
911 switch (cmd & ah->ani_function) { 953 switch (cmd & ah->ani_function) {
@@ -919,6 +961,61 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
919 */ 961 */
920 u32 on = param ? 1 : 0; 962 u32 on = param ? 1 : 0;
921 963
964 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
965 goto skip_ws_det;
966
967 m1ThreshLow = on ?
968 aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
969 m2ThreshLow = on ?
970 aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
971 m1Thresh = on ?
972 aniState->iniDef.m1Thresh : m1Thresh_off;
973 m2Thresh = on ?
974 aniState->iniDef.m2Thresh : m2Thresh_off;
975 m2CountThr = on ?
976 aniState->iniDef.m2CountThr : m2CountThr_off;
977 m2CountThrLow = on ?
978 aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
979 m1ThreshLowExt = on ?
980 aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
981 m2ThreshLowExt = on ?
982 aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
983 m1ThreshExt = on ?
984 aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
985 m2ThreshExt = on ?
986 aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
987
988 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
989 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
990 m1ThreshLow);
991 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
992 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
993 m2ThreshLow);
994 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
995 AR_PHY_SFCORR_M1_THRESH,
996 m1Thresh);
997 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
998 AR_PHY_SFCORR_M2_THRESH,
999 m2Thresh);
1000 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
1001 AR_PHY_SFCORR_M2COUNT_THR,
1002 m2CountThr);
1003 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
1004 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
1005 m2CountThrLow);
1006 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1007 AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
1008 m1ThreshLowExt);
1009 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1010 AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
1011 m2ThreshLowExt);
1012 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1013 AR_PHY_SFCORR_EXT_M1_THRESH,
1014 m1ThreshExt);
1015 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1016 AR_PHY_SFCORR_EXT_M2_THRESH,
1017 m2ThreshExt);
1018skip_ws_det:
922 if (on) 1019 if (on)
923 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, 1020 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
924 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); 1021 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
@@ -1173,7 +1270,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
1173 struct ath9k_ani_default *iniDef; 1270 struct ath9k_ani_default *iniDef;
1174 u32 val; 1271 u32 val;
1175 1272
1176 aniState = &ah->curchan->ani; 1273 aniState = &ah->ani;
1177 iniDef = &aniState->iniDef; 1274 iniDef = &aniState->iniDef;
1178 1275
1179 ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n", 1276 ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
@@ -1214,7 +1311,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
1214 /* these levels just got reset to defaults by the INI */ 1311 /* these levels just got reset to defaults by the INI */
1215 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; 1312 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
1216 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; 1313 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
1217 aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG; 1314 aniState->ofdmWeakSigDetect = true;
1218 aniState->mrcCCK = true; 1315 aniState->mrcCCK = true;
1219} 1316}
1220 1317
@@ -1415,7 +1512,7 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
1415 ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex); 1512 ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
1416 ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex); 1513 ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
1417 1514
1418 if (AR_SREV_9462_20(ah)) 1515 if (AR_SREV_9462_20_OR_LATER(ah))
1419 ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant, 1516 ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
1420 modesIndex); 1517 modesIndex);
1421 1518
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index e71774196c01..d4d39f305a0b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -351,6 +351,8 @@
351 351
352#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118 352#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
353 353
354#define AR9300_EXT_LNA_CTL_GPIO_AR9485 9
355
354/* 356/*
355 * AGC Field Definitions 357 * AGC Field Definitions
356 */ 358 */
@@ -952,7 +954,7 @@
952#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208) 954#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
953#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c) 955#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
954#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) 956#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
955#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_AR9462(ah) ? \ 957#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_9462_20_OR_LATER(ah) ? \
956 0x280 : 0x240)) 958 0x280 : 0x240))
957#define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240) 959#define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240)
958#define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff 960#define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff
@@ -1046,7 +1048,7 @@
1046#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE) 1048#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
1047#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44) 1049#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44)
1048#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \ 1050#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \
1049 (AR_SREV_9462_20(_ah) ? 0x4c : 0x50)) 1051 (AR_SREV_9462_20_OR_LATER(_ah) ? 0x4c : 0x50))
1050#define AR_GLB_STATUS (AR_GLB_BASE + 0x48) 1052#define AR_GLB_STATUS (AR_GLB_BASE + 0x48)
1051 1053
1052/* 1054/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 999ab08c34e6..092b9d412e7f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -78,7 +78,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
78 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, 78 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
79 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, 79 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
80 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, 80 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
81 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, 81 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
82 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, 82 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
83 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 83 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
84 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 84 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -879,6 +879,69 @@ static const u32 ar9462_2p0_radio_postamble[][5] = {
879 {0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000}, 879 {0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
880}; 880};
881 881
882static const u32 ar9462_modes_mix_ob_db_tx_gain_table_2p0[][5] = {
883 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
884 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
885 {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
886 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
887 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
888 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
889 {0x0000a410, 0x0000d0da, 0x0000d0da, 0x0000d0de, 0x0000d0de},
890 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
891 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
892 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
893 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
894 {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
895 {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
896 {0x0000a514, 0x18022622, 0x18022622, 0x12000400, 0x12000400},
897 {0x0000a518, 0x1b022822, 0x1b022822, 0x16000402, 0x16000402},
898 {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
899 {0x0000a520, 0x22022c41, 0x22022c41, 0x1c000603, 0x1c000603},
900 {0x0000a524, 0x28023042, 0x28023042, 0x21000a02, 0x21000a02},
901 {0x0000a528, 0x2c023044, 0x2c023044, 0x25000a04, 0x25000a04},
902 {0x0000a52c, 0x2f023644, 0x2f023644, 0x28000a20, 0x28000a20},
903 {0x0000a530, 0x34025643, 0x34025643, 0x2c000e20, 0x2c000e20},
904 {0x0000a534, 0x38025a44, 0x38025a44, 0x30000e22, 0x30000e22},
905 {0x0000a538, 0x3b025e45, 0x3b025e45, 0x34000e24, 0x34000e24},
906 {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x38001640, 0x38001640},
907 {0x0000a540, 0x48025e6c, 0x48025e6c, 0x3c001660, 0x3c001660},
908 {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3f001861, 0x3f001861},
909 {0x0000a548, 0x55025eb3, 0x55025eb3, 0x43001a81, 0x43001a81},
910 {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x47001a83, 0x47001a83},
911 {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x4a001c84, 0x4a001c84},
912 {0x0000a554, 0x62025f56, 0x62025f56, 0x4e001ce3, 0x4e001ce3},
913 {0x0000a558, 0x66027f56, 0x66027f56, 0x52001ce5, 0x52001ce5},
914 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x56001ce9, 0x56001ce9},
915 {0x0000a560, 0x70049f56, 0x70049f56, 0x5a001ceb, 0x5a001ceb},
916 {0x0000a564, 0x751ffff6, 0x751ffff6, 0x5c001eec, 0x5c001eec},
917 {0x0000a568, 0x751ffff6, 0x751ffff6, 0x5e001ef0, 0x5e001ef0},
918 {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x60001ef4, 0x60001ef4},
919 {0x0000a570, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
920 {0x0000a574, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
921 {0x0000a578, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
922 {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
923 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
924 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
925 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
926 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
927 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
928 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
929 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
930 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
931 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
932 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
933 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
934 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
935 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
936 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
937 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
938 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
939 {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
940 {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
941 {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
942 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
943};
944
882static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = { 945static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
883 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 946 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
884 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002}, 947 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
@@ -1449,4 +1512,284 @@ static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = {
1449 {0x0000b1fc, 0x00000196}, 1512 {0x0000b1fc, 0x00000196},
1450}; 1513};
1451 1514
1515static const u32 ar9462_2p0_baseband_postamble_5g_xlna[][5] = {
1516 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1517 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
1518};
1519
1520static const u32 ar9462_2p0_5g_xlna_only_rxgain[][2] = {
1521 /* Addr allmodes */
1522 {0x0000a000, 0x00010000},
1523 {0x0000a004, 0x00030002},
1524 {0x0000a008, 0x00050004},
1525 {0x0000a00c, 0x00810080},
1526 {0x0000a010, 0x00830082},
1527 {0x0000a014, 0x01810180},
1528 {0x0000a018, 0x01830182},
1529 {0x0000a01c, 0x01850184},
1530 {0x0000a020, 0x01890188},
1531 {0x0000a024, 0x018b018a},
1532 {0x0000a028, 0x018d018c},
1533 {0x0000a02c, 0x03820190},
1534 {0x0000a030, 0x03840383},
1535 {0x0000a034, 0x03880385},
1536 {0x0000a038, 0x038a0389},
1537 {0x0000a03c, 0x038c038b},
1538 {0x0000a040, 0x0390038d},
1539 {0x0000a044, 0x03920391},
1540 {0x0000a048, 0x03940393},
1541 {0x0000a04c, 0x03960395},
1542 {0x0000a050, 0x00000000},
1543 {0x0000a054, 0x00000000},
1544 {0x0000a058, 0x00000000},
1545 {0x0000a05c, 0x00000000},
1546 {0x0000a060, 0x00000000},
1547 {0x0000a064, 0x00000000},
1548 {0x0000a068, 0x00000000},
1549 {0x0000a06c, 0x00000000},
1550 {0x0000a070, 0x00000000},
1551 {0x0000a074, 0x00000000},
1552 {0x0000a078, 0x00000000},
1553 {0x0000a07c, 0x00000000},
1554 {0x0000a080, 0x29292929},
1555 {0x0000a084, 0x29292929},
1556 {0x0000a088, 0x29292929},
1557 {0x0000a08c, 0x29292929},
1558 {0x0000a090, 0x22292929},
1559 {0x0000a094, 0x1d1d2222},
1560 {0x0000a098, 0x0c111117},
1561 {0x0000a09c, 0x00030303},
1562 {0x0000a0a0, 0x00000000},
1563 {0x0000a0a4, 0x00000000},
1564 {0x0000a0a8, 0x00000000},
1565 {0x0000a0ac, 0x00000000},
1566 {0x0000a0b0, 0x00000000},
1567 {0x0000a0b4, 0x00000000},
1568 {0x0000a0b8, 0x00000000},
1569 {0x0000a0bc, 0x00000000},
1570 {0x0000a0c0, 0x001f0000},
1571 {0x0000a0c4, 0x01000101},
1572 {0x0000a0c8, 0x011e011f},
1573 {0x0000a0cc, 0x011c011d},
1574 {0x0000a0d0, 0x02030204},
1575 {0x0000a0d4, 0x02010202},
1576 {0x0000a0d8, 0x021f0200},
1577 {0x0000a0dc, 0x0302021e},
1578 {0x0000a0e0, 0x03000301},
1579 {0x0000a0e4, 0x031e031f},
1580 {0x0000a0e8, 0x0402031d},
1581 {0x0000a0ec, 0x04000401},
1582 {0x0000a0f0, 0x041e041f},
1583 {0x0000a0f4, 0x0502041d},
1584 {0x0000a0f8, 0x05000501},
1585 {0x0000a0fc, 0x051e051f},
1586 {0x0000a100, 0x06010602},
1587 {0x0000a104, 0x061f0600},
1588 {0x0000a108, 0x061d061e},
1589 {0x0000a10c, 0x07020703},
1590 {0x0000a110, 0x07000701},
1591 {0x0000a114, 0x00000000},
1592 {0x0000a118, 0x00000000},
1593 {0x0000a11c, 0x00000000},
1594 {0x0000a120, 0x00000000},
1595 {0x0000a124, 0x00000000},
1596 {0x0000a128, 0x00000000},
1597 {0x0000a12c, 0x00000000},
1598 {0x0000a130, 0x00000000},
1599 {0x0000a134, 0x00000000},
1600 {0x0000a138, 0x00000000},
1601 {0x0000a13c, 0x00000000},
1602 {0x0000a140, 0x001f0000},
1603 {0x0000a144, 0x01000101},
1604 {0x0000a148, 0x011e011f},
1605 {0x0000a14c, 0x011c011d},
1606 {0x0000a150, 0x02030204},
1607 {0x0000a154, 0x02010202},
1608 {0x0000a158, 0x021f0200},
1609 {0x0000a15c, 0x0302021e},
1610 {0x0000a160, 0x03000301},
1611 {0x0000a164, 0x031e031f},
1612 {0x0000a168, 0x0402031d},
1613 {0x0000a16c, 0x04000401},
1614 {0x0000a170, 0x041e041f},
1615 {0x0000a174, 0x0502041d},
1616 {0x0000a178, 0x05000501},
1617 {0x0000a17c, 0x051e051f},
1618 {0x0000a180, 0x06010602},
1619 {0x0000a184, 0x061f0600},
1620 {0x0000a188, 0x061d061e},
1621 {0x0000a18c, 0x07020703},
1622 {0x0000a190, 0x07000701},
1623 {0x0000a194, 0x00000000},
1624 {0x0000a198, 0x00000000},
1625 {0x0000a19c, 0x00000000},
1626 {0x0000a1a0, 0x00000000},
1627 {0x0000a1a4, 0x00000000},
1628 {0x0000a1a8, 0x00000000},
1629 {0x0000a1ac, 0x00000000},
1630 {0x0000a1b0, 0x00000000},
1631 {0x0000a1b4, 0x00000000},
1632 {0x0000a1b8, 0x00000000},
1633 {0x0000a1bc, 0x00000000},
1634 {0x0000a1c0, 0x00000000},
1635 {0x0000a1c4, 0x00000000},
1636 {0x0000a1c8, 0x00000000},
1637 {0x0000a1cc, 0x00000000},
1638 {0x0000a1d0, 0x00000000},
1639 {0x0000a1d4, 0x00000000},
1640 {0x0000a1d8, 0x00000000},
1641 {0x0000a1dc, 0x00000000},
1642 {0x0000a1e0, 0x00000000},
1643 {0x0000a1e4, 0x00000000},
1644 {0x0000a1e8, 0x00000000},
1645 {0x0000a1ec, 0x00000000},
1646 {0x0000a1f0, 0x00000396},
1647 {0x0000a1f4, 0x00000396},
1648 {0x0000a1f8, 0x00000396},
1649 {0x0000a1fc, 0x00000196},
1650 {0x0000b000, 0x00010000},
1651 {0x0000b004, 0x00030002},
1652 {0x0000b008, 0x00050004},
1653 {0x0000b00c, 0x00810080},
1654 {0x0000b010, 0x00830082},
1655 {0x0000b014, 0x01810180},
1656 {0x0000b018, 0x01830182},
1657 {0x0000b01c, 0x01850184},
1658 {0x0000b020, 0x02810280},
1659 {0x0000b024, 0x02830282},
1660 {0x0000b028, 0x02850284},
1661 {0x0000b02c, 0x02890288},
1662 {0x0000b030, 0x028b028a},
1663 {0x0000b034, 0x0388028c},
1664 {0x0000b038, 0x038a0389},
1665 {0x0000b03c, 0x038c038b},
1666 {0x0000b040, 0x0390038d},
1667 {0x0000b044, 0x03920391},
1668 {0x0000b048, 0x03940393},
1669 {0x0000b04c, 0x03960395},
1670 {0x0000b050, 0x00000000},
1671 {0x0000b054, 0x00000000},
1672 {0x0000b058, 0x00000000},
1673 {0x0000b05c, 0x00000000},
1674 {0x0000b060, 0x00000000},
1675 {0x0000b064, 0x00000000},
1676 {0x0000b068, 0x00000000},
1677 {0x0000b06c, 0x00000000},
1678 {0x0000b070, 0x00000000},
1679 {0x0000b074, 0x00000000},
1680 {0x0000b078, 0x00000000},
1681 {0x0000b07c, 0x00000000},
1682 {0x0000b080, 0x2a2d2f32},
1683 {0x0000b084, 0x21232328},
1684 {0x0000b088, 0x19191c1e},
1685 {0x0000b08c, 0x12141417},
1686 {0x0000b090, 0x07070e0e},
1687 {0x0000b094, 0x03030305},
1688 {0x0000b098, 0x00000003},
1689 {0x0000b09c, 0x00000000},
1690 {0x0000b0a0, 0x00000000},
1691 {0x0000b0a4, 0x00000000},
1692 {0x0000b0a8, 0x00000000},
1693 {0x0000b0ac, 0x00000000},
1694 {0x0000b0b0, 0x00000000},
1695 {0x0000b0b4, 0x00000000},
1696 {0x0000b0b8, 0x00000000},
1697 {0x0000b0bc, 0x00000000},
1698 {0x0000b0c0, 0x003f0020},
1699 {0x0000b0c4, 0x00400041},
1700 {0x0000b0c8, 0x0140005f},
1701 {0x0000b0cc, 0x0160015f},
1702 {0x0000b0d0, 0x017e017f},
1703 {0x0000b0d4, 0x02410242},
1704 {0x0000b0d8, 0x025f0240},
1705 {0x0000b0dc, 0x027f0260},
1706 {0x0000b0e0, 0x0341027e},
1707 {0x0000b0e4, 0x035f0340},
1708 {0x0000b0e8, 0x037f0360},
1709 {0x0000b0ec, 0x04400441},
1710 {0x0000b0f0, 0x0460045f},
1711 {0x0000b0f4, 0x0541047f},
1712 {0x0000b0f8, 0x055f0540},
1713 {0x0000b0fc, 0x057f0560},
1714 {0x0000b100, 0x06400641},
1715 {0x0000b104, 0x0660065f},
1716 {0x0000b108, 0x067e067f},
1717 {0x0000b10c, 0x07410742},
1718 {0x0000b110, 0x075f0740},
1719 {0x0000b114, 0x077f0760},
1720 {0x0000b118, 0x07800781},
1721 {0x0000b11c, 0x07a0079f},
1722 {0x0000b120, 0x07c107bf},
1723 {0x0000b124, 0x000007c0},
1724 {0x0000b128, 0x00000000},
1725 {0x0000b12c, 0x00000000},
1726 {0x0000b130, 0x00000000},
1727 {0x0000b134, 0x00000000},
1728 {0x0000b138, 0x00000000},
1729 {0x0000b13c, 0x00000000},
1730 {0x0000b140, 0x003f0020},
1731 {0x0000b144, 0x00400041},
1732 {0x0000b148, 0x0140005f},
1733 {0x0000b14c, 0x0160015f},
1734 {0x0000b150, 0x017e017f},
1735 {0x0000b154, 0x02410242},
1736 {0x0000b158, 0x025f0240},
1737 {0x0000b15c, 0x027f0260},
1738 {0x0000b160, 0x0341027e},
1739 {0x0000b164, 0x035f0340},
1740 {0x0000b168, 0x037f0360},
1741 {0x0000b16c, 0x04400441},
1742 {0x0000b170, 0x0460045f},
1743 {0x0000b174, 0x0541047f},
1744 {0x0000b178, 0x055f0540},
1745 {0x0000b17c, 0x057f0560},
1746 {0x0000b180, 0x06400641},
1747 {0x0000b184, 0x0660065f},
1748 {0x0000b188, 0x067e067f},
1749 {0x0000b18c, 0x07410742},
1750 {0x0000b190, 0x075f0740},
1751 {0x0000b194, 0x077f0760},
1752 {0x0000b198, 0x07800781},
1753 {0x0000b19c, 0x07a0079f},
1754 {0x0000b1a0, 0x07c107bf},
1755 {0x0000b1a4, 0x000007c0},
1756 {0x0000b1a8, 0x00000000},
1757 {0x0000b1ac, 0x00000000},
1758 {0x0000b1b0, 0x00000000},
1759 {0x0000b1b4, 0x00000000},
1760 {0x0000b1b8, 0x00000000},
1761 {0x0000b1bc, 0x00000000},
1762 {0x0000b1c0, 0x00000000},
1763 {0x0000b1c4, 0x00000000},
1764 {0x0000b1c8, 0x00000000},
1765 {0x0000b1cc, 0x00000000},
1766 {0x0000b1d0, 0x00000000},
1767 {0x0000b1d4, 0x00000000},
1768 {0x0000b1d8, 0x00000000},
1769 {0x0000b1dc, 0x00000000},
1770 {0x0000b1e0, 0x00000000},
1771 {0x0000b1e4, 0x00000000},
1772 {0x0000b1e8, 0x00000000},
1773 {0x0000b1ec, 0x00000000},
1774 {0x0000b1f0, 0x00000396},
1775 {0x0000b1f4, 0x00000396},
1776 {0x0000b1f8, 0x00000396},
1777 {0x0000b1fc, 0x00000196},
1778};
1779
1780static const u32 ar9462_2p0_baseband_core_mix_rxgain[][2] = {
1781 /* Addr allmodes */
1782 {0x00009fd0, 0x0a2d6b93},
1783};
1784
1785static const u32 ar9462_2p0_baseband_postamble_mix_rxgain[][5] = {
1786 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1787 {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
1788 {0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da},
1789 {0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81},
1790 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8},
1791 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e},
1792 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e},
1793};
1794
1452#endif /* INITVALS_9462_2P0_H */ 1795#endif /* INITVALS_9462_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
new file mode 100644
index 000000000000..4dbc294df7e3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -0,0 +1,1774 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef INITVALS_9462_2P1_H
19#define INITVALS_9462_2P1_H
20
21/* AR9462 2.1 */
22
23static const u32 ar9462_2p1_mac_core[][2] = {
24 /* Addr allmodes */
25 {0x00000008, 0x00000000},
26 {0x00000030, 0x000e0085},
27 {0x00000034, 0x00000005},
28 {0x00000040, 0x00000000},
29 {0x00000044, 0x00000000},
30 {0x00000048, 0x00000008},
31 {0x0000004c, 0x00000010},
32 {0x00000050, 0x00000000},
33 {0x00001040, 0x002ffc0f},
34 {0x00001044, 0x002ffc0f},
35 {0x00001048, 0x002ffc0f},
36 {0x0000104c, 0x002ffc0f},
37 {0x00001050, 0x002ffc0f},
38 {0x00001054, 0x002ffc0f},
39 {0x00001058, 0x002ffc0f},
40 {0x0000105c, 0x002ffc0f},
41 {0x00001060, 0x002ffc0f},
42 {0x00001064, 0x002ffc0f},
43 {0x000010f0, 0x00000100},
44 {0x00001270, 0x00000000},
45 {0x000012b0, 0x00000000},
46 {0x000012f0, 0x00000000},
47 {0x0000143c, 0x00000000},
48 {0x0000147c, 0x00000000},
49 {0x00001810, 0x0f000003},
50 {0x00008000, 0x00000000},
51 {0x00008004, 0x00000000},
52 {0x00008008, 0x00000000},
53 {0x0000800c, 0x00000000},
54 {0x00008018, 0x00000000},
55 {0x00008020, 0x00000000},
56 {0x00008038, 0x00000000},
57 {0x0000803c, 0x00080000},
58 {0x00008040, 0x00000000},
59 {0x00008044, 0x00000000},
60 {0x00008048, 0x00000000},
61 {0x0000804c, 0xffffffff},
62 {0x00008054, 0x00000000},
63 {0x00008058, 0x00000000},
64 {0x0000805c, 0x000fc78f},
65 {0x00008060, 0x0000000f},
66 {0x00008064, 0x00000000},
67 {0x00008070, 0x00000310},
68 {0x00008074, 0x00000020},
69 {0x00008078, 0x00000000},
70 {0x0000809c, 0x0000000f},
71 {0x000080a0, 0x00000000},
72 {0x000080a4, 0x02ff0000},
73 {0x000080a8, 0x0e070605},
74 {0x000080ac, 0x0000000d},
75 {0x000080b0, 0x00000000},
76 {0x000080b4, 0x00000000},
77 {0x000080b8, 0x00000000},
78 {0x000080bc, 0x00000000},
79 {0x000080c0, 0x2a800000},
80 {0x000080c4, 0x06900168},
81 {0x000080c8, 0x13881c20},
82 {0x000080cc, 0x01f40000},
83 {0x000080d0, 0x00252500},
84 {0x000080d4, 0x00b00005},
85 {0x000080d8, 0x00400002},
86 {0x000080dc, 0x00000000},
87 {0x000080e0, 0xffffffff},
88 {0x000080e4, 0x0000ffff},
89 {0x000080e8, 0x3f3f3f3f},
90 {0x000080ec, 0x00000000},
91 {0x000080f0, 0x00000000},
92 {0x000080f4, 0x00000000},
93 {0x000080fc, 0x00020000},
94 {0x00008100, 0x00000000},
95 {0x00008108, 0x00000052},
96 {0x0000810c, 0x00000000},
97 {0x00008110, 0x00000000},
98 {0x00008114, 0x000007ff},
99 {0x00008118, 0x000000aa},
100 {0x0000811c, 0x00003210},
101 {0x00008124, 0x00000000},
102 {0x00008128, 0x00000000},
103 {0x0000812c, 0x00000000},
104 {0x00008130, 0x00000000},
105 {0x00008134, 0x00000000},
106 {0x00008138, 0x00000000},
107 {0x0000813c, 0x0000ffff},
108 {0x00008144, 0xffffffff},
109 {0x00008168, 0x00000000},
110 {0x0000816c, 0x00000000},
111 {0x00008170, 0x18486e00},
112 {0x00008174, 0x33332210},
113 {0x00008178, 0x00000000},
114 {0x0000817c, 0x00020000},
115 {0x000081c4, 0x33332210},
116 {0x000081c8, 0x00000000},
117 {0x000081cc, 0x00000000},
118 {0x000081d4, 0x00000000},
119 {0x000081ec, 0x00000000},
120 {0x000081f0, 0x00000000},
121 {0x000081f4, 0x00000000},
122 {0x000081f8, 0x00000000},
123 {0x000081fc, 0x00000000},
124 {0x00008240, 0x00100000},
125 {0x00008244, 0x0010f400},
126 {0x00008248, 0x00000800},
127 {0x0000824c, 0x0001e800},
128 {0x00008250, 0x00000000},
129 {0x00008254, 0x00000000},
130 {0x00008258, 0x00000000},
131 {0x0000825c, 0x40000000},
132 {0x00008260, 0x00080922},
133 {0x00008264, 0x99c00010},
134 {0x00008268, 0xffffffff},
135 {0x0000826c, 0x0000ffff},
136 {0x00008270, 0x00000000},
137 {0x00008274, 0x40000000},
138 {0x00008278, 0x003e4180},
139 {0x0000827c, 0x00000004},
140 {0x00008284, 0x0000002c},
141 {0x00008288, 0x0000002c},
142 {0x0000828c, 0x000000ff},
143 {0x00008294, 0x00000000},
144 {0x00008298, 0x00000000},
145 {0x0000829c, 0x00000000},
146 {0x00008300, 0x00000140},
147 {0x00008314, 0x00000000},
148 {0x0000831c, 0x0000010d},
149 {0x00008328, 0x00000000},
150 {0x0000832c, 0x0000001f},
151 {0x00008330, 0x00000302},
152 {0x00008334, 0x00000700},
153 {0x00008338, 0xffff0000},
154 {0x0000833c, 0x02400000},
155 {0x00008340, 0x000107ff},
156 {0x00008344, 0xaa48107b},
157 {0x00008348, 0x008f0000},
158 {0x0000835c, 0x00000000},
159 {0x00008360, 0xffffffff},
160 {0x00008364, 0xffffffff},
161 {0x00008368, 0x00000000},
162 {0x00008370, 0x00000000},
163 {0x00008374, 0x000000ff},
164 {0x00008378, 0x00000000},
165 {0x0000837c, 0x00000000},
166 {0x00008380, 0xffffffff},
167 {0x00008384, 0xffffffff},
168 {0x00008390, 0xffffffff},
169 {0x00008394, 0xffffffff},
170 {0x00008398, 0x00000000},
171 {0x0000839c, 0x00000000},
172 {0x000083a4, 0x0000fa14},
173 {0x000083a8, 0x000f0c00},
174 {0x000083ac, 0x33332210},
175 {0x000083b0, 0x33332210},
176 {0x000083b4, 0x33332210},
177 {0x000083b8, 0x33332210},
178 {0x000083bc, 0x00000000},
179 {0x000083c0, 0x00000000},
180 {0x000083c4, 0x00000000},
181 {0x000083c8, 0x00000000},
182 {0x000083cc, 0x00000200},
183 {0x000083d0, 0x000301ff},
184};
185
186static const u32 ar9462_2p1_mac_postamble[][5] = {
187 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
188 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
189 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
190 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
191 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
192 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
193 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
194 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
195 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
196};
197
198static const u32 ar9462_2p1_baseband_core[][2] = {
199 /* Addr allmodes */
200 {0x00009800, 0xafe68e30},
201 {0x00009804, 0xfd14e000},
202 {0x00009808, 0x9c0a9f6b},
203 {0x0000980c, 0x04900000},
204 {0x00009814, 0x9280c00a},
205 {0x00009818, 0x00000000},
206 {0x0000981c, 0x00020028},
207 {0x00009834, 0x6400a290},
208 {0x00009838, 0x0108ecff},
209 {0x0000983c, 0x0d000600},
210 {0x00009880, 0x201fff00},
211 {0x00009884, 0x00001042},
212 {0x000098a4, 0x00200400},
213 {0x000098b0, 0x32440bbe},
214 {0x000098d0, 0x004b6a8e},
215 {0x000098d4, 0x00000820},
216 {0x000098dc, 0x00000000},
217 {0x000098e4, 0x01ffffff},
218 {0x000098e8, 0x01ffffff},
219 {0x000098ec, 0x01ffffff},
220 {0x000098f0, 0x00000000},
221 {0x000098f4, 0x00000000},
222 {0x00009bf0, 0x80000000},
223 {0x00009c04, 0xff55ff55},
224 {0x00009c08, 0x0320ff55},
225 {0x00009c0c, 0x00000000},
226 {0x00009c10, 0x00000000},
227 {0x00009c14, 0x00046384},
228 {0x00009c18, 0x05b6b440},
229 {0x00009c1c, 0x00b6b440},
230 {0x00009d00, 0xc080a333},
231 {0x00009d04, 0x40206c10},
232 {0x00009d08, 0x009c4060},
233 {0x00009d0c, 0x9883800a},
234 {0x00009d10, 0x01834061},
235 {0x00009d14, 0x00c0040b},
236 {0x00009d18, 0x00000000},
237 {0x00009e08, 0x0038230c},
238 {0x00009e24, 0x990bb515},
239 {0x00009e28, 0x0c6f0000},
240 {0x00009e30, 0x06336f77},
241 {0x00009e34, 0x6af6532f},
242 {0x00009e38, 0x0cc80c00},
243 {0x00009e40, 0x15262820},
244 {0x00009e4c, 0x00001004},
245 {0x00009e50, 0x00ff03f1},
246 {0x00009e54, 0xe4c555c2},
247 {0x00009e58, 0xfd857722},
248 {0x00009e5c, 0xe9198724},
249 {0x00009fc0, 0x803e4788},
250 {0x00009fc4, 0x0001efb5},
251 {0x00009fcc, 0x40000014},
252 {0x00009fd0, 0x0a193b93},
253 {0x0000a20c, 0x00000000},
254 {0x0000a220, 0x00000000},
255 {0x0000a224, 0x00000000},
256 {0x0000a228, 0x10002310},
257 {0x0000a23c, 0x00000000},
258 {0x0000a244, 0x0c000000},
259 {0x0000a2a0, 0x00000001},
260 {0x0000a2c0, 0x00000001},
261 {0x0000a2c8, 0x00000000},
262 {0x0000a2cc, 0x18c43433},
263 {0x0000a2d4, 0x00000000},
264 {0x0000a2ec, 0x00000000},
265 {0x0000a2f0, 0x00000000},
266 {0x0000a2f4, 0x00000000},
267 {0x0000a2f8, 0x00000000},
268 {0x0000a344, 0x00000000},
269 {0x0000a34c, 0x00000000},
270 {0x0000a350, 0x0000a000},
271 {0x0000a364, 0x00000000},
272 {0x0000a370, 0x00000000},
273 {0x0000a390, 0x00000001},
274 {0x0000a394, 0x00000444},
275 {0x0000a398, 0x001f0e0f},
276 {0x0000a39c, 0x0075393f},
277 {0x0000a3a0, 0xb79f6427},
278 {0x0000a3c0, 0x20202020},
279 {0x0000a3c4, 0x22222220},
280 {0x0000a3c8, 0x20200020},
281 {0x0000a3cc, 0x20202020},
282 {0x0000a3d0, 0x20202020},
283 {0x0000a3d4, 0x20202020},
284 {0x0000a3d8, 0x20202020},
285 {0x0000a3dc, 0x20202020},
286 {0x0000a3e0, 0x20202020},
287 {0x0000a3e4, 0x20202020},
288 {0x0000a3e8, 0x20202020},
289 {0x0000a3ec, 0x20202020},
290 {0x0000a3f0, 0x00000000},
291 {0x0000a3f4, 0x00000006},
292 {0x0000a3f8, 0x0c9bd380},
293 {0x0000a3fc, 0x000f0f01},
294 {0x0000a400, 0x8fa91f01},
295 {0x0000a404, 0x00000000},
296 {0x0000a408, 0x0e79e5c6},
297 {0x0000a40c, 0x00820820},
298 {0x0000a414, 0x1ce739ce},
299 {0x0000a418, 0x2d001dce},
300 {0x0000a434, 0x00000000},
301 {0x0000a438, 0x00001801},
302 {0x0000a43c, 0x00100000},
303 {0x0000a444, 0x00000000},
304 {0x0000a448, 0x05000080},
305 {0x0000a44c, 0x00000001},
306 {0x0000a450, 0x00010000},
307 {0x0000a454, 0x07000000},
308 {0x0000a644, 0xbfad9d74},
309 {0x0000a648, 0x0048060a},
310 {0x0000a64c, 0x00002037},
311 {0x0000a670, 0x03020100},
312 {0x0000a674, 0x09080504},
313 {0x0000a678, 0x0d0c0b0a},
314 {0x0000a67c, 0x13121110},
315 {0x0000a680, 0x31301514},
316 {0x0000a684, 0x35343332},
317 {0x0000a688, 0x00000036},
318 {0x0000a690, 0x00000838},
319 {0x0000a6b0, 0x0000000a},
320 {0x0000a6b4, 0x00512c01},
321 {0x0000a7c0, 0x00000000},
322 {0x0000a7c4, 0xfffffffc},
323 {0x0000a7c8, 0x00000000},
324 {0x0000a7cc, 0x00000000},
325 {0x0000a7d0, 0x00000000},
326 {0x0000a7d4, 0x00000004},
327 {0x0000a7dc, 0x00000000},
328 {0x0000a7f0, 0x80000000},
329 {0x0000a8d0, 0x004b6a8e},
330 {0x0000a8d4, 0x00000820},
331 {0x0000a8dc, 0x00000000},
332 {0x0000a8f0, 0x00000000},
333 {0x0000a8f4, 0x00000000},
334 {0x0000abf0, 0x80000000},
335 {0x0000b2d0, 0x00000080},
336 {0x0000b2d4, 0x00000000},
337 {0x0000b2ec, 0x00000000},
338 {0x0000b2f0, 0x00000000},
339 {0x0000b2f4, 0x00000000},
340 {0x0000b2f8, 0x00000000},
341 {0x0000b408, 0x0e79e5c0},
342 {0x0000b40c, 0x00820820},
343 {0x0000b420, 0x00000000},
344 {0x0000b6b0, 0x0000000a},
345 {0x0000b6b4, 0x00000001},
346};
347
348static const u32 ar9462_2p1_baseband_postamble[][5] = {
349 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
350 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
351 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
352 {0x00009824, 0x63c640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
353 {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
354 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
355 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
356 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
357 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a2},
358 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
359 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
360 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
361 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
362 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
363 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
364 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
365 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
366 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
367 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
368 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
369 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
370 {0x0000a204, 0x01318fc0, 0x01318fc4, 0x01318fc4, 0x01318fc0},
371 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
372 {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
373 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
374 {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
375 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
376 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
377 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
378 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
379 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
380 {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
381 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
382 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
383 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
384 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
385 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
386 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
387 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
388 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
389 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
390 {0x0000a3a4, 0x00000050, 0x00000050, 0x00000000, 0x00000000},
391 {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
392 {0x0000a3ac, 0xaaaaaa00, 0xaa30aa30, 0xaaaaaa00, 0xaaaaaa00},
393 {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
394 {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
395 {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
396 {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
397 {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
398 {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
399 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
400 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
401 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
402 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
403 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
404 {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
405};
406
407static const u32 ar9462_2p1_radio_core[][2] = {
408 /* Addr allmodes */
409 {0x00016000, 0x36db6db6},
410 {0x00016004, 0x6db6db40},
411 {0x00016008, 0x73f00000},
412 {0x0001600c, 0x00000000},
413 {0x00016010, 0x6d820001},
414 {0x00016040, 0x7f80fff8},
415 {0x0001604c, 0x2699e04f},
416 {0x00016050, 0x6db6db6c},
417 {0x00016058, 0x6c200000},
418 {0x00016080, 0x000c0000},
419 {0x00016084, 0x9a68048c},
420 {0x00016088, 0x54214514},
421 {0x0001608c, 0x1203040b},
422 {0x00016090, 0x24926490},
423 {0x00016098, 0xd2888888},
424 {0x000160a0, 0x0a108ffe},
425 {0x000160a4, 0x812fc491},
426 {0x000160a8, 0x423c8000},
427 {0x000160b4, 0x92000000},
428 {0x000160b8, 0x0285dddc},
429 {0x000160bc, 0x02908888},
430 {0x000160c0, 0x00adb6d0},
431 {0x000160c4, 0x6db6db60},
432 {0x000160c8, 0x6db6db6c},
433 {0x000160cc, 0x0de6c1b0},
434 {0x00016100, 0x3fffbe04},
435 {0x00016104, 0xfff80000},
436 {0x00016108, 0x00200400},
437 {0x00016110, 0x00000000},
438 {0x00016144, 0x02084080},
439 {0x00016148, 0x000080c0},
440 {0x00016280, 0x050a0001},
441 {0x00016284, 0x3d841418},
442 {0x00016288, 0x00000000},
443 {0x0001628c, 0xe3000000},
444 {0x00016290, 0xa1005080},
445 {0x00016294, 0x00000020},
446 {0x00016298, 0x54a82900},
447 {0x00016340, 0x121e4276},
448 {0x00016344, 0x00300000},
449 {0x00016400, 0x36db6db6},
450 {0x00016404, 0x6db6db40},
451 {0x00016408, 0x73f00000},
452 {0x0001640c, 0x00000000},
453 {0x00016410, 0x6c800001},
454 {0x00016440, 0x7f80fff8},
455 {0x0001644c, 0x4699e04f},
456 {0x00016450, 0x6db6db6c},
457 {0x00016500, 0x3fffbe04},
458 {0x00016504, 0xfff80000},
459 {0x00016508, 0x00200400},
460 {0x00016510, 0x00000000},
461 {0x00016544, 0x02084080},
462 {0x00016548, 0x000080c0},
463};
464
465static const u32 ar9462_2p1_radio_postamble[][5] = {
466 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
467 {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
468 {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
469 {0x0001610c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
470 {0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
471};
472
473static const u32 ar9462_2p1_soc_preamble[][2] = {
474 /* Addr allmodes */
475 {0x000040a4, 0x00a0c1c9},
476 {0x00007020, 0x00000000},
477 {0x00007034, 0x00000002},
478 {0x00007038, 0x000004c2},
479};
480
481static const u32 ar9462_2p1_soc_postamble[][5] = {
482 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
483 {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
484};
485
486static const u32 ar9462_2p1_radio_postamble_sys2ant[][5] = {
487 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
488 {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
489 {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
490 {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
491};
492
493static const u32 ar9462_2p1_common_rx_gain[][2] = {
494 /* Addr allmodes */
495 {0x0000a000, 0x00010000},
496 {0x0000a004, 0x00030002},
497 {0x0000a008, 0x00050004},
498 {0x0000a00c, 0x00810080},
499 {0x0000a010, 0x00830082},
500 {0x0000a014, 0x01810180},
501 {0x0000a018, 0x01830182},
502 {0x0000a01c, 0x01850184},
503 {0x0000a020, 0x01890188},
504 {0x0000a024, 0x018b018a},
505 {0x0000a028, 0x018d018c},
506 {0x0000a02c, 0x01910190},
507 {0x0000a030, 0x01930192},
508 {0x0000a034, 0x01950194},
509 {0x0000a038, 0x038a0196},
510 {0x0000a03c, 0x038c038b},
511 {0x0000a040, 0x0390038d},
512 {0x0000a044, 0x03920391},
513 {0x0000a048, 0x03940393},
514 {0x0000a04c, 0x03960395},
515 {0x0000a050, 0x00000000},
516 {0x0000a054, 0x00000000},
517 {0x0000a058, 0x00000000},
518 {0x0000a05c, 0x00000000},
519 {0x0000a060, 0x00000000},
520 {0x0000a064, 0x00000000},
521 {0x0000a068, 0x00000000},
522 {0x0000a06c, 0x00000000},
523 {0x0000a070, 0x00000000},
524 {0x0000a074, 0x00000000},
525 {0x0000a078, 0x00000000},
526 {0x0000a07c, 0x00000000},
527 {0x0000a080, 0x22222229},
528 {0x0000a084, 0x1d1d1d1d},
529 {0x0000a088, 0x1d1d1d1d},
530 {0x0000a08c, 0x1d1d1d1d},
531 {0x0000a090, 0x171d1d1d},
532 {0x0000a094, 0x11111717},
533 {0x0000a098, 0x00030311},
534 {0x0000a09c, 0x00000000},
535 {0x0000a0a0, 0x00000000},
536 {0x0000a0a4, 0x00000000},
537 {0x0000a0a8, 0x00000000},
538 {0x0000a0ac, 0x00000000},
539 {0x0000a0b0, 0x00000000},
540 {0x0000a0b4, 0x00000000},
541 {0x0000a0b8, 0x00000000},
542 {0x0000a0bc, 0x00000000},
543 {0x0000a0c0, 0x001f0000},
544 {0x0000a0c4, 0x01000101},
545 {0x0000a0c8, 0x011e011f},
546 {0x0000a0cc, 0x011c011d},
547 {0x0000a0d0, 0x02030204},
548 {0x0000a0d4, 0x02010202},
549 {0x0000a0d8, 0x021f0200},
550 {0x0000a0dc, 0x0302021e},
551 {0x0000a0e0, 0x03000301},
552 {0x0000a0e4, 0x031e031f},
553 {0x0000a0e8, 0x0402031d},
554 {0x0000a0ec, 0x04000401},
555 {0x0000a0f0, 0x041e041f},
556 {0x0000a0f4, 0x0502041d},
557 {0x0000a0f8, 0x05000501},
558 {0x0000a0fc, 0x051e051f},
559 {0x0000a100, 0x06010602},
560 {0x0000a104, 0x061f0600},
561 {0x0000a108, 0x061d061e},
562 {0x0000a10c, 0x07020703},
563 {0x0000a110, 0x07000701},
564 {0x0000a114, 0x00000000},
565 {0x0000a118, 0x00000000},
566 {0x0000a11c, 0x00000000},
567 {0x0000a120, 0x00000000},
568 {0x0000a124, 0x00000000},
569 {0x0000a128, 0x00000000},
570 {0x0000a12c, 0x00000000},
571 {0x0000a130, 0x00000000},
572 {0x0000a134, 0x00000000},
573 {0x0000a138, 0x00000000},
574 {0x0000a13c, 0x00000000},
575 {0x0000a140, 0x001f0000},
576 {0x0000a144, 0x01000101},
577 {0x0000a148, 0x011e011f},
578 {0x0000a14c, 0x011c011d},
579 {0x0000a150, 0x02030204},
580 {0x0000a154, 0x02010202},
581 {0x0000a158, 0x021f0200},
582 {0x0000a15c, 0x0302021e},
583 {0x0000a160, 0x03000301},
584 {0x0000a164, 0x031e031f},
585 {0x0000a168, 0x0402031d},
586 {0x0000a16c, 0x04000401},
587 {0x0000a170, 0x041e041f},
588 {0x0000a174, 0x0502041d},
589 {0x0000a178, 0x05000501},
590 {0x0000a17c, 0x051e051f},
591 {0x0000a180, 0x06010602},
592 {0x0000a184, 0x061f0600},
593 {0x0000a188, 0x061d061e},
594 {0x0000a18c, 0x07020703},
595 {0x0000a190, 0x07000701},
596 {0x0000a194, 0x00000000},
597 {0x0000a198, 0x00000000},
598 {0x0000a19c, 0x00000000},
599 {0x0000a1a0, 0x00000000},
600 {0x0000a1a4, 0x00000000},
601 {0x0000a1a8, 0x00000000},
602 {0x0000a1ac, 0x00000000},
603 {0x0000a1b0, 0x00000000},
604 {0x0000a1b4, 0x00000000},
605 {0x0000a1b8, 0x00000000},
606 {0x0000a1bc, 0x00000000},
607 {0x0000a1c0, 0x00000000},
608 {0x0000a1c4, 0x00000000},
609 {0x0000a1c8, 0x00000000},
610 {0x0000a1cc, 0x00000000},
611 {0x0000a1d0, 0x00000000},
612 {0x0000a1d4, 0x00000000},
613 {0x0000a1d8, 0x00000000},
614 {0x0000a1dc, 0x00000000},
615 {0x0000a1e0, 0x00000000},
616 {0x0000a1e4, 0x00000000},
617 {0x0000a1e8, 0x00000000},
618 {0x0000a1ec, 0x00000000},
619 {0x0000a1f0, 0x00000396},
620 {0x0000a1f4, 0x00000396},
621 {0x0000a1f8, 0x00000396},
622 {0x0000a1fc, 0x00000196},
623 {0x0000b000, 0x00010000},
624 {0x0000b004, 0x00030002},
625 {0x0000b008, 0x00050004},
626 {0x0000b00c, 0x00810080},
627 {0x0000b010, 0x00830082},
628 {0x0000b014, 0x01810180},
629 {0x0000b018, 0x01830182},
630 {0x0000b01c, 0x01850184},
631 {0x0000b020, 0x02810280},
632 {0x0000b024, 0x02830282},
633 {0x0000b028, 0x02850284},
634 {0x0000b02c, 0x02890288},
635 {0x0000b030, 0x028b028a},
636 {0x0000b034, 0x0388028c},
637 {0x0000b038, 0x038a0389},
638 {0x0000b03c, 0x038c038b},
639 {0x0000b040, 0x0390038d},
640 {0x0000b044, 0x03920391},
641 {0x0000b048, 0x03940393},
642 {0x0000b04c, 0x03960395},
643 {0x0000b050, 0x00000000},
644 {0x0000b054, 0x00000000},
645 {0x0000b058, 0x00000000},
646 {0x0000b05c, 0x00000000},
647 {0x0000b060, 0x00000000},
648 {0x0000b064, 0x00000000},
649 {0x0000b068, 0x00000000},
650 {0x0000b06c, 0x00000000},
651 {0x0000b070, 0x00000000},
652 {0x0000b074, 0x00000000},
653 {0x0000b078, 0x00000000},
654 {0x0000b07c, 0x00000000},
655 {0x0000b080, 0x2a2d2f32},
656 {0x0000b084, 0x21232328},
657 {0x0000b088, 0x19191c1e},
658 {0x0000b08c, 0x12141417},
659 {0x0000b090, 0x07070e0e},
660 {0x0000b094, 0x03030305},
661 {0x0000b098, 0x00000003},
662 {0x0000b09c, 0x00000000},
663 {0x0000b0a0, 0x00000000},
664 {0x0000b0a4, 0x00000000},
665 {0x0000b0a8, 0x00000000},
666 {0x0000b0ac, 0x00000000},
667 {0x0000b0b0, 0x00000000},
668 {0x0000b0b4, 0x00000000},
669 {0x0000b0b8, 0x00000000},
670 {0x0000b0bc, 0x00000000},
671 {0x0000b0c0, 0x003f0020},
672 {0x0000b0c4, 0x00400041},
673 {0x0000b0c8, 0x0140005f},
674 {0x0000b0cc, 0x0160015f},
675 {0x0000b0d0, 0x017e017f},
676 {0x0000b0d4, 0x02410242},
677 {0x0000b0d8, 0x025f0240},
678 {0x0000b0dc, 0x027f0260},
679 {0x0000b0e0, 0x0341027e},
680 {0x0000b0e4, 0x035f0340},
681 {0x0000b0e8, 0x037f0360},
682 {0x0000b0ec, 0x04400441},
683 {0x0000b0f0, 0x0460045f},
684 {0x0000b0f4, 0x0541047f},
685 {0x0000b0f8, 0x055f0540},
686 {0x0000b0fc, 0x057f0560},
687 {0x0000b100, 0x06400641},
688 {0x0000b104, 0x0660065f},
689 {0x0000b108, 0x067e067f},
690 {0x0000b10c, 0x07410742},
691 {0x0000b110, 0x075f0740},
692 {0x0000b114, 0x077f0760},
693 {0x0000b118, 0x07800781},
694 {0x0000b11c, 0x07a0079f},
695 {0x0000b120, 0x07c107bf},
696 {0x0000b124, 0x000007c0},
697 {0x0000b128, 0x00000000},
698 {0x0000b12c, 0x00000000},
699 {0x0000b130, 0x00000000},
700 {0x0000b134, 0x00000000},
701 {0x0000b138, 0x00000000},
702 {0x0000b13c, 0x00000000},
703 {0x0000b140, 0x003f0020},
704 {0x0000b144, 0x00400041},
705 {0x0000b148, 0x0140005f},
706 {0x0000b14c, 0x0160015f},
707 {0x0000b150, 0x017e017f},
708 {0x0000b154, 0x02410242},
709 {0x0000b158, 0x025f0240},
710 {0x0000b15c, 0x027f0260},
711 {0x0000b160, 0x0341027e},
712 {0x0000b164, 0x035f0340},
713 {0x0000b168, 0x037f0360},
714 {0x0000b16c, 0x04400441},
715 {0x0000b170, 0x0460045f},
716 {0x0000b174, 0x0541047f},
717 {0x0000b178, 0x055f0540},
718 {0x0000b17c, 0x057f0560},
719 {0x0000b180, 0x06400641},
720 {0x0000b184, 0x0660065f},
721 {0x0000b188, 0x067e067f},
722 {0x0000b18c, 0x07410742},
723 {0x0000b190, 0x075f0740},
724 {0x0000b194, 0x077f0760},
725 {0x0000b198, 0x07800781},
726 {0x0000b19c, 0x07a0079f},
727 {0x0000b1a0, 0x07c107bf},
728 {0x0000b1a4, 0x000007c0},
729 {0x0000b1a8, 0x00000000},
730 {0x0000b1ac, 0x00000000},
731 {0x0000b1b0, 0x00000000},
732 {0x0000b1b4, 0x00000000},
733 {0x0000b1b8, 0x00000000},
734 {0x0000b1bc, 0x00000000},
735 {0x0000b1c0, 0x00000000},
736 {0x0000b1c4, 0x00000000},
737 {0x0000b1c8, 0x00000000},
738 {0x0000b1cc, 0x00000000},
739 {0x0000b1d0, 0x00000000},
740 {0x0000b1d4, 0x00000000},
741 {0x0000b1d8, 0x00000000},
742 {0x0000b1dc, 0x00000000},
743 {0x0000b1e0, 0x00000000},
744 {0x0000b1e4, 0x00000000},
745 {0x0000b1e8, 0x00000000},
746 {0x0000b1ec, 0x00000000},
747 {0x0000b1f0, 0x00000396},
748 {0x0000b1f4, 0x00000396},
749 {0x0000b1f8, 0x00000396},
750 {0x0000b1fc, 0x00000196},
751};
752
753static const u32 ar9462_2p1_common_mixed_rx_gain[][2] = {
754 /* Addr allmodes */
755 {0x0000a000, 0x00010000},
756 {0x0000a004, 0x00030002},
757 {0x0000a008, 0x00050004},
758 {0x0000a00c, 0x00810080},
759 {0x0000a010, 0x00830082},
760 {0x0000a014, 0x01810180},
761 {0x0000a018, 0x01830182},
762 {0x0000a01c, 0x01850184},
763 {0x0000a020, 0x01890188},
764 {0x0000a024, 0x018b018a},
765 {0x0000a028, 0x018d018c},
766 {0x0000a02c, 0x03820190},
767 {0x0000a030, 0x03840383},
768 {0x0000a034, 0x03880385},
769 {0x0000a038, 0x038a0389},
770 {0x0000a03c, 0x038c038b},
771 {0x0000a040, 0x0390038d},
772 {0x0000a044, 0x03920391},
773 {0x0000a048, 0x03940393},
774 {0x0000a04c, 0x03960395},
775 {0x0000a050, 0x00000000},
776 {0x0000a054, 0x00000000},
777 {0x0000a058, 0x00000000},
778 {0x0000a05c, 0x00000000},
779 {0x0000a060, 0x00000000},
780 {0x0000a064, 0x00000000},
781 {0x0000a068, 0x00000000},
782 {0x0000a06c, 0x00000000},
783 {0x0000a070, 0x00000000},
784 {0x0000a074, 0x00000000},
785 {0x0000a078, 0x00000000},
786 {0x0000a07c, 0x00000000},
787 {0x0000a080, 0x29292929},
788 {0x0000a084, 0x29292929},
789 {0x0000a088, 0x29292929},
790 {0x0000a08c, 0x29292929},
791 {0x0000a090, 0x22292929},
792 {0x0000a094, 0x1d1d2222},
793 {0x0000a098, 0x0c111117},
794 {0x0000a09c, 0x00030303},
795 {0x0000a0a0, 0x00000000},
796 {0x0000a0a4, 0x00000000},
797 {0x0000a0a8, 0x00000000},
798 {0x0000a0ac, 0x00000000},
799 {0x0000a0b0, 0x00000000},
800 {0x0000a0b4, 0x00000000},
801 {0x0000a0b8, 0x00000000},
802 {0x0000a0bc, 0x00000000},
803 {0x0000a0c0, 0x001f0000},
804 {0x0000a0c4, 0x01000101},
805 {0x0000a0c8, 0x011e011f},
806 {0x0000a0cc, 0x011c011d},
807 {0x0000a0d0, 0x02030204},
808 {0x0000a0d4, 0x02010202},
809 {0x0000a0d8, 0x021f0200},
810 {0x0000a0dc, 0x0302021e},
811 {0x0000a0e0, 0x03000301},
812 {0x0000a0e4, 0x031e031f},
813 {0x0000a0e8, 0x0402031d},
814 {0x0000a0ec, 0x04000401},
815 {0x0000a0f0, 0x041e041f},
816 {0x0000a0f4, 0x0502041d},
817 {0x0000a0f8, 0x05000501},
818 {0x0000a0fc, 0x051e051f},
819 {0x0000a100, 0x06010602},
820 {0x0000a104, 0x061f0600},
821 {0x0000a108, 0x061d061e},
822 {0x0000a10c, 0x07020703},
823 {0x0000a110, 0x07000701},
824 {0x0000a114, 0x00000000},
825 {0x0000a118, 0x00000000},
826 {0x0000a11c, 0x00000000},
827 {0x0000a120, 0x00000000},
828 {0x0000a124, 0x00000000},
829 {0x0000a128, 0x00000000},
830 {0x0000a12c, 0x00000000},
831 {0x0000a130, 0x00000000},
832 {0x0000a134, 0x00000000},
833 {0x0000a138, 0x00000000},
834 {0x0000a13c, 0x00000000},
835 {0x0000a140, 0x001f0000},
836 {0x0000a144, 0x01000101},
837 {0x0000a148, 0x011e011f},
838 {0x0000a14c, 0x011c011d},
839 {0x0000a150, 0x02030204},
840 {0x0000a154, 0x02010202},
841 {0x0000a158, 0x021f0200},
842 {0x0000a15c, 0x0302021e},
843 {0x0000a160, 0x03000301},
844 {0x0000a164, 0x031e031f},
845 {0x0000a168, 0x0402031d},
846 {0x0000a16c, 0x04000401},
847 {0x0000a170, 0x041e041f},
848 {0x0000a174, 0x0502041d},
849 {0x0000a178, 0x05000501},
850 {0x0000a17c, 0x051e051f},
851 {0x0000a180, 0x06010602},
852 {0x0000a184, 0x061f0600},
853 {0x0000a188, 0x061d061e},
854 {0x0000a18c, 0x07020703},
855 {0x0000a190, 0x07000701},
856 {0x0000a194, 0x00000000},
857 {0x0000a198, 0x00000000},
858 {0x0000a19c, 0x00000000},
859 {0x0000a1a0, 0x00000000},
860 {0x0000a1a4, 0x00000000},
861 {0x0000a1a8, 0x00000000},
862 {0x0000a1ac, 0x00000000},
863 {0x0000a1b0, 0x00000000},
864 {0x0000a1b4, 0x00000000},
865 {0x0000a1b8, 0x00000000},
866 {0x0000a1bc, 0x00000000},
867 {0x0000a1c0, 0x00000000},
868 {0x0000a1c4, 0x00000000},
869 {0x0000a1c8, 0x00000000},
870 {0x0000a1cc, 0x00000000},
871 {0x0000a1d0, 0x00000000},
872 {0x0000a1d4, 0x00000000},
873 {0x0000a1d8, 0x00000000},
874 {0x0000a1dc, 0x00000000},
875 {0x0000a1e0, 0x00000000},
876 {0x0000a1e4, 0x00000000},
877 {0x0000a1e8, 0x00000000},
878 {0x0000a1ec, 0x00000000},
879 {0x0000a1f0, 0x00000396},
880 {0x0000a1f4, 0x00000396},
881 {0x0000a1f8, 0x00000396},
882 {0x0000a1fc, 0x00000196},
883 {0x0000b000, 0x00010000},
884 {0x0000b004, 0x00030002},
885 {0x0000b008, 0x00050004},
886 {0x0000b00c, 0x00810080},
887 {0x0000b010, 0x00830082},
888 {0x0000b014, 0x01810180},
889 {0x0000b018, 0x01830182},
890 {0x0000b01c, 0x01850184},
891 {0x0000b020, 0x02810280},
892 {0x0000b024, 0x02830282},
893 {0x0000b028, 0x02850284},
894 {0x0000b02c, 0x02890288},
895 {0x0000b030, 0x028b028a},
896 {0x0000b034, 0x0388028c},
897 {0x0000b038, 0x038a0389},
898 {0x0000b03c, 0x038c038b},
899 {0x0000b040, 0x0390038d},
900 {0x0000b044, 0x03920391},
901 {0x0000b048, 0x03940393},
902 {0x0000b04c, 0x03960395},
903 {0x0000b050, 0x00000000},
904 {0x0000b054, 0x00000000},
905 {0x0000b058, 0x00000000},
906 {0x0000b05c, 0x00000000},
907 {0x0000b060, 0x00000000},
908 {0x0000b064, 0x00000000},
909 {0x0000b068, 0x00000000},
910 {0x0000b06c, 0x00000000},
911 {0x0000b070, 0x00000000},
912 {0x0000b074, 0x00000000},
913 {0x0000b078, 0x00000000},
914 {0x0000b07c, 0x00000000},
915 {0x0000b080, 0x2a2d2f32},
916 {0x0000b084, 0x21232328},
917 {0x0000b088, 0x19191c1e},
918 {0x0000b08c, 0x12141417},
919 {0x0000b090, 0x07070e0e},
920 {0x0000b094, 0x03030305},
921 {0x0000b098, 0x00000003},
922 {0x0000b09c, 0x00000000},
923 {0x0000b0a0, 0x00000000},
924 {0x0000b0a4, 0x00000000},
925 {0x0000b0a8, 0x00000000},
926 {0x0000b0ac, 0x00000000},
927 {0x0000b0b0, 0x00000000},
928 {0x0000b0b4, 0x00000000},
929 {0x0000b0b8, 0x00000000},
930 {0x0000b0bc, 0x00000000},
931 {0x0000b0c0, 0x003f0020},
932 {0x0000b0c4, 0x00400041},
933 {0x0000b0c8, 0x0140005f},
934 {0x0000b0cc, 0x0160015f},
935 {0x0000b0d0, 0x017e017f},
936 {0x0000b0d4, 0x02410242},
937 {0x0000b0d8, 0x025f0240},
938 {0x0000b0dc, 0x027f0260},
939 {0x0000b0e0, 0x0341027e},
940 {0x0000b0e4, 0x035f0340},
941 {0x0000b0e8, 0x037f0360},
942 {0x0000b0ec, 0x04400441},
943 {0x0000b0f0, 0x0460045f},
944 {0x0000b0f4, 0x0541047f},
945 {0x0000b0f8, 0x055f0540},
946 {0x0000b0fc, 0x057f0560},
947 {0x0000b100, 0x06400641},
948 {0x0000b104, 0x0660065f},
949 {0x0000b108, 0x067e067f},
950 {0x0000b10c, 0x07410742},
951 {0x0000b110, 0x075f0740},
952 {0x0000b114, 0x077f0760},
953 {0x0000b118, 0x07800781},
954 {0x0000b11c, 0x07a0079f},
955 {0x0000b120, 0x07c107bf},
956 {0x0000b124, 0x000007c0},
957 {0x0000b128, 0x00000000},
958 {0x0000b12c, 0x00000000},
959 {0x0000b130, 0x00000000},
960 {0x0000b134, 0x00000000},
961 {0x0000b138, 0x00000000},
962 {0x0000b13c, 0x00000000},
963 {0x0000b140, 0x003f0020},
964 {0x0000b144, 0x00400041},
965 {0x0000b148, 0x0140005f},
966 {0x0000b14c, 0x0160015f},
967 {0x0000b150, 0x017e017f},
968 {0x0000b154, 0x02410242},
969 {0x0000b158, 0x025f0240},
970 {0x0000b15c, 0x027f0260},
971 {0x0000b160, 0x0341027e},
972 {0x0000b164, 0x035f0340},
973 {0x0000b168, 0x037f0360},
974 {0x0000b16c, 0x04400441},
975 {0x0000b170, 0x0460045f},
976 {0x0000b174, 0x0541047f},
977 {0x0000b178, 0x055f0540},
978 {0x0000b17c, 0x057f0560},
979 {0x0000b180, 0x06400641},
980 {0x0000b184, 0x0660065f},
981 {0x0000b188, 0x067e067f},
982 {0x0000b18c, 0x07410742},
983 {0x0000b190, 0x075f0740},
984 {0x0000b194, 0x077f0760},
985 {0x0000b198, 0x07800781},
986 {0x0000b19c, 0x07a0079f},
987 {0x0000b1a0, 0x07c107bf},
988 {0x0000b1a4, 0x000007c0},
989 {0x0000b1a8, 0x00000000},
990 {0x0000b1ac, 0x00000000},
991 {0x0000b1b0, 0x00000000},
992 {0x0000b1b4, 0x00000000},
993 {0x0000b1b8, 0x00000000},
994 {0x0000b1bc, 0x00000000},
995 {0x0000b1c0, 0x00000000},
996 {0x0000b1c4, 0x00000000},
997 {0x0000b1c8, 0x00000000},
998 {0x0000b1cc, 0x00000000},
999 {0x0000b1d0, 0x00000000},
1000 {0x0000b1d4, 0x00000000},
1001 {0x0000b1d8, 0x00000000},
1002 {0x0000b1dc, 0x00000000},
1003 {0x0000b1e0, 0x00000000},
1004 {0x0000b1e4, 0x00000000},
1005 {0x0000b1e8, 0x00000000},
1006 {0x0000b1ec, 0x00000000},
1007 {0x0000b1f0, 0x00000396},
1008 {0x0000b1f4, 0x00000396},
1009 {0x0000b1f8, 0x00000396},
1010 {0x0000b1fc, 0x00000196},
1011};
1012
/*
 * Baseband-core override applied when the "mix" RX gain table is in use
 * on the AR9462 v2.1.  Row format: {register address, value}; the value
 * is written in every operating mode (see the "allmodes" column header).
 * NOTE(review): these are auto-generated vendor initvals — values must
 * not be hand-edited.
 */
1013static const u32 ar9462_2p1_baseband_core_mix_rxgain[][2] = {
1014 /* Addr allmodes */
1015 {0x00009fd0, 0x0a2d6b93},
1016};
1017
/*
 * Baseband postamble overrides for the "mix" RX gain configuration.
 * Row format: {register address, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20} —
 * one value per channel-width/band mode, per the column header below.
 * NOTE(review): auto-generated vendor initvals — do not hand-edit.
 */
1018static const u32 ar9462_2p1_baseband_postamble_mix_rxgain[][5] = {
1019 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1020 {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
1021 {0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da},
1022 {0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81},
1023 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8},
1024 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e},
1025 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e},
1026};
1027
/*
 * Baseband postamble override used with the 5 GHz external-LNA (xLNA)
 * configuration.  Row format: {register address, 5G_HT20, 5G_HT40,
 * 2G_HT40, 2G_HT20}.
 * NOTE(review): auto-generated vendor initvals — do not hand-edit.
 */
1028static const u32 ar9462_2p1_baseband_postamble_5g_xlna[][5] = {
1029 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1030 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
1031};
1032
/*
 * Common RX gain table for boards without an external LNA ("wo_xlna").
 * Row format: {register address, value}, written in all modes.  Covers
 * the 0x0000a000-0x0000a1fc range (chain 0) followed by the mirrored
 * 0x0000b000-0x0000b1fc range (presumably chain 1 — confirm against the
 * AR9003 register layout).
 * NOTE(review): auto-generated vendor initvals — do not hand-edit values.
 */
1033static const u32 ar9462_2p1_common_wo_xlna_rx_gain[][2] = {
1034 /* Addr allmodes */
1035 {0x0000a000, 0x00010000},
1036 {0x0000a004, 0x00030002},
1037 {0x0000a008, 0x00050004},
1038 {0x0000a00c, 0x00810080},
1039 {0x0000a010, 0x00830082},
1040 {0x0000a014, 0x01810180},
1041 {0x0000a018, 0x01830182},
1042 {0x0000a01c, 0x01850184},
1043 {0x0000a020, 0x01890188},
1044 {0x0000a024, 0x018b018a},
1045 {0x0000a028, 0x018d018c},
1046 {0x0000a02c, 0x03820190},
1047 {0x0000a030, 0x03840383},
1048 {0x0000a034, 0x03880385},
1049 {0x0000a038, 0x038a0389},
1050 {0x0000a03c, 0x038c038b},
1051 {0x0000a040, 0x0390038d},
1052 {0x0000a044, 0x03920391},
1053 {0x0000a048, 0x03940393},
1054 {0x0000a04c, 0x03960395},
1055 {0x0000a050, 0x00000000},
1056 {0x0000a054, 0x00000000},
1057 {0x0000a058, 0x00000000},
1058 {0x0000a05c, 0x00000000},
1059 {0x0000a060, 0x00000000},
1060 {0x0000a064, 0x00000000},
1061 {0x0000a068, 0x00000000},
1062 {0x0000a06c, 0x00000000},
1063 {0x0000a070, 0x00000000},
1064 {0x0000a074, 0x00000000},
1065 {0x0000a078, 0x00000000},
1066 {0x0000a07c, 0x00000000},
1067 {0x0000a080, 0x29292929},
1068 {0x0000a084, 0x29292929},
1069 {0x0000a088, 0x29292929},
1070 {0x0000a08c, 0x29292929},
1071 {0x0000a090, 0x22292929},
1072 {0x0000a094, 0x1d1d2222},
1073 {0x0000a098, 0x0c111117},
1074 {0x0000a09c, 0x00030303},
1075 {0x0000a0a0, 0x00000000},
1076 {0x0000a0a4, 0x00000000},
1077 {0x0000a0a8, 0x00000000},
1078 {0x0000a0ac, 0x00000000},
1079 {0x0000a0b0, 0x00000000},
1080 {0x0000a0b4, 0x00000000},
1081 {0x0000a0b8, 0x00000000},
1082 {0x0000a0bc, 0x00000000},
1083 {0x0000a0c0, 0x001f0000},
1084 {0x0000a0c4, 0x01000101},
1085 {0x0000a0c8, 0x011e011f},
1086 {0x0000a0cc, 0x011c011d},
1087 {0x0000a0d0, 0x02030204},
1088 {0x0000a0d4, 0x02010202},
1089 {0x0000a0d8, 0x021f0200},
1090 {0x0000a0dc, 0x0302021e},
1091 {0x0000a0e0, 0x03000301},
1092 {0x0000a0e4, 0x031e031f},
1093 {0x0000a0e8, 0x0402031d},
1094 {0x0000a0ec, 0x04000401},
1095 {0x0000a0f0, 0x041e041f},
1096 {0x0000a0f4, 0x0502041d},
1097 {0x0000a0f8, 0x05000501},
1098 {0x0000a0fc, 0x051e051f},
1099 {0x0000a100, 0x06010602},
1100 {0x0000a104, 0x061f0600},
1101 {0x0000a108, 0x061d061e},
1102 {0x0000a10c, 0x07020703},
1103 {0x0000a110, 0x07000701},
1104 {0x0000a114, 0x00000000},
1105 {0x0000a118, 0x00000000},
1106 {0x0000a11c, 0x00000000},
1107 {0x0000a120, 0x00000000},
1108 {0x0000a124, 0x00000000},
1109 {0x0000a128, 0x00000000},
1110 {0x0000a12c, 0x00000000},
1111 {0x0000a130, 0x00000000},
1112 {0x0000a134, 0x00000000},
1113 {0x0000a138, 0x00000000},
1114 {0x0000a13c, 0x00000000},
1115 {0x0000a140, 0x001f0000},
1116 {0x0000a144, 0x01000101},
1117 {0x0000a148, 0x011e011f},
1118 {0x0000a14c, 0x011c011d},
1119 {0x0000a150, 0x02030204},
1120 {0x0000a154, 0x02010202},
1121 {0x0000a158, 0x021f0200},
1122 {0x0000a15c, 0x0302021e},
1123 {0x0000a160, 0x03000301},
1124 {0x0000a164, 0x031e031f},
1125 {0x0000a168, 0x0402031d},
1126 {0x0000a16c, 0x04000401},
1127 {0x0000a170, 0x041e041f},
1128 {0x0000a174, 0x0502041d},
1129 {0x0000a178, 0x05000501},
1130 {0x0000a17c, 0x051e051f},
1131 {0x0000a180, 0x06010602},
1132 {0x0000a184, 0x061f0600},
1133 {0x0000a188, 0x061d061e},
1134 {0x0000a18c, 0x07020703},
1135 {0x0000a190, 0x07000701},
1136 {0x0000a194, 0x00000000},
1137 {0x0000a198, 0x00000000},
1138 {0x0000a19c, 0x00000000},
1139 {0x0000a1a0, 0x00000000},
1140 {0x0000a1a4, 0x00000000},
1141 {0x0000a1a8, 0x00000000},
1142 {0x0000a1ac, 0x00000000},
1143 {0x0000a1b0, 0x00000000},
1144 {0x0000a1b4, 0x00000000},
1145 {0x0000a1b8, 0x00000000},
1146 {0x0000a1bc, 0x00000000},
1147 {0x0000a1c0, 0x00000000},
1148 {0x0000a1c4, 0x00000000},
1149 {0x0000a1c8, 0x00000000},
1150 {0x0000a1cc, 0x00000000},
1151 {0x0000a1d0, 0x00000000},
1152 {0x0000a1d4, 0x00000000},
1153 {0x0000a1d8, 0x00000000},
1154 {0x0000a1dc, 0x00000000},
1155 {0x0000a1e0, 0x00000000},
1156 {0x0000a1e4, 0x00000000},
1157 {0x0000a1e8, 0x00000000},
1158 {0x0000a1ec, 0x00000000},
1159 {0x0000a1f0, 0x00000396},
1160 {0x0000a1f4, 0x00000396},
1161 {0x0000a1f8, 0x00000396},
1162 {0x0000a1fc, 0x00000196},
1163 {0x0000b000, 0x00010000},
1164 {0x0000b004, 0x00030002},
1165 {0x0000b008, 0x00050004},
1166 {0x0000b00c, 0x00810080},
1167 {0x0000b010, 0x00830082},
1168 {0x0000b014, 0x01810180},
1169 {0x0000b018, 0x01830182},
1170 {0x0000b01c, 0x01850184},
1171 {0x0000b020, 0x02810280},
1172 {0x0000b024, 0x02830282},
1173 {0x0000b028, 0x02850284},
1174 {0x0000b02c, 0x02890288},
1175 {0x0000b030, 0x028b028a},
1176 {0x0000b034, 0x0388028c},
1177 {0x0000b038, 0x038a0389},
1178 {0x0000b03c, 0x038c038b},
1179 {0x0000b040, 0x0390038d},
1180 {0x0000b044, 0x03920391},
1181 {0x0000b048, 0x03940393},
1182 {0x0000b04c, 0x03960395},
1183 {0x0000b050, 0x00000000},
1184 {0x0000b054, 0x00000000},
1185 {0x0000b058, 0x00000000},
1186 {0x0000b05c, 0x00000000},
1187 {0x0000b060, 0x00000000},
1188 {0x0000b064, 0x00000000},
1189 {0x0000b068, 0x00000000},
1190 {0x0000b06c, 0x00000000},
1191 {0x0000b070, 0x00000000},
1192 {0x0000b074, 0x00000000},
1193 {0x0000b078, 0x00000000},
1194 {0x0000b07c, 0x00000000},
1195 {0x0000b080, 0x32323232},
1196 {0x0000b084, 0x2f2f3232},
1197 {0x0000b088, 0x23282a2d},
1198 {0x0000b08c, 0x1c1e2123},
1199 {0x0000b090, 0x14171919},
1200 {0x0000b094, 0x0e0e1214},
1201 {0x0000b098, 0x03050707},
1202 {0x0000b09c, 0x00030303},
1203 {0x0000b0a0, 0x00000000},
1204 {0x0000b0a4, 0x00000000},
1205 {0x0000b0a8, 0x00000000},
1206 {0x0000b0ac, 0x00000000},
1207 {0x0000b0b0, 0x00000000},
1208 {0x0000b0b4, 0x00000000},
1209 {0x0000b0b8, 0x00000000},
1210 {0x0000b0bc, 0x00000000},
1211 {0x0000b0c0, 0x003f0020},
1212 {0x0000b0c4, 0x00400041},
1213 {0x0000b0c8, 0x0140005f},
1214 {0x0000b0cc, 0x0160015f},
1215 {0x0000b0d0, 0x017e017f},
1216 {0x0000b0d4, 0x02410242},
1217 {0x0000b0d8, 0x025f0240},
1218 {0x0000b0dc, 0x027f0260},
1219 {0x0000b0e0, 0x0341027e},
1220 {0x0000b0e4, 0x035f0340},
1221 {0x0000b0e8, 0x037f0360},
1222 {0x0000b0ec, 0x04400441},
1223 {0x0000b0f0, 0x0460045f},
1224 {0x0000b0f4, 0x0541047f},
1225 {0x0000b0f8, 0x055f0540},
1226 {0x0000b0fc, 0x057f0560},
1227 {0x0000b100, 0x06400641},
1228 {0x0000b104, 0x0660065f},
1229 {0x0000b108, 0x067e067f},
1230 {0x0000b10c, 0x07410742},
1231 {0x0000b110, 0x075f0740},
1232 {0x0000b114, 0x077f0760},
1233 {0x0000b118, 0x07800781},
1234 {0x0000b11c, 0x07a0079f},
1235 {0x0000b120, 0x07c107bf},
1236 {0x0000b124, 0x000007c0},
1237 {0x0000b128, 0x00000000},
1238 {0x0000b12c, 0x00000000},
1239 {0x0000b130, 0x00000000},
1240 {0x0000b134, 0x00000000},
1241 {0x0000b138, 0x00000000},
1242 {0x0000b13c, 0x00000000},
1243 {0x0000b140, 0x003f0020},
1244 {0x0000b144, 0x00400041},
1245 {0x0000b148, 0x0140005f},
1246 {0x0000b14c, 0x0160015f},
1247 {0x0000b150, 0x017e017f},
1248 {0x0000b154, 0x02410242},
1249 {0x0000b158, 0x025f0240},
1250 {0x0000b15c, 0x027f0260},
1251 {0x0000b160, 0x0341027e},
1252 {0x0000b164, 0x035f0340},
1253 {0x0000b168, 0x037f0360},
1254 {0x0000b16c, 0x04400441},
1255 {0x0000b170, 0x0460045f},
1256 {0x0000b174, 0x0541047f},
1257 {0x0000b178, 0x055f0540},
1258 {0x0000b17c, 0x057f0560},
1259 {0x0000b180, 0x06400641},
1260 {0x0000b184, 0x0660065f},
1261 {0x0000b188, 0x067e067f},
1262 {0x0000b18c, 0x07410742},
1263 {0x0000b190, 0x075f0740},
1264 {0x0000b194, 0x077f0760},
1265 {0x0000b198, 0x07800781},
1266 {0x0000b19c, 0x07a0079f},
1267 {0x0000b1a0, 0x07c107bf},
1268 {0x0000b1a4, 0x000007c0},
1269 {0x0000b1a8, 0x00000000},
1270 {0x0000b1ac, 0x00000000},
1271 {0x0000b1b0, 0x00000000},
1272 {0x0000b1b4, 0x00000000},
1273 {0x0000b1b8, 0x00000000},
1274 {0x0000b1bc, 0x00000000},
1275 {0x0000b1c0, 0x00000000},
1276 {0x0000b1c4, 0x00000000},
1277 {0x0000b1c8, 0x00000000},
1278 {0x0000b1cc, 0x00000000},
1279 {0x0000b1d0, 0x00000000},
1280 {0x0000b1d4, 0x00000000},
1281 {0x0000b1d8, 0x00000000},
1282 {0x0000b1dc, 0x00000000},
1283 {0x0000b1e0, 0x00000000},
1284 {0x0000b1e4, 0x00000000},
1285 {0x0000b1e8, 0x00000000},
1286 {0x0000b1ec, 0x00000000},
1287 {0x0000b1f0, 0x00000396},
1288 {0x0000b1f4, 0x00000396},
1289 {0x0000b1f8, 0x00000396},
1290 {0x0000b1fc, 0x00000196},
1291};
1292
/*
 * Common RX gain table for boards with an external LNA on the 5 GHz
 * path only ("5g_xlna_only").  Row format: {register address, value},
 * written in all modes.  Identical to the wo_xlna table above except for
 * the 0x0000b080-0x0000b09c rows — presumably the second chain's gain
 * thresholds adjusted for the xLNA; confirm against the AR9003 register
 * layout.
 * NOTE(review): auto-generated vendor initvals — do not hand-edit values.
 */
1293static const u32 ar9462_2p1_common_5g_xlna_only_rx_gain[][2] = {
1294 /* Addr allmodes */
1295 {0x0000a000, 0x00010000},
1296 {0x0000a004, 0x00030002},
1297 {0x0000a008, 0x00050004},
1298 {0x0000a00c, 0x00810080},
1299 {0x0000a010, 0x00830082},
1300 {0x0000a014, 0x01810180},
1301 {0x0000a018, 0x01830182},
1302 {0x0000a01c, 0x01850184},
1303 {0x0000a020, 0x01890188},
1304 {0x0000a024, 0x018b018a},
1305 {0x0000a028, 0x018d018c},
1306 {0x0000a02c, 0x03820190},
1307 {0x0000a030, 0x03840383},
1308 {0x0000a034, 0x03880385},
1309 {0x0000a038, 0x038a0389},
1310 {0x0000a03c, 0x038c038b},
1311 {0x0000a040, 0x0390038d},
1312 {0x0000a044, 0x03920391},
1313 {0x0000a048, 0x03940393},
1314 {0x0000a04c, 0x03960395},
1315 {0x0000a050, 0x00000000},
1316 {0x0000a054, 0x00000000},
1317 {0x0000a058, 0x00000000},
1318 {0x0000a05c, 0x00000000},
1319 {0x0000a060, 0x00000000},
1320 {0x0000a064, 0x00000000},
1321 {0x0000a068, 0x00000000},
1322 {0x0000a06c, 0x00000000},
1323 {0x0000a070, 0x00000000},
1324 {0x0000a074, 0x00000000},
1325 {0x0000a078, 0x00000000},
1326 {0x0000a07c, 0x00000000},
1327 {0x0000a080, 0x29292929},
1328 {0x0000a084, 0x29292929},
1329 {0x0000a088, 0x29292929},
1330 {0x0000a08c, 0x29292929},
1331 {0x0000a090, 0x22292929},
1332 {0x0000a094, 0x1d1d2222},
1333 {0x0000a098, 0x0c111117},
1334 {0x0000a09c, 0x00030303},
1335 {0x0000a0a0, 0x00000000},
1336 {0x0000a0a4, 0x00000000},
1337 {0x0000a0a8, 0x00000000},
1338 {0x0000a0ac, 0x00000000},
1339 {0x0000a0b0, 0x00000000},
1340 {0x0000a0b4, 0x00000000},
1341 {0x0000a0b8, 0x00000000},
1342 {0x0000a0bc, 0x00000000},
1343 {0x0000a0c0, 0x001f0000},
1344 {0x0000a0c4, 0x01000101},
1345 {0x0000a0c8, 0x011e011f},
1346 {0x0000a0cc, 0x011c011d},
1347 {0x0000a0d0, 0x02030204},
1348 {0x0000a0d4, 0x02010202},
1349 {0x0000a0d8, 0x021f0200},
1350 {0x0000a0dc, 0x0302021e},
1351 {0x0000a0e0, 0x03000301},
1352 {0x0000a0e4, 0x031e031f},
1353 {0x0000a0e8, 0x0402031d},
1354 {0x0000a0ec, 0x04000401},
1355 {0x0000a0f0, 0x041e041f},
1356 {0x0000a0f4, 0x0502041d},
1357 {0x0000a0f8, 0x05000501},
1358 {0x0000a0fc, 0x051e051f},
1359 {0x0000a100, 0x06010602},
1360 {0x0000a104, 0x061f0600},
1361 {0x0000a108, 0x061d061e},
1362 {0x0000a10c, 0x07020703},
1363 {0x0000a110, 0x07000701},
1364 {0x0000a114, 0x00000000},
1365 {0x0000a118, 0x00000000},
1366 {0x0000a11c, 0x00000000},
1367 {0x0000a120, 0x00000000},
1368 {0x0000a124, 0x00000000},
1369 {0x0000a128, 0x00000000},
1370 {0x0000a12c, 0x00000000},
1371 {0x0000a130, 0x00000000},
1372 {0x0000a134, 0x00000000},
1373 {0x0000a138, 0x00000000},
1374 {0x0000a13c, 0x00000000},
1375 {0x0000a140, 0x001f0000},
1376 {0x0000a144, 0x01000101},
1377 {0x0000a148, 0x011e011f},
1378 {0x0000a14c, 0x011c011d},
1379 {0x0000a150, 0x02030204},
1380 {0x0000a154, 0x02010202},
1381 {0x0000a158, 0x021f0200},
1382 {0x0000a15c, 0x0302021e},
1383 {0x0000a160, 0x03000301},
1384 {0x0000a164, 0x031e031f},
1385 {0x0000a168, 0x0402031d},
1386 {0x0000a16c, 0x04000401},
1387 {0x0000a170, 0x041e041f},
1388 {0x0000a174, 0x0502041d},
1389 {0x0000a178, 0x05000501},
1390 {0x0000a17c, 0x051e051f},
1391 {0x0000a180, 0x06010602},
1392 {0x0000a184, 0x061f0600},
1393 {0x0000a188, 0x061d061e},
1394 {0x0000a18c, 0x07020703},
1395 {0x0000a190, 0x07000701},
1396 {0x0000a194, 0x00000000},
1397 {0x0000a198, 0x00000000},
1398 {0x0000a19c, 0x00000000},
1399 {0x0000a1a0, 0x00000000},
1400 {0x0000a1a4, 0x00000000},
1401 {0x0000a1a8, 0x00000000},
1402 {0x0000a1ac, 0x00000000},
1403 {0x0000a1b0, 0x00000000},
1404 {0x0000a1b4, 0x00000000},
1405 {0x0000a1b8, 0x00000000},
1406 {0x0000a1bc, 0x00000000},
1407 {0x0000a1c0, 0x00000000},
1408 {0x0000a1c4, 0x00000000},
1409 {0x0000a1c8, 0x00000000},
1410 {0x0000a1cc, 0x00000000},
1411 {0x0000a1d0, 0x00000000},
1412 {0x0000a1d4, 0x00000000},
1413 {0x0000a1d8, 0x00000000},
1414 {0x0000a1dc, 0x00000000},
1415 {0x0000a1e0, 0x00000000},
1416 {0x0000a1e4, 0x00000000},
1417 {0x0000a1e8, 0x00000000},
1418 {0x0000a1ec, 0x00000000},
1419 {0x0000a1f0, 0x00000396},
1420 {0x0000a1f4, 0x00000396},
1421 {0x0000a1f8, 0x00000396},
1422 {0x0000a1fc, 0x00000196},
1423 {0x0000b000, 0x00010000},
1424 {0x0000b004, 0x00030002},
1425 {0x0000b008, 0x00050004},
1426 {0x0000b00c, 0x00810080},
1427 {0x0000b010, 0x00830082},
1428 {0x0000b014, 0x01810180},
1429 {0x0000b018, 0x01830182},
1430 {0x0000b01c, 0x01850184},
1431 {0x0000b020, 0x02810280},
1432 {0x0000b024, 0x02830282},
1433 {0x0000b028, 0x02850284},
1434 {0x0000b02c, 0x02890288},
1435 {0x0000b030, 0x028b028a},
1436 {0x0000b034, 0x0388028c},
1437 {0x0000b038, 0x038a0389},
1438 {0x0000b03c, 0x038c038b},
1439 {0x0000b040, 0x0390038d},
1440 {0x0000b044, 0x03920391},
1441 {0x0000b048, 0x03940393},
1442 {0x0000b04c, 0x03960395},
1443 {0x0000b050, 0x00000000},
1444 {0x0000b054, 0x00000000},
1445 {0x0000b058, 0x00000000},
1446 {0x0000b05c, 0x00000000},
1447 {0x0000b060, 0x00000000},
1448 {0x0000b064, 0x00000000},
1449 {0x0000b068, 0x00000000},
1450 {0x0000b06c, 0x00000000},
1451 {0x0000b070, 0x00000000},
1452 {0x0000b074, 0x00000000},
1453 {0x0000b078, 0x00000000},
1454 {0x0000b07c, 0x00000000},
1455 {0x0000b080, 0x2a2d2f32},
1456 {0x0000b084, 0x21232328},
1457 {0x0000b088, 0x19191c1e},
1458 {0x0000b08c, 0x12141417},
1459 {0x0000b090, 0x07070e0e},
1460 {0x0000b094, 0x03030305},
1461 {0x0000b098, 0x00000003},
1462 {0x0000b09c, 0x00000000},
1463 {0x0000b0a0, 0x00000000},
1464 {0x0000b0a4, 0x00000000},
1465 {0x0000b0a8, 0x00000000},
1466 {0x0000b0ac, 0x00000000},
1467 {0x0000b0b0, 0x00000000},
1468 {0x0000b0b4, 0x00000000},
1469 {0x0000b0b8, 0x00000000},
1470 {0x0000b0bc, 0x00000000},
1471 {0x0000b0c0, 0x003f0020},
1472 {0x0000b0c4, 0x00400041},
1473 {0x0000b0c8, 0x0140005f},
1474 {0x0000b0cc, 0x0160015f},
1475 {0x0000b0d0, 0x017e017f},
1476 {0x0000b0d4, 0x02410242},
1477 {0x0000b0d8, 0x025f0240},
1478 {0x0000b0dc, 0x027f0260},
1479 {0x0000b0e0, 0x0341027e},
1480 {0x0000b0e4, 0x035f0340},
1481 {0x0000b0e8, 0x037f0360},
1482 {0x0000b0ec, 0x04400441},
1483 {0x0000b0f0, 0x0460045f},
1484 {0x0000b0f4, 0x0541047f},
1485 {0x0000b0f8, 0x055f0540},
1486 {0x0000b0fc, 0x057f0560},
1487 {0x0000b100, 0x06400641},
1488 {0x0000b104, 0x0660065f},
1489 {0x0000b108, 0x067e067f},
1490 {0x0000b10c, 0x07410742},
1491 {0x0000b110, 0x075f0740},
1492 {0x0000b114, 0x077f0760},
1493 {0x0000b118, 0x07800781},
1494 {0x0000b11c, 0x07a0079f},
1495 {0x0000b120, 0x07c107bf},
1496 {0x0000b124, 0x000007c0},
1497 {0x0000b128, 0x00000000},
1498 {0x0000b12c, 0x00000000},
1499 {0x0000b130, 0x00000000},
1500 {0x0000b134, 0x00000000},
1501 {0x0000b138, 0x00000000},
1502 {0x0000b13c, 0x00000000},
1503 {0x0000b140, 0x003f0020},
1504 {0x0000b144, 0x00400041},
1505 {0x0000b148, 0x0140005f},
1506 {0x0000b14c, 0x0160015f},
1507 {0x0000b150, 0x017e017f},
1508 {0x0000b154, 0x02410242},
1509 {0x0000b158, 0x025f0240},
1510 {0x0000b15c, 0x027f0260},
1511 {0x0000b160, 0x0341027e},
1512 {0x0000b164, 0x035f0340},
1513 {0x0000b168, 0x037f0360},
1514 {0x0000b16c, 0x04400441},
1515 {0x0000b170, 0x0460045f},
1516 {0x0000b174, 0x0541047f},
1517 {0x0000b178, 0x055f0540},
1518 {0x0000b17c, 0x057f0560},
1519 {0x0000b180, 0x06400641},
1520 {0x0000b184, 0x0660065f},
1521 {0x0000b188, 0x067e067f},
1522 {0x0000b18c, 0x07410742},
1523 {0x0000b190, 0x075f0740},
1524 {0x0000b194, 0x077f0760},
1525 {0x0000b198, 0x07800781},
1526 {0x0000b19c, 0x07a0079f},
1527 {0x0000b1a0, 0x07c107bf},
1528 {0x0000b1a4, 0x000007c0},
1529 {0x0000b1a8, 0x00000000},
1530 {0x0000b1ac, 0x00000000},
1531 {0x0000b1b0, 0x00000000},
1532 {0x0000b1b4, 0x00000000},
1533 {0x0000b1b8, 0x00000000},
1534 {0x0000b1bc, 0x00000000},
1535 {0x0000b1c0, 0x00000000},
1536 {0x0000b1c4, 0x00000000},
1537 {0x0000b1c8, 0x00000000},
1538 {0x0000b1cc, 0x00000000},
1539 {0x0000b1d0, 0x00000000},
1540 {0x0000b1d4, 0x00000000},
1541 {0x0000b1d8, 0x00000000},
1542 {0x0000b1dc, 0x00000000},
1543 {0x0000b1e0, 0x00000000},
1544 {0x0000b1e4, 0x00000000},
1545 {0x0000b1e8, 0x00000000},
1546 {0x0000b1ec, 0x00000000},
1547 {0x0000b1f0, 0x00000396},
1548 {0x0000b1f4, 0x00000396},
1549 {0x0000b1f8, 0x00000396},
1550 {0x0000b1fc, 0x00000196},
1551};
1552
/*
 * TX gain table for the "low OB/DB" (output-bias / driver-bias) gain
 * variant.  Row format: {register address, 5G_HT20, 5G_HT40, 2G_HT40,
 * 2G_HT20}, per the column header below.  Presumably selected by the
 * EEPROM TX-gain-table type — confirm against the mode-selection code.
 * NOTE(review): auto-generated vendor initvals — do not hand-edit values.
 */
1553static const u32 ar9462_2p1_modes_low_ob_db_tx_gain[][5] = {
1554 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1555 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
1556 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
1557 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
1558 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
1559 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1560 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
1561 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1562 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1563 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
1564 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
1565 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
1566 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
1567 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
1568 {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
1569 {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
1570 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
1571 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
1572 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
1573 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
1574 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
1575 {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
1576 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
1577 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
1578 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
1579 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
1580 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
1581 {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
1582 {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
1583 {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
1584 {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
1585 {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
1586 {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
1587 {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1588 {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1589 {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1590 {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1591 {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1592 {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1593 {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1594 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1595 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1596 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1597 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1598 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1599 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
1600 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
1601 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
1602 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
1603 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
1604 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
1605 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
1606 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1607 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1608 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1609 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1610 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
1611 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
1612 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
1613 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1614 {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
1615 {0x00016048, 0x64992060, 0x64992060, 0x64992060, 0x64992060},
1616 {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
1617 {0x00016444, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
1618 {0x00016448, 0x64992000, 0x64992000, 0x64992000, 0x64992000},
1619 {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
1620};
1621
/*
 * TX gain table for the "high OB/DB" gain variant.  Row format:
 * {register address, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20}, per the
 * column header below.  Presumably selected by the EEPROM
 * TX-gain-table type — confirm against the mode-selection code.
 * NOTE(review): auto-generated vendor initvals — do not hand-edit values.
 */
1622static const u32 ar9462_2p1_modes_high_ob_db_tx_gain[][5] = {
1623 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1624 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
1625 {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1626 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1627 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1628 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1629 {0x0000a410, 0x000050da, 0x000050da, 0x000050de, 0x000050de},
1630 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1631 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1632 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
1633 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
1634 {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
1635 {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
1636 {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
1637 {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
1638 {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
1639 {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
1640 {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
1641 {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
1642 {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
1643 {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
1644 {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
1645 {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
1646 {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
1647 {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
1648 {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
1649 {0x0000a548, 0x55025eb3, 0x55025eb3, 0x3e001a81, 0x3e001a81},
1650 {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x42001a83, 0x42001a83},
1651 {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001a84, 0x44001a84},
1652 {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
1653 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
1654 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
1655 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
1656 {0x0000a564, 0x751ffff6, 0x751ffff6, 0x56001eec, 0x56001eec},
1657 {0x0000a568, 0x751ffff6, 0x751ffff6, 0x58001ef0, 0x58001ef0},
1658 {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x5a001ef4, 0x5a001ef4},
1659 {0x0000a570, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
1660 {0x0000a574, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
1661 {0x0000a578, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
1662 {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
1663 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1664 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1665 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1666 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1667 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
1668 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
1669 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
1670 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
1671 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
1672 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
1673 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
1674 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1675 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1676 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1677 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1678 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1679 {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1680 {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1681 {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1682 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1683 {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
1684 {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
1685 {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
1686 {0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
1687 {0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
1688 {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
1689};
1690
/*
 * TX gain table for the "mix OB/DB" gain variant.  Row format:
 * {register address, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20}, per the
 * column header below.  Shares most 5 GHz values with the high-OB/DB
 * table above but uses different 2 GHz entries and a different 0xa410
 * value.  Presumably selected by the EEPROM TX-gain-table type —
 * confirm against the mode-selection code.
 * NOTE(review): auto-generated vendor initvals — do not hand-edit values.
 */
1691static const u32 ar9462_2p1_modes_mix_ob_db_tx_gain[][5] = {
1692 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1693 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
1694 {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1695 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1696 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1697 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1698 {0x0000a410, 0x0000d0da, 0x0000d0da, 0x0000d0de, 0x0000d0de},
1699 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1700 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1701 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
1702 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
1703 {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
1704 {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
1705 {0x0000a514, 0x18022622, 0x18022622, 0x12000400, 0x12000400},
1706 {0x0000a518, 0x1b022822, 0x1b022822, 0x16000402, 0x16000402},
1707 {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
1708 {0x0000a520, 0x22022c41, 0x22022c41, 0x1c000603, 0x1c000603},
1709 {0x0000a524, 0x28023042, 0x28023042, 0x21000a02, 0x21000a02},
1710 {0x0000a528, 0x2c023044, 0x2c023044, 0x25000a04, 0x25000a04},
1711 {0x0000a52c, 0x2f023644, 0x2f023644, 0x28000a20, 0x28000a20},
1712 {0x0000a530, 0x34025643, 0x34025643, 0x2c000e20, 0x2c000e20},
1713 {0x0000a534, 0x38025a44, 0x38025a44, 0x30000e22, 0x30000e22},
1714 {0x0000a538, 0x3b025e45, 0x3b025e45, 0x34000e24, 0x34000e24},
1715 {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x38001640, 0x38001640},
1716 {0x0000a540, 0x48025e6c, 0x48025e6c, 0x3c001660, 0x3c001660},
1717 {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3f001861, 0x3f001861},
1718 {0x0000a548, 0x55025eb3, 0x55025eb3, 0x43001a81, 0x43001a81},
1719 {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x47001a83, 0x47001a83},
1720 {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x4a001c84, 0x4a001c84},
1721 {0x0000a554, 0x62025f56, 0x62025f56, 0x4e001ce3, 0x4e001ce3},
1722 {0x0000a558, 0x66027f56, 0x66027f56, 0x52001ce5, 0x52001ce5},
1723 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x56001ce9, 0x56001ce9},
1724 {0x0000a560, 0x70049f56, 0x70049f56, 0x5a001ceb, 0x5a001ceb},
1725 {0x0000a564, 0x751ffff6, 0x751ffff6, 0x5c001eec, 0x5c001eec},
1726 {0x0000a568, 0x751ffff6, 0x751ffff6, 0x5e001ef0, 0x5e001ef0},
1727 {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x60001ef4, 0x60001ef4},
1728 {0x0000a570, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
1729 {0x0000a574, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
1730 {0x0000a578, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
1731 {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
1732 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1733 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1734 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1735 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1736 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
1737 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
1738 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
1739 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
1740 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
1741 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
1742 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
1743 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1744 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1745 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1746 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1747 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1748 {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1749 {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1750 {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1751 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1752};
1753
/*
 * Register overrides applied in fast-clock (5 GHz) operation.  Row
 * format: {register address, 5G_HT20 value, 5G_HT40 value}, per the
 * column header below — note this table has only the two 5 GHz columns.
 * NOTE(review): auto-generated vendor initvals — do not hand-edit values.
 */
1754static const u32 ar9462_2p1_modes_fast_clock[][3] = {
1755 /* Addr 5G_HT20 5G_HT40 */
1756 {0x00001030, 0x00000268, 0x000004d0},
1757 {0x00001070, 0x0000018c, 0x00000318},
1758 {0x000010b0, 0x00000fd0, 0x00001fa0},
1759 {0x00008014, 0x044c044c, 0x08980898},
1760 {0x0000801c, 0x148ec02b, 0x148ec057},
1761 {0x00008318, 0x000044c0, 0x00008980},
1762 {0x00009e00, 0x0372131c, 0x0372131c},
1763 {0x0000a230, 0x0000400b, 0x00004016},
1764 {0x0000a254, 0x00000898, 0x00001130},
1765};
1766
/*
 * TX FIR filter coefficient overrides — per the name, for operation on
 * 2484 MHz (channel 14, Japan), where tighter spectral shaping applies.
 * Row format: {register address, value}, written in all modes.
 * NOTE(review): auto-generated vendor initvals — do not hand-edit values.
 */
1767static const u32 ar9462_2p1_baseband_core_txfir_coeff_japan_2484[][2] = {
1768 /* Addr allmodes */
1769 {0x0000a398, 0x00000000},
1770 {0x0000a39c, 0x6f7f0301},
1771 {0x0000a3a0, 0xca9228ee},
1772};
1773
1774#endif /* INITVALS_9462_2P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 42b03dc39d14..c1224b5a257b 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -296,6 +296,7 @@ struct ath_tx {
296 struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; 296 struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
297 struct ath_descdma txdma; 297 struct ath_descdma txdma;
298 struct ath_txq *txq_map[IEEE80211_NUM_ACS]; 298 struct ath_txq *txq_map[IEEE80211_NUM_ACS];
299 struct ath_txq *uapsdq;
299 u32 txq_max_pending[IEEE80211_NUM_ACS]; 300 u32 txq_max_pending[IEEE80211_NUM_ACS];
300 u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32]; 301 u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
301}; 302};
@@ -343,6 +344,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
343void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop); 344void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
344int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 345int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
345 struct ath_tx_control *txctl); 346 struct ath_tx_control *txctl);
347void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
348 struct sk_buff *skb);
346void ath_tx_tasklet(struct ath_softc *sc); 349void ath_tx_tasklet(struct ath_softc *sc);
347void ath_tx_edma_tasklet(struct ath_softc *sc); 350void ath_tx_edma_tasklet(struct ath_softc *sc);
348int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 351int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -353,6 +356,11 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
353void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an); 356void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
354void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, 357void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
355 struct ath_node *an); 358 struct ath_node *an);
359void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
360 struct ieee80211_sta *sta,
361 u16 tids, int nframes,
362 enum ieee80211_frame_release_type reason,
363 bool more_data);
356 364
357/********/ 365/********/
358/* VIFs */ 366/* VIFs */
@@ -623,6 +631,11 @@ void ath_ant_comb_update(struct ath_softc *sc);
623/* Main driver core */ 631/* Main driver core */
624/********************/ 632/********************/
625 633
634#define ATH9K_PCI_CUS198 0x0001
635#define ATH9K_PCI_CUS230 0x0002
636#define ATH9K_PCI_CUS217 0x0004
637#define ATH9K_PCI_WOW 0x0008
638
626/* 639/*
627 * Default cache line size, in bytes. 640 * Default cache line size, in bytes.
628 * Used when PCI device not fully initialized by bootrom/BIOS 641 * Used when PCI device not fully initialized by bootrom/BIOS
@@ -642,6 +655,7 @@ enum sc_op_flags {
642 SC_OP_ANI_RUN, 655 SC_OP_ANI_RUN,
643 SC_OP_PRIM_STA_VIF, 656 SC_OP_PRIM_STA_VIF,
644 SC_OP_HW_RESET, 657 SC_OP_HW_RESET,
658 SC_OP_SCANNING,
645}; 659};
646 660
647/* Powersave flags */ 661/* Powersave flags */
@@ -706,6 +720,7 @@ struct ath_softc {
706 720
707 unsigned int hw_busy_count; 721 unsigned int hw_busy_count;
708 unsigned long sc_flags; 722 unsigned long sc_flags;
723 unsigned long driver_data;
709 724
710 u32 intrstatus; 725 u32 intrstatus;
711 u16 ps_flags; /* PS_* */ 726 u16 ps_flags; /* PS_* */
@@ -755,7 +770,6 @@ struct ath_softc {
755 struct rchan *rfs_chan_spec_scan; 770 struct rchan *rfs_chan_spec_scan;
756 enum spectral_mode spectral_mode; 771 enum spectral_mode spectral_mode;
757 struct ath_spec_scan spec_config; 772 struct ath_spec_scan spec_config;
758 int scanning;
759 773
760#ifdef CONFIG_PM_SLEEP 774#ifdef CONFIG_PM_SLEEP
761 atomic_t wow_got_bmiss_intr; 775 atomic_t wow_got_bmiss_intr;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 2ff570f7f8ff..1a17732bb089 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -39,7 +39,8 @@ static void ath9k_beaconq_config(struct ath_softc *sc)
39 39
40 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi); 40 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
41 41
42 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { 42 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
43 sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
43 /* Always burst out beacon and CAB traffic. */ 44 /* Always burst out beacon and CAB traffic. */
44 qi.tqi_aifs = 1; 45 qi.tqi_aifs = 1;
45 qi.tqi_cwmin = 0; 46 qi.tqi_cwmin = 0;
@@ -107,23 +108,6 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
107 ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); 108 ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
108} 109}
109 110
110static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
111{
112 struct ath_softc *sc = hw->priv;
113 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
114 struct ath_tx_control txctl;
115
116 memset(&txctl, 0, sizeof(struct ath_tx_control));
117 txctl.txq = sc->beacon.cabq;
118
119 ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb);
120
121 if (ath_tx_start(hw, skb, &txctl) != 0) {
122 ath_dbg(common, XMIT, "CABQ TX failed\n");
123 ieee80211_free_txskb(hw, skb);
124 }
125}
126
127static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, 111static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
128 struct ieee80211_vif *vif) 112 struct ieee80211_vif *vif)
129{ 113{
@@ -205,10 +189,8 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
205 189
206 ath9k_beacon_setup(sc, vif, bf, info->control.rates[0].idx); 190 ath9k_beacon_setup(sc, vif, bf, info->control.rates[0].idx);
207 191
208 while (skb) { 192 if (skb)
209 ath9k_tx_cabq(hw, skb); 193 ath_tx_cabq(hw, vif, skb);
210 skb = ieee80211_get_buffered_bc(hw, vif);
211 }
212 194
213 return bf; 195 return bf;
214} 196}
@@ -273,7 +255,8 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc)
273 u64 tsf; 255 u64 tsf;
274 int slot; 256 int slot;
275 257
276 if (sc->sc_ah->opmode != NL80211_IFTYPE_AP) { 258 if (sc->sc_ah->opmode != NL80211_IFTYPE_AP &&
259 sc->sc_ah->opmode != NL80211_IFTYPE_MESH_POINT) {
277 ath_dbg(common, BEACON, "slot 0, tsf: %llu\n", 260 ath_dbg(common, BEACON, "slot 0, tsf: %llu\n",
278 ath9k_hw_gettsf64(sc->sc_ah)); 261 ath9k_hw_gettsf64(sc->sc_ah));
279 return 0; 262 return 0;
@@ -765,10 +748,10 @@ void ath9k_set_beacon(struct ath_softc *sc)
765 748
766 switch (sc->sc_ah->opmode) { 749 switch (sc->sc_ah->opmode) {
767 case NL80211_IFTYPE_AP: 750 case NL80211_IFTYPE_AP:
751 case NL80211_IFTYPE_MESH_POINT:
768 ath9k_beacon_config_ap(sc, cur_conf); 752 ath9k_beacon_config_ap(sc, cur_conf);
769 break; 753 break;
770 case NL80211_IFTYPE_ADHOC: 754 case NL80211_IFTYPE_ADHOC:
771 case NL80211_IFTYPE_MESH_POINT:
772 ath9k_beacon_config_adhoc(sc, cur_conf); 755 ath9k_beacon_config_adhoc(sc, cur_conf);
773 break; 756 break;
774 case NL80211_IFTYPE_STATION: 757 case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 7304e7585009..5e8219a91e25 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -387,7 +387,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
387 387
388 if (!caldata) { 388 if (!caldata) {
389 chan->noisefloor = nf; 389 chan->noisefloor = nf;
390 ah->noise = ath9k_hw_getchan_noise(ah, chan);
391 return false; 390 return false;
392 } 391 }
393 392
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index b37eb8d38811..87454f6c7b4f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -69,7 +69,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
69 return -EFAULT; 69 return -EFAULT;
70 70
71 buf[len] = '\0'; 71 buf[len] = '\0';
72 if (strict_strtoul(buf, 0, &mask)) 72 if (kstrtoul(buf, 0, &mask))
73 return -EINVAL; 73 return -EINVAL;
74 74
75 common->debug_mask = mask; 75 common->debug_mask = mask;
@@ -114,7 +114,7 @@ static ssize_t write_file_tx_chainmask(struct file *file, const char __user *use
114 return -EFAULT; 114 return -EFAULT;
115 115
116 buf[len] = '\0'; 116 buf[len] = '\0';
117 if (strict_strtoul(buf, 0, &mask)) 117 if (kstrtoul(buf, 0, &mask))
118 return -EINVAL; 118 return -EINVAL;
119 119
120 ah->txchainmask = mask; 120 ah->txchainmask = mask;
@@ -157,7 +157,7 @@ static ssize_t write_file_rx_chainmask(struct file *file, const char __user *use
157 return -EFAULT; 157 return -EFAULT;
158 158
159 buf[len] = '\0'; 159 buf[len] = '\0';
160 if (strict_strtoul(buf, 0, &mask)) 160 if (kstrtoul(buf, 0, &mask))
161 return -EINVAL; 161 return -EINVAL;
162 162
163 ah->rxchainmask = mask; 163 ah->rxchainmask = mask;
@@ -173,25 +173,69 @@ static const struct file_operations fops_rx_chainmask = {
173 .llseek = default_llseek, 173 .llseek = default_llseek,
174}; 174};
175 175
176static ssize_t read_file_disable_ani(struct file *file, char __user *user_buf, 176static ssize_t read_file_ani(struct file *file, char __user *user_buf,
177 size_t count, loff_t *ppos) 177 size_t count, loff_t *ppos)
178{ 178{
179 struct ath_softc *sc = file->private_data; 179 struct ath_softc *sc = file->private_data;
180 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 180 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
181 char buf[32]; 181 struct ath_hw *ah = sc->sc_ah;
182 unsigned int len; 182 unsigned int len = 0, size = 1024;
183 ssize_t retval = 0;
184 char *buf;
183 185
184 len = sprintf(buf, "%d\n", common->disable_ani); 186 buf = kzalloc(size, GFP_KERNEL);
185 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 187 if (buf == NULL)
188 return -ENOMEM;
189
190 if (common->disable_ani) {
191 len += snprintf(buf + len, size - len, "%s: %s\n",
192 "ANI", "DISABLED");
193 goto exit;
194 }
195
196 len += snprintf(buf + len, size - len, "%15s: %s\n",
197 "ANI", "ENABLED");
198 len += snprintf(buf + len, size - len, "%15s: %u\n",
199 "ANI RESET", ah->stats.ast_ani_reset);
200 len += snprintf(buf + len, size - len, "%15s: %u\n",
201 "SPUR UP", ah->stats.ast_ani_spurup);
202 len += snprintf(buf + len, size - len, "%15s: %u\n",
203 "SPUR DOWN", ah->stats.ast_ani_spurup);
204 len += snprintf(buf + len, size - len, "%15s: %u\n",
205 "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
206 len += snprintf(buf + len, size - len, "%15s: %u\n",
207 "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
208 len += snprintf(buf + len, size - len, "%15s: %u\n",
209 "MRC-CCK ON", ah->stats.ast_ani_ccklow);
210 len += snprintf(buf + len, size - len, "%15s: %u\n",
211 "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
212 len += snprintf(buf + len, size - len, "%15s: %u\n",
213 "FIR-STEP UP", ah->stats.ast_ani_stepup);
214 len += snprintf(buf + len, size - len, "%15s: %u\n",
215 "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
216 len += snprintf(buf + len, size - len, "%15s: %u\n",
217 "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
218 len += snprintf(buf + len, size - len, "%15s: %u\n",
219 "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
220 len += snprintf(buf + len, size - len, "%15s: %u\n",
221 "CCK ERRORS", ah->stats.ast_ani_cckerrs);
222exit:
223 if (len > size)
224 len = size;
225
226 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
227 kfree(buf);
228
229 return retval;
186} 230}
187 231
188static ssize_t write_file_disable_ani(struct file *file, 232static ssize_t write_file_ani(struct file *file,
189 const char __user *user_buf, 233 const char __user *user_buf,
190 size_t count, loff_t *ppos) 234 size_t count, loff_t *ppos)
191{ 235{
192 struct ath_softc *sc = file->private_data; 236 struct ath_softc *sc = file->private_data;
193 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 237 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
194 unsigned long disable_ani; 238 unsigned long ani;
195 char buf[32]; 239 char buf[32];
196 ssize_t len; 240 ssize_t len;
197 241
@@ -200,12 +244,15 @@ static ssize_t write_file_disable_ani(struct file *file,
200 return -EFAULT; 244 return -EFAULT;
201 245
202 buf[len] = '\0'; 246 buf[len] = '\0';
203 if (strict_strtoul(buf, 0, &disable_ani)) 247 if (kstrtoul(buf, 0, &ani))
204 return -EINVAL; 248 return -EINVAL;
205 249
206 common->disable_ani = !!disable_ani; 250 if (ani < 0 || ani > 1)
251 return -EINVAL;
252
253 common->disable_ani = !ani;
207 254
208 if (disable_ani) { 255 if (common->disable_ani) {
209 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags); 256 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
210 ath_stop_ani(sc); 257 ath_stop_ani(sc);
211 } else { 258 } else {
@@ -215,9 +262,9 @@ static ssize_t write_file_disable_ani(struct file *file,
215 return count; 262 return count;
216} 263}
217 264
218static const struct file_operations fops_disable_ani = { 265static const struct file_operations fops_ani = {
219 .read = read_file_disable_ani, 266 .read = read_file_ani,
220 .write = write_file_disable_ani, 267 .write = write_file_ani,
221 .open = simple_open, 268 .open = simple_open,
222 .owner = THIS_MODULE, 269 .owner = THIS_MODULE,
223 .llseek = default_llseek, 270 .llseek = default_llseek,
@@ -253,7 +300,7 @@ static ssize_t write_file_ant_diversity(struct file *file,
253 goto exit; 300 goto exit;
254 301
255 buf[len] = '\0'; 302 buf[len] = '\0';
256 if (strict_strtoul(buf, 0, &antenna_diversity)) 303 if (kstrtoul(buf, 0, &antenna_diversity))
257 return -EINVAL; 304 return -EINVAL;
258 305
259 common->antenna_diversity = !!antenna_diversity; 306 common->antenna_diversity = !!antenna_diversity;
@@ -738,8 +785,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
738 struct ath_tx_status *ts, struct ath_txq *txq, 785 struct ath_tx_status *ts, struct ath_txq *txq,
739 unsigned int flags) 786 unsigned int flags)
740{ 787{
741#define TX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].ts\
742 [sc->debug.tsidx].c)
743 int qnum = txq->axq_qnum; 788 int qnum = txq->axq_qnum;
744 789
745 TX_STAT_INC(qnum, tx_pkts_all); 790 TX_STAT_INC(qnum, tx_pkts_all);
@@ -771,37 +816,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
771 TX_STAT_INC(qnum, data_underrun); 816 TX_STAT_INC(qnum, data_underrun);
772 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN) 817 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
773 TX_STAT_INC(qnum, delim_underrun); 818 TX_STAT_INC(qnum, delim_underrun);
774
775#ifdef CONFIG_ATH9K_MAC_DEBUG
776 spin_lock(&sc->debug.samp_lock);
777 TX_SAMP_DBG(jiffies) = jiffies;
778 TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0;
779 TX_SAMP_DBG(rssi_ctl1) = ts->ts_rssi_ctl1;
780 TX_SAMP_DBG(rssi_ctl2) = ts->ts_rssi_ctl2;
781 TX_SAMP_DBG(rssi_ext0) = ts->ts_rssi_ext0;
782 TX_SAMP_DBG(rssi_ext1) = ts->ts_rssi_ext1;
783 TX_SAMP_DBG(rssi_ext2) = ts->ts_rssi_ext2;
784 TX_SAMP_DBG(rateindex) = ts->ts_rateindex;
785 TX_SAMP_DBG(isok) = !!(ts->ts_status & ATH9K_TXERR_MASK);
786 TX_SAMP_DBG(rts_fail_cnt) = ts->ts_shortretry;
787 TX_SAMP_DBG(data_fail_cnt) = ts->ts_longretry;
788 TX_SAMP_DBG(rssi) = ts->ts_rssi;
789 TX_SAMP_DBG(tid) = ts->tid;
790 TX_SAMP_DBG(qid) = ts->qid;
791
792 if (ts->ts_flags & ATH9K_TX_BA) {
793 TX_SAMP_DBG(ba_low) = ts->ba_low;
794 TX_SAMP_DBG(ba_high) = ts->ba_high;
795 } else {
796 TX_SAMP_DBG(ba_low) = 0;
797 TX_SAMP_DBG(ba_high) = 0;
798 }
799
800 sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES;
801 spin_unlock(&sc->debug.samp_lock);
802#endif
803
804#undef TX_SAMP_DBG
805} 819}
806 820
807static const struct file_operations fops_xmit = { 821static const struct file_operations fops_xmit = {
@@ -915,8 +929,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
915void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) 929void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
916{ 930{
917#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ 931#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
918#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
919 [sc->debug.rsidx].c)
920 932
921 RX_STAT_INC(rx_pkts_all); 933 RX_STAT_INC(rx_pkts_all);
922 sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen; 934 sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen;
@@ -940,27 +952,7 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
940 RX_PHY_ERR_INC(rs->rs_phyerr); 952 RX_PHY_ERR_INC(rs->rs_phyerr);
941 } 953 }
942 954
943#ifdef CONFIG_ATH9K_MAC_DEBUG
944 spin_lock(&sc->debug.samp_lock);
945 RX_SAMP_DBG(jiffies) = jiffies;
946 RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0;
947 RX_SAMP_DBG(rssi_ctl1) = rs->rs_rssi_ctl1;
948 RX_SAMP_DBG(rssi_ctl2) = rs->rs_rssi_ctl2;
949 RX_SAMP_DBG(rssi_ext0) = rs->rs_rssi_ext0;
950 RX_SAMP_DBG(rssi_ext1) = rs->rs_rssi_ext1;
951 RX_SAMP_DBG(rssi_ext2) = rs->rs_rssi_ext2;
952 RX_SAMP_DBG(antenna) = rs->rs_antenna;
953 RX_SAMP_DBG(rssi) = rs->rs_rssi;
954 RX_SAMP_DBG(rate) = rs->rs_rate;
955 RX_SAMP_DBG(is_mybeacon) = rs->is_mybeacon;
956
957 sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES;
958 spin_unlock(&sc->debug.samp_lock);
959
960#endif
961
962#undef RX_PHY_ERR_INC 955#undef RX_PHY_ERR_INC
963#undef RX_SAMP_DBG
964} 956}
965 957
966static const struct file_operations fops_recv = { 958static const struct file_operations fops_recv = {
@@ -1278,7 +1270,7 @@ static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
1278 return -EFAULT; 1270 return -EFAULT;
1279 1271
1280 buf[len] = '\0'; 1272 buf[len] = '\0';
1281 if (strict_strtoul(buf, 0, &regidx)) 1273 if (kstrtoul(buf, 0, &regidx))
1282 return -EINVAL; 1274 return -EINVAL;
1283 1275
1284 sc->debug.regidx = regidx; 1276 sc->debug.regidx = regidx;
@@ -1323,7 +1315,7 @@ static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
1323 return -EFAULT; 1315 return -EFAULT;
1324 1316
1325 buf[len] = '\0'; 1317 buf[len] = '\0';
1326 if (strict_strtoul(buf, 0, &regval)) 1318 if (kstrtoul(buf, 0, &regval))
1327 return -EINVAL; 1319 return -EINVAL;
1328 1320
1329 ath9k_ps_wakeup(sc); 1321 ath9k_ps_wakeup(sc);
@@ -1485,283 +1477,6 @@ static const struct file_operations fops_modal_eeprom = {
1485 .llseek = default_llseek, 1477 .llseek = default_llseek,
1486}; 1478};
1487 1479
1488#ifdef CONFIG_ATH9K_MAC_DEBUG
1489
1490void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
1491{
1492#define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c)
1493 struct ath_hw *ah = sc->sc_ah;
1494 struct ath_common *common = ath9k_hw_common(ah);
1495 unsigned long flags;
1496 int i;
1497
1498 ath9k_ps_wakeup(sc);
1499
1500 spin_lock_bh(&sc->debug.samp_lock);
1501
1502 spin_lock_irqsave(&common->cc_lock, flags);
1503 ath_hw_cycle_counters_update(common);
1504
1505 ATH_SAMP_DBG(cc.cycles) = common->cc_ani.cycles;
1506 ATH_SAMP_DBG(cc.rx_busy) = common->cc_ani.rx_busy;
1507 ATH_SAMP_DBG(cc.rx_frame) = common->cc_ani.rx_frame;
1508 ATH_SAMP_DBG(cc.tx_frame) = common->cc_ani.tx_frame;
1509 spin_unlock_irqrestore(&common->cc_lock, flags);
1510
1511 ATH_SAMP_DBG(noise) = ah->noise;
1512
1513 REG_WRITE_D(ah, AR_MACMISC,
1514 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
1515 (AR_MACMISC_MISC_OBS_BUS_1 <<
1516 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
1517
1518 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
1519 ATH_SAMP_DBG(dma_dbg_reg_vals[i]) = REG_READ_D(ah,
1520 AR_DMADBG_0 + (i * sizeof(u32)));
1521
1522 ATH_SAMP_DBG(pcu_obs) = REG_READ_D(ah, AR_OBS_BUS_1);
1523 ATH_SAMP_DBG(pcu_cr) = REG_READ_D(ah, AR_CR);
1524
1525 memcpy(ATH_SAMP_DBG(nfCalHist), sc->caldata.nfCalHist,
1526 sizeof(ATH_SAMP_DBG(nfCalHist)));
1527
1528 sc->debug.sampidx = (sc->debug.sampidx + 1) % ATH_DBG_MAX_SAMPLES;
1529 spin_unlock_bh(&sc->debug.samp_lock);
1530 ath9k_ps_restore(sc);
1531
1532#undef ATH_SAMP_DBG
1533}
1534
1535static int open_file_bb_mac_samps(struct inode *inode, struct file *file)
1536{
1537#define ATH_SAMP_DBG(c) bb_mac_samp[sampidx].c
1538 struct ath_softc *sc = inode->i_private;
1539 struct ath_hw *ah = sc->sc_ah;
1540 struct ath_common *common = ath9k_hw_common(ah);
1541 struct ieee80211_conf *conf = &common->hw->conf;
1542 struct ath_dbg_bb_mac_samp *bb_mac_samp;
1543 struct ath9k_nfcal_hist *h;
1544 int i, j, qcuOffset = 0, dcuOffset = 0;
1545 u32 *qcuBase, *dcuBase, size = 30000, len = 0;
1546 u32 sampidx = 0;
1547 u8 *buf;
1548 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
1549 u8 nread;
1550
1551 if (test_bit(SC_OP_INVALID, &sc->sc_flags))
1552 return -EAGAIN;
1553
1554 buf = vmalloc(size);
1555 if (!buf)
1556 return -ENOMEM;
1557 bb_mac_samp = vmalloc(sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
1558 if (!bb_mac_samp) {
1559 vfree(buf);
1560 return -ENOMEM;
1561 }
1562 /* Account the current state too */
1563 ath9k_debug_samp_bb_mac(sc);
1564
1565 spin_lock_bh(&sc->debug.samp_lock);
1566 memcpy(bb_mac_samp, sc->debug.bb_mac_samp,
1567 sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
1568 len += snprintf(buf + len, size - len,
1569 "Current Sample Index: %d\n", sc->debug.sampidx);
1570 spin_unlock_bh(&sc->debug.samp_lock);
1571
1572 len += snprintf(buf + len, size - len,
1573 "Raw DMA Debug Dump:\n");
1574 len += snprintf(buf + len, size - len, "Sample |\t");
1575 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
1576 len += snprintf(buf + len, size - len, " DMA Reg%d |\t", i);
1577 len += snprintf(buf + len, size - len, "\n");
1578
1579 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1580 len += snprintf(buf + len, size - len, "%d\t", sampidx);
1581
1582 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
1583 len += snprintf(buf + len, size - len, " %08x\t",
1584 ATH_SAMP_DBG(dma_dbg_reg_vals[i]));
1585 len += snprintf(buf + len, size - len, "\n");
1586 }
1587 len += snprintf(buf + len, size - len, "\n");
1588
1589 len += snprintf(buf + len, size - len,
1590 "Sample Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
1591 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1592 qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
1593 dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
1594
1595 for (i = 0; i < ATH9K_NUM_QUEUES; i++,
1596 qcuOffset += 4, dcuOffset += 5) {
1597 if (i == 8) {
1598 qcuOffset = 0;
1599 qcuBase++;
1600 }
1601
1602 if (i == 6) {
1603 dcuOffset = 0;
1604 dcuBase++;
1605 }
1606 if (!sc->debug.stats.txstats[i].queued)
1607 continue;
1608
1609 len += snprintf(buf + len, size - len,
1610 "%4d %7d %2x %1x %2x %2x\n",
1611 sampidx, i,
1612 (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
1613 (*qcuBase & (0x8 << qcuOffset)) >>
1614 (qcuOffset + 3),
1615 ATH_SAMP_DBG(dma_dbg_reg_vals[2]) &
1616 (0x7 << (i * 3)) >> (i * 3),
1617 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
1618 }
1619 len += snprintf(buf + len, size - len, "\n");
1620 }
1621 len += snprintf(buf + len, size - len,
1622 "samp qcu_sh qcu_fh qcu_comp dcu_comp dcu_arb dcu_fp "
1623 "ch_idle_dur ch_idle_dur_val txfifo_val0 txfifo_val1 "
1624 "txfifo_dcu0 txfifo_dcu1 pcu_obs AR_CR\n");
1625
1626 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1627 qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
1628 dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
1629
1630 len += snprintf(buf + len, size - len, "%4d %5x %5x ", sampidx,
1631 (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x003c0000) >> 18,
1632 (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x03c00000) >> 22);
1633 len += snprintf(buf + len, size - len, "%7x %8x ",
1634 (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x1c000000) >> 26,
1635 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x3));
1636 len += snprintf(buf + len, size - len, "%7x %7x ",
1637 (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x06000000) >> 25,
1638 (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x38000000) >> 27);
1639 len += snprintf(buf + len, size - len, "%7d %12d ",
1640 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x000003fc) >> 2,
1641 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000400) >> 10);
1642 len += snprintf(buf + len, size - len, "%12d %12d ",
1643 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000800) >> 11,
1644 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00001000) >> 12);
1645 len += snprintf(buf + len, size - len, "%12d %12d ",
1646 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x0001e000) >> 13,
1647 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x001e0000) >> 17);
1648 len += snprintf(buf + len, size - len, "0x%07x 0x%07x\n",
1649 ATH_SAMP_DBG(pcu_obs), ATH_SAMP_DBG(pcu_cr));
1650 }
1651
1652 len += snprintf(buf + len, size - len,
1653 "Sample ChNoise Chain privNF #Reading Readings\n");
1654 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1655 h = ATH_SAMP_DBG(nfCalHist);
1656 if (!ATH_SAMP_DBG(noise))
1657 continue;
1658
1659 for (i = 0; i < NUM_NF_READINGS; i++) {
1660 if (!(chainmask & (1 << i)) ||
1661 ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
1662 continue;
1663
1664 nread = AR_PHY_CCA_FILTERWINDOW_LENGTH -
1665 h[i].invalidNFcount;
1666 len += snprintf(buf + len, size - len,
1667 "%4d %5d %4d\t %d\t %d\t",
1668 sampidx, ATH_SAMP_DBG(noise),
1669 i, h[i].privNF, nread);
1670 for (j = 0; j < nread; j++)
1671 len += snprintf(buf + len, size - len,
1672 " %d", h[i].nfCalBuffer[j]);
1673 len += snprintf(buf + len, size - len, "\n");
1674 }
1675 }
1676 len += snprintf(buf + len, size - len, "\nCycle counters:\n"
1677 "Sample Total Rxbusy Rxframes Txframes\n");
1678 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1679 if (!ATH_SAMP_DBG(cc.cycles))
1680 continue;
1681 len += snprintf(buf + len, size - len,
1682 "%4d %08x %08x %08x %08x\n",
1683 sampidx, ATH_SAMP_DBG(cc.cycles),
1684 ATH_SAMP_DBG(cc.rx_busy),
1685 ATH_SAMP_DBG(cc.rx_frame),
1686 ATH_SAMP_DBG(cc.tx_frame));
1687 }
1688
1689 len += snprintf(buf + len, size - len, "Tx status Dump :\n");
1690 len += snprintf(buf + len, size - len,
1691 "Sample rssi:- ctl0 ctl1 ctl2 ext0 ext1 ext2 comb "
1692 "isok rts_fail data_fail rate tid qid "
1693 "ba_low ba_high tx_before(ms)\n");
1694 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1695 for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
1696 if (!ATH_SAMP_DBG(ts[i].jiffies))
1697 continue;
1698 len += snprintf(buf + len, size - len, "%-14d"
1699 "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-4d %-8d "
1700 "%-9d %-4d %-3d %-3d %08x %08x %-11d\n",
1701 sampidx,
1702 ATH_SAMP_DBG(ts[i].rssi_ctl0),
1703 ATH_SAMP_DBG(ts[i].rssi_ctl1),
1704 ATH_SAMP_DBG(ts[i].rssi_ctl2),
1705 ATH_SAMP_DBG(ts[i].rssi_ext0),
1706 ATH_SAMP_DBG(ts[i].rssi_ext1),
1707 ATH_SAMP_DBG(ts[i].rssi_ext2),
1708 ATH_SAMP_DBG(ts[i].rssi),
1709 ATH_SAMP_DBG(ts[i].isok),
1710 ATH_SAMP_DBG(ts[i].rts_fail_cnt),
1711 ATH_SAMP_DBG(ts[i].data_fail_cnt),
1712 ATH_SAMP_DBG(ts[i].rateindex),
1713 ATH_SAMP_DBG(ts[i].tid),
1714 ATH_SAMP_DBG(ts[i].qid),
1715 ATH_SAMP_DBG(ts[i].ba_low),
1716 ATH_SAMP_DBG(ts[i].ba_high),
1717 jiffies_to_msecs(jiffies -
1718 ATH_SAMP_DBG(ts[i].jiffies)));
1719 }
1720 }
1721
1722 len += snprintf(buf + len, size - len, "Rx status Dump :\n");
1723 len += snprintf(buf + len, size - len, "Sample rssi:- ctl0 ctl1 ctl2 "
1724 "ext0 ext1 ext2 comb beacon ant rate rx_before(ms)\n");
1725 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1726 for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
1727 if (!ATH_SAMP_DBG(rs[i].jiffies))
1728 continue;
1729 len += snprintf(buf + len, size - len, "%-14d"
1730 "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-9s %-2d %02x %-13d\n",
1731 sampidx,
1732 ATH_SAMP_DBG(rs[i].rssi_ctl0),
1733 ATH_SAMP_DBG(rs[i].rssi_ctl1),
1734 ATH_SAMP_DBG(rs[i].rssi_ctl2),
1735 ATH_SAMP_DBG(rs[i].rssi_ext0),
1736 ATH_SAMP_DBG(rs[i].rssi_ext1),
1737 ATH_SAMP_DBG(rs[i].rssi_ext2),
1738 ATH_SAMP_DBG(rs[i].rssi),
1739 ATH_SAMP_DBG(rs[i].is_mybeacon) ?
1740 "True" : "False",
1741 ATH_SAMP_DBG(rs[i].antenna),
1742 ATH_SAMP_DBG(rs[i].rate),
1743 jiffies_to_msecs(jiffies -
1744 ATH_SAMP_DBG(rs[i].jiffies)));
1745 }
1746 }
1747
1748 vfree(bb_mac_samp);
1749 file->private_data = buf;
1750
1751 return 0;
1752#undef ATH_SAMP_DBG
1753}
1754
1755static const struct file_operations fops_samps = {
1756 .open = open_file_bb_mac_samps,
1757 .read = ath9k_debugfs_read_buf,
1758 .release = ath9k_debugfs_release_buf,
1759 .owner = THIS_MODULE,
1760 .llseek = default_llseek,
1761};
1762
1763#endif
1764
1765#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 1480#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
1766static ssize_t read_file_btcoex(struct file *file, char __user *user_buf, 1481static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
1767 size_t count, loff_t *ppos) 1482 size_t count, loff_t *ppos)
@@ -2059,8 +1774,8 @@ int ath9k_init_debug(struct ath_hw *ah)
2059 sc->debug.debugfs_phy, sc, &fops_rx_chainmask); 1774 sc->debug.debugfs_phy, sc, &fops_rx_chainmask);
2060 debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR, 1775 debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
2061 sc->debug.debugfs_phy, sc, &fops_tx_chainmask); 1776 sc->debug.debugfs_phy, sc, &fops_tx_chainmask);
2062 debugfs_create_file("disable_ani", S_IRUSR | S_IWUSR, 1777 debugfs_create_file("ani", S_IRUSR | S_IWUSR,
2063 sc->debug.debugfs_phy, sc, &fops_disable_ani); 1778 sc->debug.debugfs_phy, sc, &fops_ani);
2064 debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1779 debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
2065 &sc->sc_ah->config.enable_paprd); 1780 &sc->sc_ah->config.enable_paprd);
2066 debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1781 debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
@@ -2095,11 +1810,6 @@ int ath9k_init_debug(struct ath_hw *ah)
2095 debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR, 1810 debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR,
2096 sc->debug.debugfs_phy, sc, 1811 sc->debug.debugfs_phy, sc,
2097 &fops_spectral_fft_period); 1812 &fops_spectral_fft_period);
2098
2099#ifdef CONFIG_ATH9K_MAC_DEBUG
2100 debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
2101 &fops_samps);
2102#endif
2103 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR, 1813 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
2104 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); 1814 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
2105 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, 1815 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 9d49aab8b989..fc679198a0f3 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -251,56 +251,10 @@ struct ath_stats {
251 u32 reset[__RESET_TYPE_MAX]; 251 u32 reset[__RESET_TYPE_MAX];
252}; 252};
253 253
254#define ATH_DBG_MAX_SAMPLES 10
255struct ath_dbg_bb_mac_samp {
256 u32 dma_dbg_reg_vals[ATH9K_NUM_DMA_DEBUG_REGS];
257 u32 pcu_obs, pcu_cr, noise;
258 struct {
259 u64 jiffies;
260 int8_t rssi_ctl0;
261 int8_t rssi_ctl1;
262 int8_t rssi_ctl2;
263 int8_t rssi_ext0;
264 int8_t rssi_ext1;
265 int8_t rssi_ext2;
266 int8_t rssi;
267 bool isok;
268 u8 rts_fail_cnt;
269 u8 data_fail_cnt;
270 u8 rateindex;
271 u8 qid;
272 u8 tid;
273 u32 ba_low;
274 u32 ba_high;
275 } ts[ATH_DBG_MAX_SAMPLES];
276 struct {
277 u64 jiffies;
278 int8_t rssi_ctl0;
279 int8_t rssi_ctl1;
280 int8_t rssi_ctl2;
281 int8_t rssi_ext0;
282 int8_t rssi_ext1;
283 int8_t rssi_ext2;
284 int8_t rssi;
285 bool is_mybeacon;
286 u8 antenna;
287 u8 rate;
288 } rs[ATH_DBG_MAX_SAMPLES];
289 struct ath_cycle_counters cc;
290 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
291};
292
293struct ath9k_debug { 254struct ath9k_debug {
294 struct dentry *debugfs_phy; 255 struct dentry *debugfs_phy;
295 u32 regidx; 256 u32 regidx;
296 struct ath_stats stats; 257 struct ath_stats stats;
297#ifdef CONFIG_ATH9K_MAC_DEBUG
298 spinlock_t samp_lock;
299 struct ath_dbg_bb_mac_samp bb_mac_samp[ATH_DBG_MAX_SAMPLES];
300 u8 sampidx;
301 u8 tsidx;
302 u8 rsidx;
303#endif
304}; 258};
305 259
306int ath9k_init_debug(struct ath_hw *ah); 260int ath9k_init_debug(struct ath_hw *ah);
@@ -364,17 +318,4 @@ static inline void ath_debug_stat_rx(struct ath_softc *sc,
364 318
365#endif /* CONFIG_ATH9K_DEBUGFS */ 319#endif /* CONFIG_ATH9K_DEBUGFS */
366 320
367#ifdef CONFIG_ATH9K_MAC_DEBUG
368
369void ath9k_debug_samp_bb_mac(struct ath_softc *sc);
370
371#else
372
373static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
374{
375}
376
377#endif
378
379
380#endif /* DEBUG_H */ 321#endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index b7611b7bbe43..3c6e4138a95d 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -96,7 +96,7 @@ static ssize_t write_file_dfs(struct file *file, const char __user *user_buf,
96 return -EFAULT; 96 return -EFAULT;
97 97
98 buf[len] = '\0'; 98 buf[len] = '\0';
99 if (strict_strtoul(buf, 0, &val)) 99 if (kstrtoul(buf, 0, &val))
100 return -EINVAL; 100 return -EINVAL;
101 101
102 if (val == DFS_STATS_RESET_MAGIC) 102 if (val == DFS_STATS_RESET_MAGIC)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f5dda84176c3..9e582e14da74 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -234,10 +234,15 @@ static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
234 struct sk_buff *skb; 234 struct sk_buff *skb;
235 235
236 while ((skb = __skb_dequeue(queue)) != NULL) { 236 while ((skb = __skb_dequeue(queue)) != NULL) {
237#ifdef CONFIG_ATH9K_HTC_DEBUGFS
238 int ln = skb->len;
239#endif
237 ath9k_htc_txcompletion_cb(hif_dev->htc_handle, 240 ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
238 skb, txok); 241 skb, txok);
239 if (txok) 242 if (txok) {
240 TX_STAT_INC(skb_success); 243 TX_STAT_INC(skb_success);
244 TX_STAT_ADD(skb_success_bytes, ln);
245 }
241 else 246 else
242 TX_STAT_INC(skb_failed); 247 TX_STAT_INC(skb_failed);
243 } 248 }
@@ -620,6 +625,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
620 625
621err: 626err:
622 for (i = 0; i < pool_index; i++) { 627 for (i = 0; i < pool_index; i++) {
628 RX_STAT_ADD(skb_completed_bytes, skb_pool[i]->len);
623 ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i], 629 ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
624 skb_pool[i]->len, USB_WLAN_RX_PIPE); 630 skb_pool[i]->len, USB_WLAN_RX_PIPE);
625 RX_STAT_INC(skb_completed); 631 RX_STAT_INC(skb_completed);
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index d3b099d7898b..055d7c25e090 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -142,6 +142,7 @@ struct ath9k_htc_target_aggr {
142#define WLAN_RC_40_FLAG 0x02 142#define WLAN_RC_40_FLAG 0x02
143#define WLAN_RC_SGI_FLAG 0x04 143#define WLAN_RC_SGI_FLAG 0x04
144#define WLAN_RC_HT_FLAG 0x08 144#define WLAN_RC_HT_FLAG 0x08
145#define ATH_RC_TX_STBC_FLAG 0x20
145 146
146struct ath9k_htc_rateset { 147struct ath9k_htc_rateset {
147 u8 rs_nrates; 148 u8 rs_nrates;
@@ -208,6 +209,9 @@ struct ath9k_htc_target_rx_stats {
208 case NL80211_IFTYPE_AP: \ 209 case NL80211_IFTYPE_AP: \
209 _priv->num_ap_vif++; \ 210 _priv->num_ap_vif++; \
210 break; \ 211 break; \
212 case NL80211_IFTYPE_MESH_POINT: \
213 _priv->num_mbss_vif++; \
214 break; \
211 default: \ 215 default: \
212 break; \ 216 break; \
213 } \ 217 } \
@@ -224,6 +228,9 @@ struct ath9k_htc_target_rx_stats {
224 case NL80211_IFTYPE_AP: \ 228 case NL80211_IFTYPE_AP: \
225 _priv->num_ap_vif--; \ 229 _priv->num_ap_vif--; \
226 break; \ 230 break; \
231 case NL80211_IFTYPE_MESH_POINT: \
232 _priv->num_mbss_vif--; \
233 break; \
227 default: \ 234 default: \
228 break; \ 235 break; \
229 } \ 236 } \
@@ -317,7 +324,9 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
317#ifdef CONFIG_ATH9K_HTC_DEBUGFS 324#ifdef CONFIG_ATH9K_HTC_DEBUGFS
318 325
319#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) 326#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
327#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
320#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++) 328#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
329#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c += a)
321#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++ 330#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
322 331
323#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++) 332#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
@@ -330,6 +339,7 @@ struct ath_tx_stats {
330 u32 buf_completed; 339 u32 buf_completed;
331 u32 skb_queued; 340 u32 skb_queued;
332 u32 skb_success; 341 u32 skb_success;
342 u32 skb_success_bytes;
333 u32 skb_failed; 343 u32 skb_failed;
334 u32 cab_queued; 344 u32 cab_queued;
335 u32 queue_stats[IEEE80211_NUM_ACS]; 345 u32 queue_stats[IEEE80211_NUM_ACS];
@@ -338,6 +348,7 @@ struct ath_tx_stats {
338struct ath_rx_stats { 348struct ath_rx_stats {
339 u32 skb_allocated; 349 u32 skb_allocated;
340 u32 skb_completed; 350 u32 skb_completed;
351 u32 skb_completed_bytes;
341 u32 skb_dropped; 352 u32 skb_dropped;
342 u32 err_crc; 353 u32 err_crc;
343 u32 err_decrypt_crc; 354 u32 err_decrypt_crc;
@@ -355,10 +366,20 @@ struct ath9k_debug {
355 struct ath_rx_stats rx_stats; 366 struct ath_rx_stats rx_stats;
356}; 367};
357 368
369void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
370 struct ieee80211_vif *vif,
371 u32 sset, u8 *data);
372int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
373 struct ieee80211_vif *vif, int sset);
374void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
375 struct ieee80211_vif *vif,
376 struct ethtool_stats *stats, u64 *data);
358#else 377#else
359 378
360#define TX_STAT_INC(c) do { } while (0) 379#define TX_STAT_INC(c) do { } while (0)
380#define TX_STAT_ADD(c, a) do { } while (0)
361#define RX_STAT_INC(c) do { } while (0) 381#define RX_STAT_INC(c) do { } while (0)
382#define RX_STAT_ADD(c, a) do { } while (0)
362#define CAB_STAT_INC do { } while (0) 383#define CAB_STAT_INC do { } while (0)
363 384
364#define TX_QSTAT_INC(c) do { } while (0) 385#define TX_QSTAT_INC(c) do { } while (0)
@@ -450,6 +471,7 @@ struct ath9k_htc_priv {
450 u8 sta_slot; 471 u8 sta_slot;
451 u8 vif_sta_pos[ATH9K_HTC_MAX_VIF]; 472 u8 vif_sta_pos[ATH9K_HTC_MAX_VIF];
452 u8 num_ibss_vif; 473 u8 num_ibss_vif;
474 u8 num_mbss_vif;
453 u8 num_sta_vif; 475 u8 num_sta_vif;
454 u8 num_sta_assoc_vif; 476 u8 num_sta_assoc_vif;
455 u8 num_ap_vif; 477 u8 num_ap_vif;
@@ -575,6 +597,8 @@ bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
575void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv); 597void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
576void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw); 598void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
577 599
600struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv);
601
578#ifdef CONFIG_MAC80211_LEDS 602#ifdef CONFIG_MAC80211_LEDS
579void ath9k_init_leds(struct ath9k_htc_priv *priv); 603void ath9k_init_leds(struct ath9k_htc_priv *priv);
580void ath9k_deinit_leds(struct ath9k_htc_priv *priv); 604void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index f13f458dd656..e0c03bd64182 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -28,7 +28,8 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
28 28
29 ath9k_hw_get_txq_props(ah, priv->beaconq, &qi); 29 ath9k_hw_get_txq_props(ah, priv->beaconq, &qi);
30 30
31 if (priv->ah->opmode == NL80211_IFTYPE_AP) { 31 if (priv->ah->opmode == NL80211_IFTYPE_AP ||
32 priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) {
32 qi.tqi_aifs = 1; 33 qi.tqi_aifs = 1;
33 qi.tqi_cwmin = 0; 34 qi.tqi_cwmin = 0;
34 qi.tqi_cwmax = 0; 35 qi.tqi_cwmax = 0;
@@ -628,6 +629,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
628 case NL80211_IFTYPE_ADHOC: 629 case NL80211_IFTYPE_ADHOC:
629 ath9k_htc_beacon_config_adhoc(priv, cur_conf); 630 ath9k_htc_beacon_config_adhoc(priv, cur_conf);
630 break; 631 break;
632 case NL80211_IFTYPE_MESH_POINT:
631 case NL80211_IFTYPE_AP: 633 case NL80211_IFTYPE_AP:
632 ath9k_htc_beacon_config_ap(priv, cur_conf); 634 ath9k_htc_beacon_config_ap(priv, cur_conf);
633 break; 635 break;
@@ -649,6 +651,7 @@ void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
649 case NL80211_IFTYPE_ADHOC: 651 case NL80211_IFTYPE_ADHOC:
650 ath9k_htc_beacon_config_adhoc(priv, cur_conf); 652 ath9k_htc_beacon_config_adhoc(priv, cur_conf);
651 break; 653 break;
654 case NL80211_IFTYPE_MESH_POINT:
652 case NL80211_IFTYPE_AP: 655 case NL80211_IFTYPE_AP:
653 ath9k_htc_beacon_config_ap(priv, cur_conf); 656 ath9k_htc_beacon_config_ap(priv, cur_conf);
654 break; 657 break;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index 87110de577ef..c1b45e2f8481 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -471,7 +471,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
471 return -EFAULT; 471 return -EFAULT;
472 472
473 buf[len] = '\0'; 473 buf[len] = '\0';
474 if (strict_strtoul(buf, 0, &mask)) 474 if (kstrtoul(buf, 0, &mask))
475 return -EINVAL; 475 return -EINVAL;
476 476
477 common->debug_mask = mask; 477 common->debug_mask = mask;
@@ -496,21 +496,7 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
496 ssize_t retval = 0; 496 ssize_t retval = 0;
497 char *buf; 497 char *buf;
498 498
499 /* 499 pBase = ath9k_htc_get_eeprom_base(priv);
500 * This can be done since all the 3 EEPROM families have the
501 * same base header upto a certain point, and we are interested in
502 * the data only upto that point.
503 */
504
505 if (AR_SREV_9271(priv->ah))
506 pBase = (struct base_eep_header *)
507 &priv->ah->eeprom.map4k.baseEepHeader;
508 else if (priv->ah->hw_version.usbdev == AR9280_USB)
509 pBase = (struct base_eep_header *)
510 &priv->ah->eeprom.def.baseEepHeader;
511 else if (priv->ah->hw_version.usbdev == AR9287_USB)
512 pBase = (struct base_eep_header *)
513 &priv->ah->eeprom.map9287.baseEepHeader;
514 500
515 if (pBase == NULL) { 501 if (pBase == NULL) {
516 ath_err(common, "Unknown EEPROM type\n"); 502 ath_err(common, "Unknown EEPROM type\n");
@@ -916,6 +902,87 @@ static const struct file_operations fops_modal_eeprom = {
916 .llseek = default_llseek, 902 .llseek = default_llseek,
917}; 903};
918 904
905
906/* Ethtool support for get-stats */
907#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
908static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = {
909 "tx_pkts_nic",
910 "tx_bytes_nic",
911 "rx_pkts_nic",
912 "rx_bytes_nic",
913
914 AMKSTR(d_tx_pkts),
915
916 "d_rx_crc_err",
917 "d_rx_decrypt_crc_err",
918 "d_rx_phy_err",
919 "d_rx_mic_err",
920 "d_rx_pre_delim_crc_err",
921 "d_rx_post_delim_crc_err",
922 "d_rx_decrypt_busy_err",
923
924 "d_rx_phyerr_radar",
925 "d_rx_phyerr_ofdm_timing",
926 "d_rx_phyerr_cck_timing",
927
928};
929#define ATH9K_HTC_SSTATS_LEN ARRAY_SIZE(ath9k_htc_gstrings_stats)
930
931void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
932 struct ieee80211_vif *vif,
933 u32 sset, u8 *data)
934{
935 if (sset == ETH_SS_STATS)
936 memcpy(data, *ath9k_htc_gstrings_stats,
937 sizeof(ath9k_htc_gstrings_stats));
938}
939
940int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
941 struct ieee80211_vif *vif, int sset)
942{
943 if (sset == ETH_SS_STATS)
944 return ATH9K_HTC_SSTATS_LEN;
945 return 0;
946}
947
948#define STXBASE priv->debug.tx_stats
949#define SRXBASE priv->debug.rx_stats
950#define ASTXQ(a) \
951 data[i++] = STXBASE.a[IEEE80211_AC_BE]; \
952 data[i++] = STXBASE.a[IEEE80211_AC_BK]; \
953 data[i++] = STXBASE.a[IEEE80211_AC_VI]; \
954 data[i++] = STXBASE.a[IEEE80211_AC_VO]
955
956void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
957 struct ieee80211_vif *vif,
958 struct ethtool_stats *stats, u64 *data)
959{
960 struct ath9k_htc_priv *priv = hw->priv;
961 int i = 0;
962
963 data[i++] = STXBASE.skb_success;
964 data[i++] = STXBASE.skb_success_bytes;
965 data[i++] = SRXBASE.skb_completed;
966 data[i++] = SRXBASE.skb_completed_bytes;
967
968 ASTXQ(queue_stats);
969
970 data[i++] = SRXBASE.err_crc;
971 data[i++] = SRXBASE.err_decrypt_crc;
972 data[i++] = SRXBASE.err_phy;
973 data[i++] = SRXBASE.err_mic;
974 data[i++] = SRXBASE.err_pre_delim;
975 data[i++] = SRXBASE.err_post_delim;
976 data[i++] = SRXBASE.err_decrypt_busy;
977
978 data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_RADAR];
979 data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_OFDM_TIMING];
980 data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_CCK_TIMING];
981
982 WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
983}
984
985
919int ath9k_htc_init_debug(struct ath_hw *ah) 986int ath9k_htc_init_debug(struct ath_hw *ah)
920{ 987{
921 struct ath_common *common = ath9k_hw_common(ah); 988 struct ath_common *common = ath9k_hw_common(ah);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index a47f5e05fc04..71a183ffc77f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -517,6 +517,9 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv,
517 ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n", 517 ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
518 tx_streams, rx_streams); 518 tx_streams, rx_streams);
519 519
520 if (tx_streams >= 2)
521 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
522
520 if (tx_streams != rx_streams) { 523 if (tx_streams != rx_streams) {
521 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 524 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
522 ht_info->mcs.tx_params |= ((tx_streams - 1) << 525 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
@@ -698,6 +701,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
698 { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | 701 { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
699 BIT(NL80211_IFTYPE_P2P_CLIENT) }, 702 BIT(NL80211_IFTYPE_P2P_CLIENT) },
700 { .max = 2, .types = BIT(NL80211_IFTYPE_AP) | 703 { .max = 2, .types = BIT(NL80211_IFTYPE_AP) |
704#ifdef CONFIG_MAC80211_MESH
705 BIT(NL80211_IFTYPE_MESH_POINT) |
706#endif
701 BIT(NL80211_IFTYPE_P2P_GO) }, 707 BIT(NL80211_IFTYPE_P2P_GO) },
702}; 708};
703 709
@@ -712,6 +718,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
712 struct ieee80211_hw *hw) 718 struct ieee80211_hw *hw)
713{ 719{
714 struct ath_common *common = ath9k_hw_common(priv->ah); 720 struct ath_common *common = ath9k_hw_common(priv->ah);
721 struct base_eep_header *pBase;
715 722
716 hw->flags = IEEE80211_HW_SIGNAL_DBM | 723 hw->flags = IEEE80211_HW_SIGNAL_DBM |
717 IEEE80211_HW_AMPDU_AGGREGATION | 724 IEEE80211_HW_AMPDU_AGGREGATION |
@@ -721,6 +728,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
721 IEEE80211_HW_SUPPORTS_PS | 728 IEEE80211_HW_SUPPORTS_PS |
722 IEEE80211_HW_PS_NULLFUNC_STACK | 729 IEEE80211_HW_PS_NULLFUNC_STACK |
723 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 730 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
731 IEEE80211_HW_MFP_CAPABLE |
724 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 732 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
725 733
726 hw->wiphy->interface_modes = 734 hw->wiphy->interface_modes =
@@ -728,7 +736,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
728 BIT(NL80211_IFTYPE_ADHOC) | 736 BIT(NL80211_IFTYPE_ADHOC) |
729 BIT(NL80211_IFTYPE_AP) | 737 BIT(NL80211_IFTYPE_AP) |
730 BIT(NL80211_IFTYPE_P2P_GO) | 738 BIT(NL80211_IFTYPE_P2P_GO) |
731 BIT(NL80211_IFTYPE_P2P_CLIENT); 739 BIT(NL80211_IFTYPE_P2P_CLIENT) |
740 BIT(NL80211_IFTYPE_MESH_POINT);
732 741
733 hw->wiphy->iface_combinations = &if_comb; 742 hw->wiphy->iface_combinations = &if_comb;
734 hw->wiphy->n_iface_combinations = 1; 743 hw->wiphy->n_iface_combinations = 1;
@@ -765,6 +774,12 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
765 &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap); 774 &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
766 } 775 }
767 776
777 pBase = ath9k_htc_get_eeprom_base(priv);
778 if (pBase) {
779 hw->wiphy->available_antennas_rx = pBase->rxMask;
780 hw->wiphy->available_antennas_tx = pBase->txMask;
781 }
782
768 SET_IEEE80211_PERM_ADDR(hw, common->macaddr); 783 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
769} 784}
770 785
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 62f1b7636c92..5c1bec18c9e3 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -113,7 +113,9 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
113 struct ath9k_htc_priv *priv = data; 113 struct ath9k_htc_priv *priv = data;
114 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 114 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
115 115
116 if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon) 116 if ((vif->type == NL80211_IFTYPE_AP ||
117 vif->type == NL80211_IFTYPE_MESH_POINT) &&
118 bss_conf->enable_beacon)
117 priv->reconfig_beacon = true; 119 priv->reconfig_beacon = true;
118 120
119 if (bss_conf->assoc) { 121 if (bss_conf->assoc) {
@@ -180,6 +182,8 @@ static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv)
180 priv->ah->opmode = NL80211_IFTYPE_ADHOC; 182 priv->ah->opmode = NL80211_IFTYPE_ADHOC;
181 else if (priv->num_ap_vif) 183 else if (priv->num_ap_vif)
182 priv->ah->opmode = NL80211_IFTYPE_AP; 184 priv->ah->opmode = NL80211_IFTYPE_AP;
185 else if (priv->num_mbss_vif)
186 priv->ah->opmode = NL80211_IFTYPE_MESH_POINT;
183 else 187 else
184 priv->ah->opmode = NL80211_IFTYPE_STATION; 188 priv->ah->opmode = NL80211_IFTYPE_STATION;
185 189
@@ -623,6 +627,8 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
623 trate->rates.ht_rates.rs_nrates = j; 627 trate->rates.ht_rates.rs_nrates = j;
624 628
625 caps = WLAN_RC_HT_FLAG; 629 caps = WLAN_RC_HT_FLAG;
630 if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
631 caps |= ATH_RC_TX_STBC_FLAG;
626 if (sta->ht_cap.mcs.rx_mask[1]) 632 if (sta->ht_cap.mcs.rx_mask[1])
627 caps |= WLAN_RC_DS_FLAG; 633 caps |= WLAN_RC_DS_FLAG;
628 if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && 634 if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
@@ -810,8 +816,7 @@ void ath9k_htc_ani_work(struct work_struct *work)
810 } 816 }
811 817
812 /* Verify whether we must check ANI */ 818 /* Verify whether we must check ANI */
813 if (ah->config.enable_ani && 819 if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
814 (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
815 aniflag = true; 820 aniflag = true;
816 common->ani.checkani_timer = timestamp; 821 common->ani.checkani_timer = timestamp;
817 } 822 }
@@ -841,8 +846,7 @@ set_timer:
841 * short calibration and long calibration. 846 * short calibration and long calibration.
842 */ 847 */
843 cal_interval = ATH_LONG_CALINTERVAL; 848 cal_interval = ATH_LONG_CALINTERVAL;
844 if (ah->config.enable_ani) 849 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
845 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
846 if (!common->ani.caldone) 850 if (!common->ani.caldone)
847 cal_interval = min(cal_interval, (u32)short_cal_interval); 851 cal_interval = min(cal_interval, (u32)short_cal_interval);
848 852
@@ -1052,6 +1056,9 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1052 case NL80211_IFTYPE_AP: 1056 case NL80211_IFTYPE_AP:
1053 hvif.opmode = HTC_M_HOSTAP; 1057 hvif.opmode = HTC_M_HOSTAP;
1054 break; 1058 break;
1059 case NL80211_IFTYPE_MESH_POINT:
1060 hvif.opmode = HTC_M_WDS; /* close enough */
1061 break;
1055 default: 1062 default:
1056 ath_err(common, 1063 ath_err(common,
1057 "Interface type %d not yet supported\n", vif->type); 1064 "Interface type %d not yet supported\n", vif->type);
@@ -1084,6 +1091,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1084 INC_VIF(priv, vif->type); 1091 INC_VIF(priv, vif->type);
1085 1092
1086 if ((vif->type == NL80211_IFTYPE_AP) || 1093 if ((vif->type == NL80211_IFTYPE_AP) ||
1094 (vif->type == NL80211_IFTYPE_MESH_POINT) ||
1087 (vif->type == NL80211_IFTYPE_ADHOC)) 1095 (vif->type == NL80211_IFTYPE_ADHOC))
1088 ath9k_htc_assign_bslot(priv, vif); 1096 ath9k_htc_assign_bslot(priv, vif);
1089 1097
@@ -1134,6 +1142,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1134 DEC_VIF(priv, vif->type); 1142 DEC_VIF(priv, vif->type);
1135 1143
1136 if ((vif->type == NL80211_IFTYPE_AP) || 1144 if ((vif->type == NL80211_IFTYPE_AP) ||
1145 vif->type == NL80211_IFTYPE_MESH_POINT ||
1137 (vif->type == NL80211_IFTYPE_ADHOC)) 1146 (vif->type == NL80211_IFTYPE_ADHOC))
1138 ath9k_htc_remove_bslot(priv, vif); 1147 ath9k_htc_remove_bslot(priv, vif);
1139 1148
@@ -1525,9 +1534,10 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1525 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) { 1534 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) {
1526 /* 1535 /*
1527 * Disable SWBA interrupt only if there are no 1536 * Disable SWBA interrupt only if there are no
1528 * AP/IBSS interfaces. 1537 * concurrent AP/mesh or IBSS interfaces.
1529 */ 1538 */
1530 if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) { 1539 if ((priv->num_ap_vif + priv->num_mbss_vif <= 1) ||
1540 priv->num_ibss_vif) {
1531 ath_dbg(common, CONFIG, 1541 ath_dbg(common, CONFIG,
1532 "Beacon disabled for BSS: %pM\n", 1542 "Beacon disabled for BSS: %pM\n",
1533 bss_conf->bssid); 1543 bss_conf->bssid);
@@ -1538,12 +1548,15 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1538 1548
1539 if (changed & BSS_CHANGED_BEACON_INT) { 1549 if (changed & BSS_CHANGED_BEACON_INT) {
1540 /* 1550 /*
1541 * Reset the HW TSF for the first AP interface. 1551 * Reset the HW TSF for the first AP or mesh interface.
1542 */ 1552 */
1543 if ((priv->ah->opmode == NL80211_IFTYPE_AP) && 1553 if (priv->nvifs == 1 &&
1544 (priv->nvifs == 1) && 1554 ((priv->ah->opmode == NL80211_IFTYPE_AP &&
1545 (priv->num_ap_vif == 1) && 1555 vif->type == NL80211_IFTYPE_AP &&
1546 (vif->type == NL80211_IFTYPE_AP)) { 1556 priv->num_ap_vif == 1) ||
1557 (priv->ah->opmode == NL80211_IFTYPE_MESH_POINT &&
1558 vif->type == NL80211_IFTYPE_MESH_POINT &&
1559 priv->num_mbss_vif == 1))) {
1547 set_bit(OP_TSF_RESET, &priv->op_flags); 1560 set_bit(OP_TSF_RESET, &priv->op_flags);
1548 } 1561 }
1549 ath_dbg(common, CONFIG, 1562 ath_dbg(common, CONFIG,
@@ -1761,6 +1774,43 @@ static int ath9k_htc_get_stats(struct ieee80211_hw *hw,
1761 return 0; 1774 return 0;
1762} 1775}
1763 1776
1777struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv)
1778{
1779 struct base_eep_header *pBase = NULL;
1780 /*
1781 * This can be done since all the 3 EEPROM families have the
1782 * same base header upto a certain point, and we are interested in
1783 * the data only upto that point.
1784 */
1785
1786 if (AR_SREV_9271(priv->ah))
1787 pBase = (struct base_eep_header *)
1788 &priv->ah->eeprom.map4k.baseEepHeader;
1789 else if (priv->ah->hw_version.usbdev == AR9280_USB)
1790 pBase = (struct base_eep_header *)
1791 &priv->ah->eeprom.def.baseEepHeader;
1792 else if (priv->ah->hw_version.usbdev == AR9287_USB)
1793 pBase = (struct base_eep_header *)
1794 &priv->ah->eeprom.map9287.baseEepHeader;
1795 return pBase;
1796}
1797
1798
1799static int ath9k_htc_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
1800 u32 *rx_ant)
1801{
1802 struct ath9k_htc_priv *priv = hw->priv;
1803 struct base_eep_header *pBase = ath9k_htc_get_eeprom_base(priv);
1804 if (pBase) {
1805 *tx_ant = pBase->txMask;
1806 *rx_ant = pBase->rxMask;
1807 } else {
1808 *tx_ant = 0;
1809 *rx_ant = 0;
1810 }
1811 return 0;
1812}
1813
1764struct ieee80211_ops ath9k_htc_ops = { 1814struct ieee80211_ops ath9k_htc_ops = {
1765 .tx = ath9k_htc_tx, 1815 .tx = ath9k_htc_tx,
1766 .start = ath9k_htc_start, 1816 .start = ath9k_htc_start,
@@ -1786,4 +1836,11 @@ struct ieee80211_ops ath9k_htc_ops = {
1786 .set_coverage_class = ath9k_htc_set_coverage_class, 1836 .set_coverage_class = ath9k_htc_set_coverage_class,
1787 .set_bitrate_mask = ath9k_htc_set_bitrate_mask, 1837 .set_bitrate_mask = ath9k_htc_set_bitrate_mask,
1788 .get_stats = ath9k_htc_get_stats, 1838 .get_stats = ath9k_htc_get_stats,
1839 .get_antenna = ath9k_htc_get_antenna,
1840
1841#ifdef CONFIG_ATH9K_HTC_DEBUGFS
1842 .get_et_sset_count = ath9k_htc_get_et_sset_count,
1843 .get_et_stats = ath9k_htc_get_et_stats,
1844 .get_et_strings = ath9k_htc_get_et_strings,
1845#endif
1789}; 1846};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 6bd0e92ea2aa..e602c9519709 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -887,7 +887,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
887 if (priv->rxfilter & FIF_PSPOLL) 887 if (priv->rxfilter & FIF_PSPOLL)
888 rfilt |= ATH9K_RX_FILTER_PSPOLL; 888 rfilt |= ATH9K_RX_FILTER_PSPOLL;
889 889
890 if (priv->nvifs > 1) 890 if (priv->nvifs > 1 || priv->rxfilter & FIF_OTHER_BSS)
891 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; 891 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
892 892
893 return rfilt; 893 return rfilt;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 15dfefcf2d0f..4ca0cb060106 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -452,7 +452,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
452 ah->config.pcie_clock_req = 0; 452 ah->config.pcie_clock_req = 0;
453 ah->config.pcie_waen = 0; 453 ah->config.pcie_waen = 0;
454 ah->config.analog_shiftreg = 1; 454 ah->config.analog_shiftreg = 1;
455 ah->config.enable_ani = true;
456 455
457 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 456 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
458 ah->config.spurchans[i][0] = AR_NO_SPUR; 457 ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -549,8 +548,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
549 ah->eep_ops->get_eeprom_ver(ah), 548 ah->eep_ops->get_eeprom_ver(ah),
550 ah->eep_ops->get_eeprom_rev(ah)); 549 ah->eep_ops->get_eeprom_rev(ah));
551 550
552 if (ah->config.enable_ani) 551 ath9k_hw_ani_init(ah);
553 ath9k_hw_ani_init(ah);
554 552
555 return 0; 553 return 0;
556} 554}
@@ -1250,10 +1248,10 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
1250 1248
1251 switch (opmode) { 1249 switch (opmode) {
1252 case NL80211_IFTYPE_ADHOC: 1250 case NL80211_IFTYPE_ADHOC:
1253 case NL80211_IFTYPE_MESH_POINT:
1254 set |= AR_STA_ID1_ADHOC; 1251 set |= AR_STA_ID1_ADHOC;
1255 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); 1252 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1256 break; 1253 break;
1254 case NL80211_IFTYPE_MESH_POINT:
1257 case NL80211_IFTYPE_AP: 1255 case NL80211_IFTYPE_AP:
1258 set |= AR_STA_ID1_STA_AP; 1256 set |= AR_STA_ID1_STA_AP;
1259 /* fall through */ 1257 /* fall through */
@@ -1872,7 +1870,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1872 1870
1873 ah->caldata = caldata; 1871 ah->caldata = caldata;
1874 if (caldata && (chan->channel != caldata->channel || 1872 if (caldata && (chan->channel != caldata->channel ||
1875 chan->channelFlags != caldata->channelFlags)) { 1873 chan->channelFlags != caldata->channelFlags ||
1874 chan->chanmode != caldata->chanmode)) {
1876 /* Operating channel changed, reset channel calibration data */ 1875 /* Operating channel changed, reset channel calibration data */
1877 memset(caldata, 0, sizeof(*caldata)); 1876 memset(caldata, 0, sizeof(*caldata));
1878 ath9k_init_nfcal_hist_buffer(ah, chan); 1877 ath9k_init_nfcal_hist_buffer(ah, chan);
@@ -2255,12 +2254,12 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
2255 2254
2256 switch (ah->opmode) { 2255 switch (ah->opmode) {
2257 case NL80211_IFTYPE_ADHOC: 2256 case NL80211_IFTYPE_ADHOC:
2258 case NL80211_IFTYPE_MESH_POINT:
2259 REG_SET_BIT(ah, AR_TXCFG, 2257 REG_SET_BIT(ah, AR_TXCFG,
2260 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); 2258 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
2261 REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon + 2259 REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
2262 TU_TO_USEC(ah->atim_window ? ah->atim_window : 1)); 2260 TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
2263 flags |= AR_NDP_TIMER_EN; 2261 flags |= AR_NDP_TIMER_EN;
2262 case NL80211_IFTYPE_MESH_POINT:
2264 case NL80211_IFTYPE_AP: 2263 case NL80211_IFTYPE_AP:
2265 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon); 2264 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
2266 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon - 2265 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon -
@@ -2600,17 +2599,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2600 if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE)) 2599 if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
2601 pCap->hw_caps |= ATH9K_HW_CAP_MCI; 2600 pCap->hw_caps |= ATH9K_HW_CAP_MCI;
2602 2601
2603 if (AR_SREV_9462_20(ah)) 2602 if (AR_SREV_9462_20_OR_LATER(ah))
2604 pCap->hw_caps |= ATH9K_HW_CAP_RTT; 2603 pCap->hw_caps |= ATH9K_HW_CAP_RTT;
2605 } 2604 }
2606 2605
2607 if (AR_SREV_9280_20_OR_LATER(ah)) { 2606 if (AR_SREV_9462(ah))
2608 pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE | 2607 pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE;
2609 ATH9K_HW_WOW_PATTERN_MATCH_EXACT;
2610
2611 if (AR_SREV_9280(ah))
2612 pCap->hw_caps |= ATH9K_HW_WOW_PATTERN_MATCH_DWORD;
2613 }
2614 2608
2615 if (AR_SREV_9300_20_OR_LATER(ah) && 2609 if (AR_SREV_9300_20_OR_LATER(ah) &&
2616 ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) 2610 ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
@@ -3048,7 +3042,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
3048 3042
3049 timer_next = tsf + trig_timeout; 3043 timer_next = tsf + trig_timeout;
3050 3044
3051 ath_dbg(ath9k_hw_common(ah), HWTIMER, 3045 ath_dbg(ath9k_hw_common(ah), BTCOEX,
3052 "current tsf %x period %x timer_next %x\n", 3046 "current tsf %x period %x timer_next %x\n",
3053 tsf, timer_period, timer_next); 3047 tsf, timer_period, timer_next);
3054 3048
@@ -3147,7 +3141,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
3147 index = rightmost_index(timer_table, &thresh_mask); 3141 index = rightmost_index(timer_table, &thresh_mask);
3148 timer = timer_table->timers[index]; 3142 timer = timer_table->timers[index];
3149 BUG_ON(!timer); 3143 BUG_ON(!timer);
3150 ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n", 3144 ath_dbg(common, BTCOEX, "TSF overflow for Gen timer %d\n",
3151 index); 3145 index);
3152 timer->overflow(timer->arg); 3146 timer->overflow(timer->arg);
3153 } 3147 }
@@ -3156,7 +3150,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
3156 index = rightmost_index(timer_table, &trigger_mask); 3150 index = rightmost_index(timer_table, &trigger_mask);
3157 timer = timer_table->timers[index]; 3151 timer = timer_table->timers[index];
3158 BUG_ON(!timer); 3152 BUG_ON(!timer);
3159 ath_dbg(common, HWTIMER, 3153 ath_dbg(common, BTCOEX,
3160 "Gen timer[%d] trigger\n", index); 3154 "Gen timer[%d] trigger\n", index);
3161 timer->trigger(timer->arg); 3155 timer->trigger(timer->arg);
3162 } 3156 }
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index ae3034374bc4..cd74b3afef7d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -246,9 +246,7 @@ enum ath9k_hw_caps {
246 ATH9K_HW_CAP_MCI = BIT(15), 246 ATH9K_HW_CAP_MCI = BIT(15),
247 ATH9K_HW_CAP_DFS = BIT(16), 247 ATH9K_HW_CAP_DFS = BIT(16),
248 ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17), 248 ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17),
249 ATH9K_HW_WOW_PATTERN_MATCH_EXACT = BIT(18), 249 ATH9K_HW_CAP_PAPRD = BIT(18),
250 ATH9K_HW_WOW_PATTERN_MATCH_DWORD = BIT(19),
251 ATH9K_HW_CAP_PAPRD = BIT(20),
252}; 250};
253 251
254/* 252/*
@@ -291,7 +289,6 @@ struct ath9k_ops_config {
291 u32 ofdm_trig_high; 289 u32 ofdm_trig_high;
292 u32 cck_trig_high; 290 u32 cck_trig_high;
293 u32 cck_trig_low; 291 u32 cck_trig_low;
294 u32 enable_ani;
295 u32 enable_paprd; 292 u32 enable_paprd;
296 int serialize_regmode; 293 int serialize_regmode;
297 bool rx_intr_mitigation; 294 bool rx_intr_mitigation;
@@ -310,6 +307,10 @@ struct ath9k_ops_config {
310 u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; 307 u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
311 u8 max_txtrig_level; 308 u8 max_txtrig_level;
312 u16 ani_poll_interval; /* ANI poll interval in ms */ 309 u16 ani_poll_interval; /* ANI poll interval in ms */
310
311 /* Platform specific config */
312 u32 xlna_gpio;
313 bool xatten_margin_cfg;
313}; 314};
314 315
315enum ath9k_int { 316enum ath9k_int {
@@ -423,7 +424,6 @@ struct ath9k_hw_cal_data {
423 424
424struct ath9k_channel { 425struct ath9k_channel {
425 struct ieee80211_channel *chan; 426 struct ieee80211_channel *chan;
426 struct ar5416AniState ani;
427 u16 channel; 427 u16 channel;
428 u32 channelFlags; 428 u32 channelFlags;
429 u32 chanmode; 429 u32 chanmode;
@@ -854,10 +854,10 @@ struct ath_hw {
854 u32 globaltxtimeout; 854 u32 globaltxtimeout;
855 855
856 /* ANI */ 856 /* ANI */
857 u32 proc_phyerr;
858 u32 aniperiod; 857 u32 aniperiod;
859 enum ath9k_ani_cmd ani_function; 858 enum ath9k_ani_cmd ani_function;
860 u32 ani_skip_count; 859 u32 ani_skip_count;
860 struct ar5416AniState ani;
861 861
862#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 862#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
863 struct ath_btcoex_hw btcoex_hw; 863 struct ath_btcoex_hw btcoex_hw;
@@ -882,9 +882,6 @@ struct ath_hw {
882 struct ar5416IniArray iniBank6; 882 struct ar5416IniArray iniBank6;
883 struct ar5416IniArray iniAddac; 883 struct ar5416IniArray iniAddac;
884 struct ar5416IniArray iniPcieSerdes; 884 struct ar5416IniArray iniPcieSerdes;
885#ifdef CONFIG_PM_SLEEP
886 struct ar5416IniArray iniPcieSerdesWow;
887#endif
888 struct ar5416IniArray iniPcieSerdesLowPower; 885 struct ar5416IniArray iniPcieSerdesLowPower;
889 struct ar5416IniArray iniModesFastClock; 886 struct ar5416IniArray iniModesFastClock;
890 struct ar5416IniArray iniAdditional; 887 struct ar5416IniArray iniAdditional;
@@ -895,6 +892,9 @@ struct ath_hw {
895 struct ar5416IniArray iniCckfirJapan2484; 892 struct ar5416IniArray iniCckfirJapan2484;
896 struct ar5416IniArray iniModes_9271_ANI_reg; 893 struct ar5416IniArray iniModes_9271_ANI_reg;
897 struct ar5416IniArray ini_radio_post_sys2ant; 894 struct ar5416IniArray ini_radio_post_sys2ant;
895 struct ar5416IniArray ini_modes_rxgain_5g_xlna;
896 struct ar5416IniArray ini_modes_rxgain_bb_core;
897 struct ar5416IniArray ini_modes_rxgain_bb_postamble;
898 898
899 struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT]; 899 struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
900 struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT]; 900 struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
@@ -1165,8 +1165,6 @@ static inline void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
1165} 1165}
1166#endif 1166#endif
1167 1167
1168
1169
1170#define ATH9K_CLOCK_RATE_CCK 22 1168#define ATH9K_CLOCK_RATE_CCK 22
1171#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40 1169#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
1172#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 1170#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 2ba494567777..16f8b201642b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -21,6 +21,7 @@
21#include <linux/ath9k_platform.h> 21#include <linux/ath9k_platform.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/relay.h> 23#include <linux/relay.h>
24#include <net/ieee80211_radiotap.h>
24 25
25#include "ath9k.h" 26#include "ath9k.h"
26 27
@@ -431,6 +432,8 @@ static int ath9k_init_queues(struct ath_softc *sc)
431 sc->config.cabqReadytime = ATH_CABQ_READY_TIME; 432 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
432 ath_cabq_update(sc); 433 ath_cabq_update(sc);
433 434
435 sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
436
434 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 437 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
435 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i); 438 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
436 sc->tx.txq_map[i]->mac80211_qnum = i; 439 sc->tx.txq_map[i]->mac80211_qnum = i;
@@ -510,6 +513,27 @@ static void ath9k_init_misc(struct ath_softc *sc)
510 sc->spec_config.fft_period = 0xF; 513 sc->spec_config.fft_period = 0xF;
511} 514}
512 515
516static void ath9k_init_platform(struct ath_softc *sc)
517{
518 struct ath_hw *ah = sc->sc_ah;
519 struct ath_common *common = ath9k_hw_common(ah);
520
521 if (common->bus_ops->ath_bus_type != ATH_PCI)
522 return;
523
524 if (sc->driver_data & (ATH9K_PCI_CUS198 |
525 ATH9K_PCI_CUS230)) {
526 ah->config.xlna_gpio = 9;
527 ah->config.xatten_margin_cfg = true;
528
529 ath_info(common, "Set parameters for %s\n",
530 (sc->driver_data & ATH9K_PCI_CUS198) ?
531 "CUS198" : "CUS230");
532 } else if (sc->driver_data & ATH9K_PCI_CUS217) {
533 ath_info(common, "CUS217 card detected\n");
534 }
535}
536
513static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, 537static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
514 void *ctx) 538 void *ctx)
515{ 539{
@@ -602,6 +626,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
602 common->disable_ani = false; 626 common->disable_ani = false;
603 627
604 /* 628 /*
629 * Platform quirks.
630 */
631 ath9k_init_platform(sc);
632
633 /*
605 * Enable Antenna diversity only when BTCOEX is disabled 634 * Enable Antenna diversity only when BTCOEX is disabled
606 * and the user manually requests the feature. 635 * and the user manually requests the feature.
607 */ 636 */
@@ -613,9 +642,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
613 spin_lock_init(&sc->sc_serial_rw); 642 spin_lock_init(&sc->sc_serial_rw);
614 spin_lock_init(&sc->sc_pm_lock); 643 spin_lock_init(&sc->sc_pm_lock);
615 mutex_init(&sc->mutex); 644 mutex_init(&sc->mutex);
616#ifdef CONFIG_ATH9K_MAC_DEBUG
617 spin_lock_init(&sc->debug.samp_lock);
618#endif
619 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); 645 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
620 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet, 646 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
621 (unsigned long)sc); 647 (unsigned long)sc);
@@ -755,6 +781,15 @@ static const struct ieee80211_iface_combination if_comb[] = {
755 } 781 }
756}; 782};
757 783
784#ifdef CONFIG_PM
785static const struct wiphy_wowlan_support ath9k_wowlan_support = {
786 .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
787 .n_patterns = MAX_NUM_USER_PATTERN,
788 .pattern_min_len = 1,
789 .pattern_max_len = MAX_PATTERN_SIZE,
790};
791#endif
792
758void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) 793void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
759{ 794{
760 struct ath_hw *ah = sc->sc_ah; 795 struct ath_hw *ah = sc->sc_ah;
@@ -769,12 +804,19 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
769 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 804 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
770 IEEE80211_HW_SUPPORTS_RC_TABLE; 805 IEEE80211_HW_SUPPORTS_RC_TABLE;
771 806
772 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) 807 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
773 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; 808 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
809
810 if (AR_SREV_9280_20_OR_LATER(ah))
811 hw->radiotap_mcs_details |=
812 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
813 }
774 814
775 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt) 815 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
776 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 816 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
777 817
818 hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
819
778 hw->wiphy->interface_modes = 820 hw->wiphy->interface_modes =
779 BIT(NL80211_IFTYPE_P2P_GO) | 821 BIT(NL80211_IFTYPE_P2P_GO) |
780 BIT(NL80211_IFTYPE_P2P_CLIENT) | 822 BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -794,21 +836,13 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
794 hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 836 hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
795 837
796#ifdef CONFIG_PM_SLEEP 838#ifdef CONFIG_PM_SLEEP
797
798 if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && 839 if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
799 device_can_wakeup(sc->dev)) { 840 (sc->driver_data & ATH9K_PCI_WOW) &&
800 841 device_can_wakeup(sc->dev))
801 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | 842 hw->wiphy->wowlan = &ath9k_wowlan_support;
802 WIPHY_WOWLAN_DISCONNECT;
803 hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN;
804 hw->wiphy->wowlan.pattern_min_len = 1;
805 hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE;
806
807 }
808 843
809 atomic_set(&sc->wow_sleep_proc_intr, -1); 844 atomic_set(&sc->wow_sleep_proc_intr, -1);
810 atomic_set(&sc->wow_got_bmiss_intr, -1); 845 atomic_set(&sc->wow_got_bmiss_intr, -1);
811
812#endif 846#endif
813 847
814 hw->queues = 4; 848 hw->queues = 4;
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 849259b07370..fff5d3ccc663 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -390,9 +390,7 @@ void ath_ani_calibrate(unsigned long data)
390 } 390 }
391 391
392 /* Verify whether we must check ANI */ 392 /* Verify whether we must check ANI */
393 if (sc->sc_ah->config.enable_ani 393 if ((timestamp - common->ani.checkani_timer) >= ah->config.ani_poll_interval) {
394 && (timestamp - common->ani.checkani_timer) >=
395 ah->config.ani_poll_interval) {
396 aniflag = true; 394 aniflag = true;
397 common->ani.checkani_timer = timestamp; 395 common->ani.checkani_timer = timestamp;
398 } 396 }
@@ -418,7 +416,6 @@ void ath_ani_calibrate(unsigned long data)
418 longcal ? "long" : "", shortcal ? "short" : "", 416 longcal ? "long" : "", shortcal ? "short" : "",
419 aniflag ? "ani" : "", common->ani.caldone ? "true" : "false"); 417 aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
420 418
421 ath9k_debug_samp_bb_mac(sc);
422 ath9k_ps_restore(sc); 419 ath9k_ps_restore(sc);
423 420
424set_timer: 421set_timer:
@@ -428,9 +425,7 @@ set_timer:
428 * short calibration and long calibration. 425 * short calibration and long calibration.
429 */ 426 */
430 cal_interval = ATH_LONG_CALINTERVAL; 427 cal_interval = ATH_LONG_CALINTERVAL;
431 if (sc->sc_ah->config.enable_ani) 428 cal_interval = min(cal_interval, (u32)ah->config.ani_poll_interval);
432 cal_interval = min(cal_interval,
433 (u32)ah->config.ani_poll_interval);
434 if (!common->ani.caldone) 429 if (!common->ani.caldone)
435 cal_interval = min(cal_interval, (u32)short_cal_interval); 430 cal_interval = min(cal_interval, (u32)short_cal_interval);
436 431
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 566109a40fb3..2ef05ebffbcf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -547,6 +547,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
547 547
548 rs->rs_status = 0; 548 rs->rs_status = 0;
549 rs->rs_flags = 0; 549 rs->rs_flags = 0;
550 rs->flag = 0;
550 551
551 rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen; 552 rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
552 rs->rs_tstamp = ads.AR_RcvTimestamp; 553 rs->rs_tstamp = ads.AR_RcvTimestamp;
@@ -586,10 +587,17 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
586 rs->rs_moreaggr = 587 rs->rs_moreaggr =
587 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; 588 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
588 rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); 589 rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
589 rs->rs_flags = 590
590 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0; 591 /* directly mapped flags for ieee80211_rx_status */
591 rs->rs_flags |= 592 rs->flag |=
592 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0; 593 (ads.ds_rxstatus3 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
594 rs->flag |=
595 (ads.ds_rxstatus3 & AR_2040) ? RX_FLAG_40MHZ : 0;
596 if (AR_SREV_9280_20_OR_LATER(ah))
597 rs->flag |=
598 (ads.ds_rxstatus3 & AR_STBC) ?
599 /* we can only Nss=1 STBC */
600 (1 << RX_FLAG_STBC_SHIFT) : 0;
593 601
594 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr) 602 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
595 rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE; 603 rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 5865f92998e1..b02dfce964b4 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -149,6 +149,7 @@ struct ath_rx_status {
149 u32 evm2; 149 u32 evm2;
150 u32 evm3; 150 u32 evm3;
151 u32 evm4; 151 u32 evm4;
152 u32 flag; /* see enum mac80211_rx_flags */
152}; 153};
153 154
154struct ath_htc_rx_status { 155struct ath_htc_rx_status {
@@ -533,7 +534,8 @@ struct ar5416_desc {
533#define AR_2040 0x00000002 534#define AR_2040 0x00000002
534#define AR_Parallel40 0x00000004 535#define AR_Parallel40 0x00000004
535#define AR_Parallel40_S 2 536#define AR_Parallel40_S 2
536#define AR_RxStatusRsvd30 0x000000f8 537#define AR_STBC 0x00000008 /* on ar9280 and later */
538#define AR_RxStatusRsvd30 0x000000f0
537#define AR_RxAntenna 0xffffff00 539#define AR_RxAntenna 0xffffff00
538#define AR_RxAntenna_S 8 540#define AR_RxAntenna_S 8
539 541
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 5092ecae7706..1737a3e33685 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -193,7 +193,6 @@ static bool ath_prepare_reset(struct ath_softc *sc)
193 ath_stop_ani(sc); 193 ath_stop_ani(sc);
194 del_timer_sync(&sc->rx_poll_timer); 194 del_timer_sync(&sc->rx_poll_timer);
195 195
196 ath9k_debug_samp_bb_mac(sc);
197 ath9k_hw_disable_interrupts(ah); 196 ath9k_hw_disable_interrupts(ah);
198 197
199 if (!ath_drain_all_txq(sc)) 198 if (!ath_drain_all_txq(sc))
@@ -1211,13 +1210,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1211 ath_update_survey_stats(sc); 1210 ath_update_survey_stats(sc);
1212 spin_unlock_irqrestore(&common->cc_lock, flags); 1211 spin_unlock_irqrestore(&common->cc_lock, flags);
1213 1212
1214 /*
1215 * Preserve the current channel values, before updating
1216 * the same channel
1217 */
1218 if (ah->curchan && (old_pos == pos))
1219 ath9k_hw_getnf(ah, ah->curchan);
1220
1221 ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos], 1213 ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
1222 curchan, channel_type); 1214 curchan, channel_type);
1223 1215
@@ -1273,7 +1265,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1273 curchan->center_freq); 1265 curchan->center_freq);
1274 } else { 1266 } else {
1275 /* perform spectral scan if requested. */ 1267 /* perform spectral scan if requested. */
1276 if (sc->scanning && 1268 if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
1277 sc->spectral_mode == SPECTRAL_CHANSCAN) 1269 sc->spectral_mode == SPECTRAL_CHANSCAN)
1278 ath9k_spectral_scan_trigger(hw); 1270 ath9k_spectral_scan_trigger(hw);
1279 } 1271 }
@@ -1690,7 +1682,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1690 bool flush = false; 1682 bool flush = false;
1691 int ret = 0; 1683 int ret = 0;
1692 1684
1693 local_bh_disable(); 1685 mutex_lock(&sc->mutex);
1694 1686
1695 switch (action) { 1687 switch (action) {
1696 case IEEE80211_AMPDU_RX_START: 1688 case IEEE80211_AMPDU_RX_START:
@@ -1723,7 +1715,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1723 ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n"); 1715 ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
1724 } 1716 }
1725 1717
1726 local_bh_enable(); 1718 mutex_unlock(&sc->mutex);
1727 1719
1728 return ret; 1720 return ret;
1729} 1721}
@@ -2007,7 +1999,6 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
2007{ 1999{
2008 struct ath_hw *ah = sc->sc_ah; 2000 struct ath_hw *ah = sc->sc_ah;
2009 struct ath_common *common = ath9k_hw_common(ah); 2001 struct ath_common *common = ath9k_hw_common(ah);
2010 struct ath9k_hw_capabilities *pcaps = &ah->caps;
2011 int pattern_count = 0; 2002 int pattern_count = 0;
2012 int i, byte_cnt; 2003 int i, byte_cnt;
2013 u8 dis_deauth_pattern[MAX_PATTERN_SIZE]; 2004 u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
@@ -2077,36 +2068,9 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
2077 2068
2078 /* Create Disassociate pattern mask */ 2069 /* Create Disassociate pattern mask */
2079 2070
2080 if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_EXACT) { 2071 dis_deauth_mask[0] = 0xfe;
2081 2072 dis_deauth_mask[1] = 0x03;
2082 if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_DWORD) { 2073 dis_deauth_mask[2] = 0xc0;
2083 /*
2084 * for AR9280, because of hardware limitation, the
2085 * first 4 bytes have to be matched for all patterns.
2086 * the mask for disassociation and de-auth pattern
2087 * matching need to enable the first 4 bytes.
2088 * also the duration field needs to be filled.
2089 */
2090 dis_deauth_mask[0] = 0xf0;
2091
2092 /*
2093 * fill in duration field
2094 FIXME: what is the exact value ?
2095 */
2096 dis_deauth_pattern[2] = 0xff;
2097 dis_deauth_pattern[3] = 0xff;
2098 } else {
2099 dis_deauth_mask[0] = 0xfe;
2100 }
2101
2102 dis_deauth_mask[1] = 0x03;
2103 dis_deauth_mask[2] = 0xc0;
2104 } else {
2105 dis_deauth_mask[0] = 0xef;
2106 dis_deauth_mask[1] = 0x3f;
2107 dis_deauth_mask[2] = 0x00;
2108 dis_deauth_mask[3] = 0xfc;
2109 }
2110 2074
2111 ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n"); 2075 ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
2112 2076
@@ -2342,15 +2306,13 @@ static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
2342static void ath9k_sw_scan_start(struct ieee80211_hw *hw) 2306static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
2343{ 2307{
2344 struct ath_softc *sc = hw->priv; 2308 struct ath_softc *sc = hw->priv;
2345 2309 set_bit(SC_OP_SCANNING, &sc->sc_flags);
2346 sc->scanning = 1;
2347} 2310}
2348 2311
2349static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) 2312static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2350{ 2313{
2351 struct ath_softc *sc = hw->priv; 2314 struct ath_softc *sc = hw->priv;
2352 2315 clear_bit(SC_OP_SCANNING, &sc->sc_flags);
2353 sc->scanning = 0;
2354} 2316}
2355 2317
2356struct ieee80211_ops ath9k_ops = { 2318struct ieee80211_ops ath9k_ops = {
@@ -2378,6 +2340,7 @@ struct ieee80211_ops ath9k_ops = {
2378 .flush = ath9k_flush, 2340 .flush = ath9k_flush,
2379 .tx_frames_pending = ath9k_tx_frames_pending, 2341 .tx_frames_pending = ath9k_tx_frames_pending,
2380 .tx_last_beacon = ath9k_tx_last_beacon, 2342 .tx_last_beacon = ath9k_tx_last_beacon,
2343 .release_buffered_frames = ath9k_release_buffered_frames,
2381 .get_stats = ath9k_get_stats, 2344 .get_stats = ath9k_get_stats,
2382 .set_antenna = ath9k_set_antenna, 2345 .set_antenna = ath9k_set_antenna,
2383 .get_antenna = ath9k_get_antenna, 2346 .get_antenna = ath9k_get_antenna,
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 0e0d39583837..c585c9b35973 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -34,8 +34,108 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
34 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ 34 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
35 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ 35 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
36 { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */ 36 { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
37
38 /* PCI-E CUS198 */
39 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
40 0x0032,
41 PCI_VENDOR_ID_AZWAVE,
42 0x2086),
43 .driver_data = ATH9K_PCI_CUS198 },
44 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
45 0x0032,
46 PCI_VENDOR_ID_AZWAVE,
47 0x1237),
48 .driver_data = ATH9K_PCI_CUS198 },
49 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
50 0x0032,
51 PCI_VENDOR_ID_AZWAVE,
52 0x2126),
53 .driver_data = ATH9K_PCI_CUS198 },
54
55 /* PCI-E CUS230 */
56 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
57 0x0032,
58 PCI_VENDOR_ID_AZWAVE,
59 0x2152),
60 .driver_data = ATH9K_PCI_CUS230 },
61 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
62 0x0032,
63 PCI_VENDOR_ID_FOXCONN,
64 0xE075),
65 .driver_data = ATH9K_PCI_CUS230 },
66
37 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ 67 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
38 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ 68 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
69
70 /* PCI-E CUS217 */
71 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
72 0x0034,
73 PCI_VENDOR_ID_AZWAVE,
74 0x2116),
75 .driver_data = ATH9K_PCI_CUS217 },
76 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
77 0x0034,
78 0x11AD, /* LITEON */
79 0x6661),
80 .driver_data = ATH9K_PCI_CUS217 },
81
82 /* AR9462 with WoW support */
83 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
84 0x0034,
85 PCI_VENDOR_ID_ATHEROS,
86 0x3117),
87 .driver_data = ATH9K_PCI_WOW },
88 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
89 0x0034,
90 PCI_VENDOR_ID_LENOVO,
91 0x3214),
92 .driver_data = ATH9K_PCI_WOW },
93 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
94 0x0034,
95 PCI_VENDOR_ID_ATTANSIC,
96 0x0091),
97 .driver_data = ATH9K_PCI_WOW },
98 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
99 0x0034,
100 PCI_VENDOR_ID_AZWAVE,
101 0x2110),
102 .driver_data = ATH9K_PCI_WOW },
103 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
104 0x0034,
105 PCI_VENDOR_ID_ASUSTEK,
106 0x850E),
107 .driver_data = ATH9K_PCI_WOW },
108 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
109 0x0034,
110 0x11AD, /* LITEON */
111 0x6631),
112 .driver_data = ATH9K_PCI_WOW },
113 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
114 0x0034,
115 0x11AD, /* LITEON */
116 0x6641),
117 .driver_data = ATH9K_PCI_WOW },
118 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
119 0x0034,
120 PCI_VENDOR_ID_HP,
121 0x1864),
122 .driver_data = ATH9K_PCI_WOW },
123 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
124 0x0034,
125 0x14CD, /* USI */
126 0x0063),
127 .driver_data = ATH9K_PCI_WOW },
128 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
129 0x0034,
130 0x14CD, /* USI */
131 0x0064),
132 .driver_data = ATH9K_PCI_WOW },
133 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
134 0x0034,
135 0x10CF, /* Fujitsu */
136 0x1783),
137 .driver_data = ATH9K_PCI_WOW },
138
39 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ 139 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
40 { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ 140 { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
41 { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */ 141 { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
@@ -221,6 +321,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
221 sc->hw = hw; 321 sc->hw = hw;
222 sc->dev = &pdev->dev; 322 sc->dev = &pdev->dev;
223 sc->mem = pcim_iomap_table(pdev)[0]; 323 sc->mem = pcim_iomap_table(pdev)[0];
324 sc->driver_data = id->driver_data;
224 325
225 /* Will be cleared in ath9k_start() */ 326 /* Will be cleared in ath9k_start() */
226 set_bit(SC_OP_INVALID, &sc->sc_flags); 327 set_bit(SC_OP_INVALID, &sc->sc_flags);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 8be2b5d8c155..865e043e8aa6 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -868,10 +868,7 @@ static int ath9k_process_rate(struct ath_common *common,
868 if (rx_stats->rs_rate & 0x80) { 868 if (rx_stats->rs_rate & 0x80) {
869 /* HT rate */ 869 /* HT rate */
870 rxs->flag |= RX_FLAG_HT; 870 rxs->flag |= RX_FLAG_HT;
871 if (rx_stats->rs_flags & ATH9K_RX_2040) 871 rxs->flag |= rx_stats->flag;
872 rxs->flag |= RX_FLAG_40MHZ;
873 if (rx_stats->rs_flags & ATH9K_RX_GI)
874 rxs->flag |= RX_FLAG_SHORT_GI;
875 rxs->rate_idx = rx_stats->rs_rate & 0x7f; 872 rxs->rate_idx = rx_stats->rs_rate & 0x7f;
876 return 0; 873 return 0;
877 } 874 }
@@ -958,11 +955,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
958 if (rx_stats->rs_more) 955 if (rx_stats->rs_more)
959 return 0; 956 return 0;
960 957
961 ath9k_process_rssi(common, hw, hdr, rx_stats);
962
963 if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 958 if (ath9k_process_rate(common, hw, rx_stats, rx_status))
964 return -EINVAL; 959 return -EINVAL;
965 960
961 ath9k_process_rssi(common, hw, hdr, rx_stats);
962
966 rx_status->band = hw->conf.chandef.chan->band; 963 rx_status->band = hw->conf.chandef.chan->band;
967 rx_status->freq = hw->conf.chandef.chan->center_freq; 964 rx_status->freq = hw->conf.chandef.chan->center_freq;
968 rx_status->signal = ah->noise + rx_stats->rs_rssi; 965 rx_status->signal = ah->noise + rx_stats->rs_rssi;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index f7c90cc58d56..5af97442ac37 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -806,6 +806,7 @@
806#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */ 806#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
807#define AR_SREV_VERSION_9462 0x280 807#define AR_SREV_VERSION_9462 0x280
808#define AR_SREV_REVISION_9462_20 2 808#define AR_SREV_REVISION_9462_20 2
809#define AR_SREV_REVISION_9462_21 3
809#define AR_SREV_VERSION_9565 0x2C0 810#define AR_SREV_VERSION_9565 0x2C0
810#define AR_SREV_REVISION_9565_10 0 811#define AR_SREV_REVISION_9565_10 0
811#define AR_SREV_VERSION_9550 0x400 812#define AR_SREV_VERSION_9550 0x400
@@ -911,10 +912,18 @@
911 912
912#define AR_SREV_9462(_ah) \ 913#define AR_SREV_9462(_ah) \
913 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462)) 914 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462))
914
915#define AR_SREV_9462_20(_ah) \ 915#define AR_SREV_9462_20(_ah) \
916 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \ 916 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
917 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20)) 917 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
918#define AR_SREV_9462_21(_ah) \
919 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
920 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_21))
921#define AR_SREV_9462_20_OR_LATER(_ah) \
922 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
923 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
924#define AR_SREV_9462_21_OR_LATER(_ah) \
925 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
926 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_21))
918 927
919#define AR_SREV_9565(_ah) \ 928#define AR_SREV_9565(_ah) \
920 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565)) 929 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 9f8563091bea..81c88dd606dc 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -34,17 +34,6 @@ const char *ath9k_hw_wow_event_to_string(u32 wow_event)
34} 34}
35EXPORT_SYMBOL(ath9k_hw_wow_event_to_string); 35EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
36 36
37static void ath9k_hw_config_serdes_wow_sleep(struct ath_hw *ah)
38{
39 int i;
40
41 for (i = 0; i < ah->iniPcieSerdesWow.ia_rows; i++)
42 REG_WRITE(ah, INI_RA(&ah->iniPcieSerdesWow, i, 0),
43 INI_RA(&ah->iniPcieSerdesWow, i, 1));
44
45 usleep_range(1000, 1500);
46}
47
48static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah) 37static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
49{ 38{
50 struct ath_common *common = ath9k_hw_common(ah); 39 struct ath_common *common = ath9k_hw_common(ah);
@@ -58,15 +47,8 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
58 ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", 47 ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
59 REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW)); 48 REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
60 return; 49 return;
61 } else {
62 if (!AR_SREV_9300_20_OR_LATER(ah))
63 REG_WRITE(ah, AR_RXDP, 0x0);
64 } 50 }
65 51
66 /* AR9280 WoW has sleep issue, do not set it to sleep */
67 if (AR_SREV_9280_20(ah))
68 return;
69
70 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT); 52 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
71} 53}
72 54
@@ -84,27 +66,16 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
84 66
85 /* set the transmit buffer */ 67 /* set the transmit buffer */
86 ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16)); 68 ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
87
88 if (!(AR_SREV_9300_20_OR_LATER(ah)))
89 ctl[0] += (KAL_ANTENNA_MODE << 25);
90
91 ctl[1] = 0; 69 ctl[1] = 0;
92 ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */ 70 ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */
93 ctl[4] = 0; 71 ctl[4] = 0;
94 ctl[7] = (ah->txchainmask) << 2; 72 ctl[7] = (ah->txchainmask) << 2;
95 73 ctl[2] = 0xf << 16; /* tx_tries 0 */
96 if (AR_SREV_9300_20_OR_LATER(ah))
97 ctl[2] = 0xf << 16; /* tx_tries 0 */
98 else
99 ctl[2] = 0x7 << 16; /* tx_tries 0 */
100
101 74
102 for (i = 0; i < KAL_NUM_DESC_WORDS; i++) 75 for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
103 REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]); 76 REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
104 77
105 /* for AR9300 family 13 descriptor words */ 78 REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
106 if (AR_SREV_9300_20_OR_LATER(ah))
107 REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
108 79
109 data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) | 80 data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
110 (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16); 81 (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
@@ -183,9 +154,6 @@ void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
183 154
184 ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT); 155 ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
185 156
186 if (!AR_SREV_9285_12_OR_LATER(ah))
187 return;
188
189 if (pattern_count < 4) { 157 if (pattern_count < 4) {
190 /* Pattern 0-3 uses AR_WOW_LENGTH1 register */ 158 /* Pattern 0-3 uses AR_WOW_LENGTH1 register */
191 set = (pattern_len & AR_WOW_LENGTH_MAX) << 159 set = (pattern_len & AR_WOW_LENGTH_MAX) <<
@@ -207,6 +175,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
207{ 175{
208 u32 wow_status = 0; 176 u32 wow_status = 0;
209 u32 val = 0, rval; 177 u32 val = 0, rval;
178
210 /* 179 /*
211 * read the WoW status register to know 180 * read the WoW status register to know
212 * the wakeup reason 181 * the wakeup reason
@@ -223,19 +192,14 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
223 val &= ah->wow_event_mask; 192 val &= ah->wow_event_mask;
224 193
225 if (val) { 194 if (val) {
226
227 if (val & AR_WOW_MAGIC_PAT_FOUND) 195 if (val & AR_WOW_MAGIC_PAT_FOUND)
228 wow_status |= AH_WOW_MAGIC_PATTERN_EN; 196 wow_status |= AH_WOW_MAGIC_PATTERN_EN;
229
230 if (AR_WOW_PATTERN_FOUND(val)) 197 if (AR_WOW_PATTERN_FOUND(val))
231 wow_status |= AH_WOW_USER_PATTERN_EN; 198 wow_status |= AH_WOW_USER_PATTERN_EN;
232
233 if (val & AR_WOW_KEEP_ALIVE_FAIL) 199 if (val & AR_WOW_KEEP_ALIVE_FAIL)
234 wow_status |= AH_WOW_LINK_CHANGE; 200 wow_status |= AH_WOW_LINK_CHANGE;
235
236 if (val & AR_WOW_BEACON_FAIL) 201 if (val & AR_WOW_BEACON_FAIL)
237 wow_status |= AH_WOW_BEACON_MISS; 202 wow_status |= AH_WOW_BEACON_MISS;
238
239 } 203 }
240 204
241 /* 205 /*
@@ -255,17 +219,6 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
255 AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN))); 219 AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
256 220
257 /* 221 /*
258 * tie reset register for AR9002 family of chipsets
259 * NB: not tieing it back might have some repurcussions.
260 */
261
262 if (!AR_SREV_9300_20_OR_LATER(ah)) {
263 REG_SET_BIT(ah, AR_WA, AR_WA_UNTIE_RESET_EN |
264 AR_WA_POR_SHORT | AR_WA_RESET_EN);
265 }
266
267
268 /*
269 * restore the beacon threshold to init value 222 * restore the beacon threshold to init value
270 */ 223 */
271 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); 224 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
@@ -277,8 +230,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
277 * reset to our Chip's Power On Reset so that any PCI-E 230 * reset to our Chip's Power On Reset so that any PCI-E
278 * reset from the bus will not reset our chip 231 * reset from the bus will not reset our chip
279 */ 232 */
280 233 if (ah->is_pciexpress)
281 if (AR_SREV_9280_20_OR_LATER(ah) && ah->is_pciexpress)
282 ath9k_hw_configpcipowersave(ah, false); 234 ath9k_hw_configpcipowersave(ah, false);
283 235
284 ah->wow_event_mask = 0; 236 ah->wow_event_mask = 0;
@@ -298,7 +250,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
298 * are from the 'pattern_enable' in this function and 250 * are from the 'pattern_enable' in this function and
299 * 'pattern_count' of ath9k_hw_wow_apply_pattern() 251 * 'pattern_count' of ath9k_hw_wow_apply_pattern()
300 */ 252 */
301
302 wow_event_mask = ah->wow_event_mask; 253 wow_event_mask = ah->wow_event_mask;
303 254
304 /* 255 /*
@@ -306,50 +257,15 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
306 * WOW sleep, we do want the Reset from the PCI-E to disturb 257 * WOW sleep, we do want the Reset from the PCI-E to disturb
307 * our hw state 258 * our hw state
308 */ 259 */
309
310 if (ah->is_pciexpress) { 260 if (ah->is_pciexpress) {
311
312 /* 261 /*
313 * we need to untie the internal POR (power-on-reset) 262 * we need to untie the internal POR (power-on-reset)
314 * to the external PCI-E reset. We also need to tie 263 * to the external PCI-E reset. We also need to tie
315 * the PCI-E Phy reset to the PCI-E reset. 264 * the PCI-E Phy reset to the PCI-E reset.
316 */ 265 */
317 266 set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
318 if (AR_SREV_9300_20_OR_LATER(ah)) { 267 clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
319 set = AR_WA_RESET_EN | AR_WA_POR_SHORT; 268 REG_RMW(ah, AR_WA, set, clr);
320 clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
321 REG_RMW(ah, AR_WA, set, clr);
322 } else {
323 if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
324 set = AR9285_WA_DEFAULT;
325 else
326 set = AR9280_WA_DEFAULT;
327
328 /*
329 * In AR9280 and AR9285, bit 14 in WA register
330 * (disable L1) should only be set when device
331 * enters D3 state and be cleared when device
332 * comes back to D0
333 */
334
335 if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
336 set |= AR_WA_D3_L1_DISABLE;
337
338 clr = AR_WA_UNTIE_RESET_EN;
339 set |= AR_WA_RESET_EN | AR_WA_POR_SHORT;
340 REG_RMW(ah, AR_WA, set, clr);
341
342 /*
343 * for WoW sleep, we reprogram the SerDes so that the
344 * PLL and CLK REQ are both enabled. This uses more
345 * power but otherwise WoW sleep is unstable and the
346 * chip may disappear.
347 */
348
349 if (AR_SREV_9285_12_OR_LATER(ah))
350 ath9k_hw_config_serdes_wow_sleep(ah);
351
352 }
353 } 269 }
354 270
355 /* 271 /*
@@ -378,7 +294,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
378 * Program default values for pattern backoff, aifs/slot/KAL count, 294 * Program default values for pattern backoff, aifs/slot/KAL count,
379 * beacon miss timeout, KAL timeout, etc. 295 * beacon miss timeout, KAL timeout, etc.
380 */ 296 */
381
382 set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF); 297 set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
383 REG_SET_BIT(ah, AR_WOW_PATTERN, set); 298 REG_SET_BIT(ah, AR_WOW_PATTERN, set);
384 299
@@ -398,7 +313,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
398 /* 313 /*
399 * Keep alive timo in ms except AR9280 314 * Keep alive timo in ms except AR9280
400 */ 315 */
401 if (!pattern_enable || AR_SREV_9280(ah)) 316 if (!pattern_enable)
402 set = AR_WOW_KEEP_ALIVE_NEVER; 317 set = AR_WOW_KEEP_ALIVE_NEVER;
403 else 318 else
404 set = KAL_TIMEOUT * 32; 319 set = KAL_TIMEOUT * 32;
@@ -420,7 +335,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
420 /* 335 /*
421 * Configure MAC WoW Registers 336 * Configure MAC WoW Registers
422 */ 337 */
423
424 set = 0; 338 set = 0;
425 /* Send keep alive timeouts anyway */ 339 /* Send keep alive timeouts anyway */
426 clr = AR_WOW_KEEP_ALIVE_AUTO_DIS; 340 clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
@@ -430,16 +344,9 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
430 else 344 else
431 set = AR_WOW_KEEP_ALIVE_FAIL_DIS; 345 set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
432 346
433 /*
434 * FIXME: For now disable keep alive frame
435 * failure. This seems to sometimes trigger
436 * unnecessary wake up with AR9485 chipsets.
437 */
438 set = AR_WOW_KEEP_ALIVE_FAIL_DIS; 347 set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
439
440 REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr); 348 REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
441 349
442
443 /* 350 /*
444 * we are relying on a bmiss failure. ensure we have 351 * we are relying on a bmiss failure. ensure we have
445 * enough threshold to prevent false positives 352 * enough threshold to prevent false positives
@@ -473,14 +380,8 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
473 set |= AR_WOW_MAC_INTR_EN; 380 set |= AR_WOW_MAC_INTR_EN;
474 REG_RMW(ah, AR_WOW_PATTERN, set, clr); 381 REG_RMW(ah, AR_WOW_PATTERN, set, clr);
475 382
476 /* 383 REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
477 * For AR9285 and later version of chipsets 384 AR_WOW_PATTERN_SUPPORTED);
478 * enable WoW pattern match for packets less
479 * than 256 bytes for all patterns
480 */
481 if (AR_SREV_9285_12_OR_LATER(ah))
482 REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
483 AR_WOW_PATTERN_SUPPORTED);
484 385
485 /* 386 /*
486 * Set the power states appropriately and enable PME 387 * Set the power states appropriately and enable PME
@@ -488,43 +389,32 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
488 clr = 0; 389 clr = 0;
489 set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN | 390 set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
490 AR_PMCTRL_PWR_PM_CTRL_ENA; 391 AR_PMCTRL_PWR_PM_CTRL_ENA;
491 /*
492 * This is needed for AR9300 chipsets to wake-up
493 * the host.
494 */
495 if (AR_SREV_9300_20_OR_LATER(ah))
496 clr = AR_PCIE_PM_CTRL_ENA;
497 392
393 clr = AR_PCIE_PM_CTRL_ENA;
498 REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr); 394 REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
499 395
500 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { 396 /*
501 /* 397 * this is needed to prevent the chip waking up
502 * this is needed to prevent the chip waking up 398 * the host within 3-4 seconds with certain
503 * the host within 3-4 seconds with certain 399 * platform/BIOS. The fix is to enable
504 * platform/BIOS. The fix is to enable 400 * D1 & D3 to match original definition and
505 * D1 & D3 to match original definition and 401 * also match the OTP value. Anyway this
506 * also match the OTP value. Anyway this 402 * is more related to SW WOW.
507 * is more related to SW WOW. 403 */
508 */ 404 clr = AR_PMCTRL_PWR_STATE_D1D3;
509 clr = AR_PMCTRL_PWR_STATE_D1D3; 405 REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
510 REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
511
512 set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
513 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
514 }
515
516 406
407 set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
408 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
517 409
518 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM); 410 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
519 411
520 if (AR_SREV_9300_20_OR_LATER(ah)) { 412 /* to bring down WOW power low margin */
521 /* to bring down WOW power low margin */ 413 set = BIT(13);
522 set = BIT(13); 414 REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
523 REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set); 415 /* HW WoW */
524 /* HW WoW */ 416 clr = BIT(5);
525 clr = BIT(5); 417 REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
526 REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
527 }
528 418
529 ath9k_hw_set_powermode_wow_sleep(ah); 419 ath9k_hw_set_powermode_wow_sleep(ah);
530 ah->wow_event_mask = wow_event_mask; 420 ah->wow_event_mask = wow_event_mask;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 83ab6be3fe6d..c59ae43b9b35 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -518,6 +518,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
518 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 518 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
519 !txfail); 519 !txfail);
520 } else { 520 } else {
521 if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
522 tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
523 ieee80211_sta_eosp(sta);
524 }
521 /* retry the un-acked ones */ 525 /* retry the un-acked ones */
522 if (bf->bf_next == NULL && bf_last->bf_stale) { 526 if (bf->bf_next == NULL && bf_last->bf_stale) {
523 struct ath_buf *tbf; 527 struct ath_buf *tbf;
@@ -786,25 +790,20 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
786 return ndelim; 790 return ndelim;
787} 791}
788 792
789static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, 793static struct ath_buf *
790 struct ath_txq *txq, 794ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
791 struct ath_atx_tid *tid, 795 struct ath_atx_tid *tid)
792 struct list_head *bf_q,
793 int *aggr_len)
794{ 796{
795#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
796 struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
797 int rl = 0, nframes = 0, ndelim, prev_al = 0;
798 u16 aggr_limit = 0, al = 0, bpad = 0,
799 al_delta, h_baw = tid->baw_size / 2;
800 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
801 struct ieee80211_tx_info *tx_info;
802 struct ath_frame_info *fi; 797 struct ath_frame_info *fi;
803 struct sk_buff *skb; 798 struct sk_buff *skb;
799 struct ath_buf *bf;
804 u16 seqno; 800 u16 seqno;
805 801
806 do { 802 while (1) {
807 skb = skb_peek(&tid->buf_q); 803 skb = skb_peek(&tid->buf_q);
804 if (!skb)
805 break;
806
808 fi = get_frame_info(skb); 807 fi = get_frame_info(skb);
809 bf = fi->bf; 808 bf = fi->bf;
810 if (!fi->bf) 809 if (!fi->bf)
@@ -820,10 +819,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
820 seqno = bf->bf_state.seqno; 819 seqno = bf->bf_state.seqno;
821 820
822 /* do not step over block-ack window */ 821 /* do not step over block-ack window */
823 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { 822 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
824 status = ATH_AGGR_BAW_CLOSED;
825 break; 823 break;
826 }
827 824
828 if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) { 825 if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
829 struct ath_tx_status ts = {}; 826 struct ath_tx_status ts = {};
@@ -837,6 +834,40 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
837 continue; 834 continue;
838 } 835 }
839 836
837 bf->bf_next = NULL;
838 bf->bf_lastbf = bf;
839 return bf;
840 }
841
842 return NULL;
843}
844
845static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
846 struct ath_txq *txq,
847 struct ath_atx_tid *tid,
848 struct list_head *bf_q,
849 int *aggr_len)
850{
851#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
852 struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
853 int rl = 0, nframes = 0, ndelim, prev_al = 0;
854 u16 aggr_limit = 0, al = 0, bpad = 0,
855 al_delta, h_baw = tid->baw_size / 2;
856 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
857 struct ieee80211_tx_info *tx_info;
858 struct ath_frame_info *fi;
859 struct sk_buff *skb;
860
861 do {
862 bf = ath_tx_get_tid_subframe(sc, txq, tid);
863 if (!bf) {
864 status = ATH_AGGR_BAW_CLOSED;
865 break;
866 }
867
868 skb = bf->bf_mpdu;
869 fi = get_frame_info(skb);
870
840 if (!bf_first) 871 if (!bf_first)
841 bf_first = bf; 872 bf_first = bf;
842 873
@@ -882,7 +913,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
882 913
883 /* link buffers of this frame to the aggregate */ 914 /* link buffers of this frame to the aggregate */
884 if (!fi->retries) 915 if (!fi->retries)
885 ath_tx_addto_baw(sc, tid, seqno); 916 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
886 bf->bf_state.ndelim = ndelim; 917 bf->bf_state.ndelim = ndelim;
887 918
888 __skb_unlink(skb, &tid->buf_q); 919 __skb_unlink(skb, &tid->buf_q);
@@ -1090,10 +1121,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1090 struct ath_txq *txq, int len) 1121 struct ath_txq *txq, int len)
1091{ 1122{
1092 struct ath_hw *ah = sc->sc_ah; 1123 struct ath_hw *ah = sc->sc_ah;
1093 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 1124 struct ath_buf *bf_first = NULL;
1094 struct ath_buf *bf_first = bf;
1095 struct ath_tx_info info; 1125 struct ath_tx_info info;
1096 bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
1097 1126
1098 memset(&info, 0, sizeof(info)); 1127 memset(&info, 0, sizeof(info));
1099 info.is_first = true; 1128 info.is_first = true;
@@ -1101,24 +1130,11 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1101 info.txpower = MAX_RATE_POWER; 1130 info.txpower = MAX_RATE_POWER;
1102 info.qcu = txq->axq_qnum; 1131 info.qcu = txq->axq_qnum;
1103 1132
1104 info.flags = ATH9K_TXDESC_INTREQ;
1105 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1106 info.flags |= ATH9K_TXDESC_NOACK;
1107 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1108 info.flags |= ATH9K_TXDESC_LDPC;
1109
1110 ath_buf_set_rate(sc, bf, &info, len);
1111
1112 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1113 info.flags |= ATH9K_TXDESC_CLRDMASK;
1114
1115 if (bf->bf_state.bfs_paprd)
1116 info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;
1117
1118
1119 while (bf) { 1133 while (bf) {
1120 struct sk_buff *skb = bf->bf_mpdu; 1134 struct sk_buff *skb = bf->bf_mpdu;
1135 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1121 struct ath_frame_info *fi = get_frame_info(skb); 1136 struct ath_frame_info *fi = get_frame_info(skb);
1137 bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
1122 1138
1123 info.type = get_hw_packet_type(skb); 1139 info.type = get_hw_packet_type(skb);
1124 if (bf->bf_next) 1140 if (bf->bf_next)
@@ -1126,6 +1142,26 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1126 else 1142 else
1127 info.link = 0; 1143 info.link = 0;
1128 1144
1145 if (!bf_first) {
1146 bf_first = bf;
1147
1148 info.flags = ATH9K_TXDESC_INTREQ;
1149 if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
1150 txq == sc->tx.uapsdq)
1151 info.flags |= ATH9K_TXDESC_CLRDMASK;
1152
1153 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1154 info.flags |= ATH9K_TXDESC_NOACK;
1155 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1156 info.flags |= ATH9K_TXDESC_LDPC;
1157
1158 if (bf->bf_state.bfs_paprd)
1159 info.flags |= (u32) bf->bf_state.bfs_paprd <<
1160 ATH9K_TXDESC_PAPRD_S;
1161
1162 ath_buf_set_rate(sc, bf, &info, len);
1163 }
1164
1129 info.buf_addr[0] = bf->bf_buf_addr; 1165 info.buf_addr[0] = bf->bf_buf_addr;
1130 info.buf_len[0] = skb->len; 1166 info.buf_len[0] = skb->len;
1131 info.pkt_len = fi->framelen; 1167 info.pkt_len = fi->framelen;
@@ -1135,7 +1171,7 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1135 if (aggr) { 1171 if (aggr) {
1136 if (bf == bf_first) 1172 if (bf == bf_first)
1137 info.aggr = AGGR_BUF_FIRST; 1173 info.aggr = AGGR_BUF_FIRST;
1138 else if (!bf->bf_next) 1174 else if (bf == bf_first->bf_lastbf)
1139 info.aggr = AGGR_BUF_LAST; 1175 info.aggr = AGGR_BUF_LAST;
1140 else 1176 else
1141 info.aggr = AGGR_BUF_MIDDLE; 1177 info.aggr = AGGR_BUF_MIDDLE;
@@ -1144,6 +1180,9 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1144 info.aggr_len = len; 1180 info.aggr_len = len;
1145 } 1181 }
1146 1182
1183 if (bf == bf_first->bf_lastbf)
1184 bf_first = NULL;
1185
1147 ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); 1186 ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
1148 bf = bf->bf_next; 1187 bf = bf->bf_next;
1149 } 1188 }
@@ -1328,6 +1367,70 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
1328 ath_txq_unlock_complete(sc, txq); 1367 ath_txq_unlock_complete(sc, txq);
1329} 1368}
1330 1369
1370void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1371 struct ieee80211_sta *sta,
1372 u16 tids, int nframes,
1373 enum ieee80211_frame_release_type reason,
1374 bool more_data)
1375{
1376 struct ath_softc *sc = hw->priv;
1377 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1378 struct ath_txq *txq = sc->tx.uapsdq;
1379 struct ieee80211_tx_info *info;
1380 struct list_head bf_q;
1381 struct ath_buf *bf_tail = NULL, *bf;
1382 int sent = 0;
1383 int i;
1384
1385 INIT_LIST_HEAD(&bf_q);
1386 for (i = 0; tids && nframes; i++, tids >>= 1) {
1387 struct ath_atx_tid *tid;
1388
1389 if (!(tids & 1))
1390 continue;
1391
1392 tid = ATH_AN_2_TID(an, i);
1393 if (tid->paused)
1394 continue;
1395
1396 ath_txq_lock(sc, tid->ac->txq);
1397 while (!skb_queue_empty(&tid->buf_q) && nframes > 0) {
1398 bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
1399 if (!bf)
1400 break;
1401
1402 __skb_unlink(bf->bf_mpdu, &tid->buf_q);
1403 list_add_tail(&bf->list, &bf_q);
1404 ath_set_rates(tid->an->vif, tid->an->sta, bf);
1405 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
1406 bf->bf_state.bf_type &= ~BUF_AGGR;
1407 if (bf_tail)
1408 bf_tail->bf_next = bf;
1409
1410 bf_tail = bf;
1411 nframes--;
1412 sent++;
1413 TX_STAT_INC(txq->axq_qnum, a_queued_hw);
1414
1415 if (skb_queue_empty(&tid->buf_q))
1416 ieee80211_sta_set_buffered(an->sta, i, false);
1417 }
1418 ath_txq_unlock_complete(sc, tid->ac->txq);
1419 }
1420
1421 if (list_empty(&bf_q))
1422 return;
1423
1424 info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
1425 info->flags |= IEEE80211_TX_STATUS_EOSP;
1426
1427 bf = list_first_entry(&bf_q, struct ath_buf, list);
1428 ath_txq_lock(sc, txq);
1429 ath_tx_fill_desc(sc, bf, txq, 0);
1430 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
1431 ath_txq_unlock(sc, txq);
1432}
1433
1331/********************/ 1434/********************/
1332/* Queue Management */ 1435/* Queue Management */
1333/********************/ 1436/********************/
@@ -1679,14 +1782,19 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1679 } 1782 }
1680 1783
1681 if (!internal) { 1784 if (!internal) {
1682 txq->axq_depth++; 1785 while (bf) {
1683 if (bf_is_ampdu_not_probing(bf)) 1786 txq->axq_depth++;
1684 txq->axq_ampdu_depth++; 1787 if (bf_is_ampdu_not_probing(bf))
1788 txq->axq_ampdu_depth++;
1789
1790 bf = bf->bf_lastbf->bf_next;
1791 }
1685 } 1792 }
1686} 1793}
1687 1794
1688static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, 1795static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
1689 struct sk_buff *skb, struct ath_tx_control *txctl) 1796 struct ath_atx_tid *tid, struct sk_buff *skb,
1797 struct ath_tx_control *txctl)
1690{ 1798{
1691 struct ath_frame_info *fi = get_frame_info(skb); 1799 struct ath_frame_info *fi = get_frame_info(skb);
1692 struct list_head bf_head; 1800 struct list_head bf_head;
@@ -1699,21 +1807,22 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1699 * - seqno is not within block-ack window 1807 * - seqno is not within block-ack window
1700 * - h/w queue depth exceeds low water mark 1808 * - h/w queue depth exceeds low water mark
1701 */ 1809 */
1702 if (!skb_queue_empty(&tid->buf_q) || tid->paused || 1810 if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
1703 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || 1811 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
1704 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) { 1812 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
1813 txq != sc->tx.uapsdq) {
1705 /* 1814 /*
1706 * Add this frame to software queue for scheduling later 1815 * Add this frame to software queue for scheduling later
1707 * for aggregation. 1816 * for aggregation.
1708 */ 1817 */
1709 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw); 1818 TX_STAT_INC(txq->axq_qnum, a_queued_sw);
1710 __skb_queue_tail(&tid->buf_q, skb); 1819 __skb_queue_tail(&tid->buf_q, skb);
1711 if (!txctl->an || !txctl->an->sleeping) 1820 if (!txctl->an || !txctl->an->sleeping)
1712 ath_tx_queue_tid(txctl->txq, tid); 1821 ath_tx_queue_tid(txq, tid);
1713 return; 1822 return;
1714 } 1823 }
1715 1824
1716 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1825 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1717 if (!bf) { 1826 if (!bf) {
1718 ieee80211_free_txskb(sc->hw, skb); 1827 ieee80211_free_txskb(sc->hw, skb);
1719 return; 1828 return;
@@ -1728,10 +1837,10 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1728 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); 1837 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
1729 1838
1730 /* Queue to h/w without aggregation */ 1839 /* Queue to h/w without aggregation */
1731 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw); 1840 TX_STAT_INC(txq->axq_qnum, a_queued_hw);
1732 bf->bf_lastbf = bf; 1841 bf->bf_lastbf = bf;
1733 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen); 1842 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
1734 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false); 1843 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
1735} 1844}
1736 1845
1737static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1846static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
@@ -1869,22 +1978,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1869 return bf; 1978 return bf;
1870} 1979}
1871 1980
1872/* Upon failure caller should free skb */ 1981static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
1873int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 1982 struct ath_tx_control *txctl)
1874 struct ath_tx_control *txctl)
1875{ 1983{
1876 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1984 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1877 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1985 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1878 struct ieee80211_sta *sta = txctl->sta; 1986 struct ieee80211_sta *sta = txctl->sta;
1879 struct ieee80211_vif *vif = info->control.vif; 1987 struct ieee80211_vif *vif = info->control.vif;
1880 struct ath_softc *sc = hw->priv; 1988 struct ath_softc *sc = hw->priv;
1881 struct ath_txq *txq = txctl->txq;
1882 struct ath_atx_tid *tid = NULL;
1883 struct ath_buf *bf;
1884 int padpos, padsize;
1885 int frmlen = skb->len + FCS_LEN; 1989 int frmlen = skb->len + FCS_LEN;
1886 u8 tidno; 1990 int padpos, padsize;
1887 int q;
1888 1991
1889 /* NOTE: sta can be NULL according to net/mac80211.h */ 1992 /* NOTE: sta can be NULL according to net/mac80211.h */
1890 if (sta) 1993 if (sta)
@@ -1905,6 +2008,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1905 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); 2008 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1906 } 2009 }
1907 2010
2011 if ((vif && vif->type != NL80211_IFTYPE_AP &&
2012 vif->type != NL80211_IFTYPE_AP_VLAN) ||
2013 !ieee80211_is_data(hdr->frame_control))
2014 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
2015
1908 /* Add the padding after the header if this is not already done */ 2016 /* Add the padding after the header if this is not already done */
1909 padpos = ieee80211_hdrlen(hdr->frame_control); 2017 padpos = ieee80211_hdrlen(hdr->frame_control);
1910 padsize = padpos & 3; 2018 padsize = padpos & 3;
@@ -1914,16 +2022,34 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1914 2022
1915 skb_push(skb, padsize); 2023 skb_push(skb, padsize);
1916 memmove(skb->data, skb->data + padsize, padpos); 2024 memmove(skb->data, skb->data + padsize, padpos);
1917 hdr = (struct ieee80211_hdr *) skb->data;
1918 } 2025 }
1919 2026
1920 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1921 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1922 !ieee80211_is_data(hdr->frame_control))
1923 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1924
1925 setup_frame_info(hw, sta, skb, frmlen); 2027 setup_frame_info(hw, sta, skb, frmlen);
2028 return 0;
2029}
2030
2031
2032/* Upon failure caller should free skb */
2033int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2034 struct ath_tx_control *txctl)
2035{
2036 struct ieee80211_hdr *hdr;
2037 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2038 struct ieee80211_sta *sta = txctl->sta;
2039 struct ieee80211_vif *vif = info->control.vif;
2040 struct ath_softc *sc = hw->priv;
2041 struct ath_txq *txq = txctl->txq;
2042 struct ath_atx_tid *tid = NULL;
2043 struct ath_buf *bf;
2044 u8 tidno;
2045 int q;
2046 int ret;
2047
2048 ret = ath_tx_prepare(hw, skb, txctl);
2049 if (ret)
2050 return ret;
1926 2051
2052 hdr = (struct ieee80211_hdr *) skb->data;
1927 /* 2053 /*
1928 * At this point, the vif, hw_key and sta pointers in the tx control 2054 * At this point, the vif, hw_key and sta pointers in the tx control
1929 * info are no longer valid (overwritten by the ath_frame_info data. 2055 * info are no longer valid (overwritten by the ath_frame_info data.
@@ -1939,6 +2065,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1939 txq->stopped = true; 2065 txq->stopped = true;
1940 } 2066 }
1941 2067
2068 if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
2069 ath_txq_unlock(sc, txq);
2070 txq = sc->tx.uapsdq;
2071 ath_txq_lock(sc, txq);
2072 }
2073
1942 if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { 2074 if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
1943 tidno = ieee80211_get_qos_ctl(hdr)[0] & 2075 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1944 IEEE80211_QOS_CTL_TID_MASK; 2076 IEEE80211_QOS_CTL_TID_MASK;
@@ -1952,11 +2084,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1952 * Try aggregation if it's a unicast data frame 2084 * Try aggregation if it's a unicast data frame
1953 * and the destination is HT capable. 2085 * and the destination is HT capable.
1954 */ 2086 */
1955 ath_tx_send_ampdu(sc, tid, skb, txctl); 2087 ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
1956 goto out; 2088 goto out;
1957 } 2089 }
1958 2090
1959 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 2091 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1960 if (!bf) { 2092 if (!bf) {
1961 if (txctl->paprd) 2093 if (txctl->paprd)
1962 dev_kfree_skb_any(skb); 2094 dev_kfree_skb_any(skb);
@@ -1971,7 +2103,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1971 bf->bf_state.bfs_paprd_timestamp = jiffies; 2103 bf->bf_state.bfs_paprd_timestamp = jiffies;
1972 2104
1973 ath_set_rates(vif, sta, bf); 2105 ath_set_rates(vif, sta, bf);
1974 ath_tx_send_normal(sc, txctl->txq, tid, skb); 2106 ath_tx_send_normal(sc, txq, tid, skb);
1975 2107
1976out: 2108out:
1977 ath_txq_unlock(sc, txq); 2109 ath_txq_unlock(sc, txq);
@@ -1979,6 +2111,74 @@ out:
1979 return 0; 2111 return 0;
1980} 2112}
1981 2113
2114void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2115 struct sk_buff *skb)
2116{
2117 struct ath_softc *sc = hw->priv;
2118 struct ath_tx_control txctl = {
2119 .txq = sc->beacon.cabq
2120 };
2121 struct ath_tx_info info = {};
2122 struct ieee80211_hdr *hdr;
2123 struct ath_buf *bf_tail = NULL;
2124 struct ath_buf *bf;
2125 LIST_HEAD(bf_q);
2126 int duration = 0;
2127 int max_duration;
2128
2129 max_duration =
2130 sc->cur_beacon_conf.beacon_interval * 1000 *
2131 sc->cur_beacon_conf.dtim_period / ATH_BCBUF;
2132
2133 do {
2134 struct ath_frame_info *fi = get_frame_info(skb);
2135
2136 if (ath_tx_prepare(hw, skb, &txctl))
2137 break;
2138
2139 bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
2140 if (!bf)
2141 break;
2142
2143 bf->bf_lastbf = bf;
2144 ath_set_rates(vif, NULL, bf);
2145 ath_buf_set_rate(sc, bf, &info, fi->framelen);
2146 duration += info.rates[0].PktDuration;
2147 if (bf_tail)
2148 bf_tail->bf_next = bf;
2149
2150 list_add_tail(&bf->list, &bf_q);
2151 bf_tail = bf;
2152 skb = NULL;
2153
2154 if (duration > max_duration)
2155 break;
2156
2157 skb = ieee80211_get_buffered_bc(hw, vif);
2158 } while(skb);
2159
2160 if (skb)
2161 ieee80211_free_txskb(hw, skb);
2162
2163 if (list_empty(&bf_q))
2164 return;
2165
2166 bf = list_first_entry(&bf_q, struct ath_buf, list);
2167 hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
2168
2169 if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
2170 hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
2171 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
2172 sizeof(*hdr), DMA_TO_DEVICE);
2173 }
2174
2175 ath_txq_lock(sc, txctl.txq);
2176 ath_tx_fill_desc(sc, bf, txctl.txq, 0);
2177 ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
2178 TX_STAT_INC(txctl.txq->axq_qnum, queued);
2179 ath_txq_unlock(sc, txctl.txq);
2180}
2181
1982/*****************/ 2182/*****************/
1983/* TX Completion */ 2183/* TX Completion */
1984/*****************/ 2184/*****************/
@@ -2024,7 +2224,12 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2024 } 2224 }
2025 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 2225 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2026 2226
2227 __skb_queue_tail(&txq->complete_q, skb);
2228
2027 q = skb_get_queue_mapping(skb); 2229 q = skb_get_queue_mapping(skb);
2230 if (txq == sc->tx.uapsdq)
2231 txq = sc->tx.txq_map[q];
2232
2028 if (txq == sc->tx.txq_map[q]) { 2233 if (txq == sc->tx.txq_map[q]) {
2029 if (WARN_ON(--txq->pending_frames < 0)) 2234 if (WARN_ON(--txq->pending_frames < 0))
2030 txq->pending_frames = 0; 2235 txq->pending_frames = 0;
@@ -2035,8 +2240,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2035 txq->stopped = false; 2240 txq->stopped = false;
2036 } 2241 }
2037 } 2242 }
2038
2039 __skb_queue_tail(&txq->complete_q, skb);
2040} 2243}
2041 2244
2042static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 2245static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 9dce106cd6d4..8596aba34f96 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -133,6 +133,9 @@ struct carl9170_sta_tid {
133 133
134 /* Preaggregation reorder queue */ 134 /* Preaggregation reorder queue */
135 struct sk_buff_head queue; 135 struct sk_buff_head queue;
136
137 struct ieee80211_sta *sta;
138 struct ieee80211_vif *vif;
136}; 139};
137 140
138#define CARL9170_QUEUE_TIMEOUT 256 141#define CARL9170_QUEUE_TIMEOUT 256
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index e9010a481dfd..4a33c6e39ca2 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1448,6 +1448,8 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1448 tid_info->state = CARL9170_TID_STATE_PROGRESS; 1448 tid_info->state = CARL9170_TID_STATE_PROGRESS;
1449 tid_info->tid = tid; 1449 tid_info->tid = tid;
1450 tid_info->max = sta_info->ampdu_max_len; 1450 tid_info->max = sta_info->ampdu_max_len;
1451 tid_info->sta = sta;
1452 tid_info->vif = vif;
1451 1453
1452 INIT_LIST_HEAD(&tid_info->list); 1454 INIT_LIST_HEAD(&tid_info->list);
1453 INIT_LIST_HEAD(&tid_info->tmp_list); 1455 INIT_LIST_HEAD(&tid_info->tmp_list);
@@ -1857,6 +1859,7 @@ void *carl9170_alloc(size_t priv_size)
1857 IEEE80211_HW_SUPPORTS_PS | 1859 IEEE80211_HW_SUPPORTS_PS |
1858 IEEE80211_HW_PS_NULLFUNC_STACK | 1860 IEEE80211_HW_PS_NULLFUNC_STACK |
1859 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | 1861 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
1862 IEEE80211_HW_SUPPORTS_RC_TABLE |
1860 IEEE80211_HW_SIGNAL_DBM; 1863 IEEE80211_HW_SIGNAL_DBM;
1861 1864
1862 if (!modparam_noht) { 1865 if (!modparam_noht) {
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index c61cafa2665b..e3f696ee4d23 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -625,7 +625,7 @@ static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
625 msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT))) 625 msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
626 goto unlock; 626 goto unlock;
627 627
628 sta = __carl9170_get_tx_sta(ar, skb); 628 sta = iter->sta;
629 if (WARN_ON(!sta)) 629 if (WARN_ON(!sta))
630 goto unlock; 630 goto unlock;
631 631
@@ -866,6 +866,93 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
866 return false; 866 return false;
867} 867}
868 868
869static void carl9170_tx_get_rates(struct ar9170 *ar,
870 struct ieee80211_vif *vif,
871 struct ieee80211_sta *sta,
872 struct sk_buff *skb)
873{
874 struct ieee80211_tx_info *info;
875
876 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
877 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES > IEEE80211_TX_RATE_TABLE_SIZE);
878
879 info = IEEE80211_SKB_CB(skb);
880
881 ieee80211_get_tx_rates(vif, sta, skb,
882 info->control.rates,
883 IEEE80211_TX_MAX_RATES);
884}
885
886static void carl9170_tx_apply_rateset(struct ar9170 *ar,
887 struct ieee80211_tx_info *sinfo,
888 struct sk_buff *skb)
889{
890 struct ieee80211_tx_rate *txrate;
891 struct ieee80211_tx_info *info;
892 struct _carl9170_tx_superframe *txc = (void *) skb->data;
893 int i;
894 bool ampdu;
895 bool no_ack;
896
897 info = IEEE80211_SKB_CB(skb);
898 ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
899 no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
900
901 /* Set the rate control probe flag for all (sub-) frames.
902 * This is because the TX_STATS_AMPDU flag is only set on
903 * the last frame, so it has to be inherited.
904 */
905 info->flags |= (sinfo->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
906
907 /* NOTE: For the first rate, the ERP & AMPDU flags are directly
908 * taken from mac_control. For all fallback rate, the firmware
909 * updates the mac_control flags from the rate info field.
910 */
911 for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
912 __le32 phy_set;
913
914 txrate = &sinfo->control.rates[i];
915 if (txrate->idx < 0)
916 break;
917
918 phy_set = carl9170_tx_physet(ar, info, txrate);
919 if (i == 0) {
920 __le16 mac_tmp = cpu_to_le16(0);
921
922 /* first rate - part of the hw's frame header */
923 txc->f.phy_control = phy_set;
924
925 if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
926 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
927
928 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
929 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
930 else if (carl9170_tx_cts_check(ar, txrate))
931 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
932
933 txc->f.mac_control |= mac_tmp;
934 } else {
935 /* fallback rates are stored in the firmware's
936 * retry rate set array.
937 */
938 txc->s.rr[i - 1] = phy_set;
939 }
940
941 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
942 txrate->count);
943
944 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
945 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
946 CARL9170_TX_SUPER_RI_ERP_PROT_S);
947 else if (carl9170_tx_cts_check(ar, txrate))
948 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
949 CARL9170_TX_SUPER_RI_ERP_PROT_S);
950
951 if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
952 txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
953 }
954}
955
869static int carl9170_tx_prepare(struct ar9170 *ar, 956static int carl9170_tx_prepare(struct ar9170 *ar,
870 struct ieee80211_sta *sta, 957 struct ieee80211_sta *sta,
871 struct sk_buff *skb) 958 struct sk_buff *skb)
@@ -874,13 +961,10 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
874 struct _carl9170_tx_superframe *txc; 961 struct _carl9170_tx_superframe *txc;
875 struct carl9170_vif_info *cvif; 962 struct carl9170_vif_info *cvif;
876 struct ieee80211_tx_info *info; 963 struct ieee80211_tx_info *info;
877 struct ieee80211_tx_rate *txrate;
878 struct carl9170_tx_info *arinfo; 964 struct carl9170_tx_info *arinfo;
879 unsigned int hw_queue; 965 unsigned int hw_queue;
880 int i;
881 __le16 mac_tmp; 966 __le16 mac_tmp;
882 u16 len; 967 u16 len;
883 bool ampdu, no_ack;
884 968
885 BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data)); 969 BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
886 BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) != 970 BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
@@ -889,8 +973,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
889 BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) != 973 BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
890 AR9170_TX_HWDESC_LEN); 974 AR9170_TX_HWDESC_LEN);
891 975
892 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
893
894 BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC > 976 BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
895 ((CARL9170_TX_SUPER_MISC_VIF_ID >> 977 ((CARL9170_TX_SUPER_MISC_VIF_ID >>
896 CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1)); 978 CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
@@ -932,8 +1014,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
932 mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) & 1014 mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
933 AR9170_TX_MAC_QOS); 1015 AR9170_TX_MAC_QOS);
934 1016
935 no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK); 1017 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_ACK))
936 if (unlikely(no_ack))
937 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK); 1018 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
938 1019
939 if (info->control.hw_key) { 1020 if (info->control.hw_key) {
@@ -954,8 +1035,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
954 } 1035 }
955 } 1036 }
956 1037
957 ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU); 1038 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
958 if (ampdu) {
959 unsigned int density, factor; 1039 unsigned int density, factor;
960 1040
961 if (unlikely(!sta || !cvif)) 1041 if (unlikely(!sta || !cvif))
@@ -982,50 +1062,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
982 txc->s.ampdu_settings, factor); 1062 txc->s.ampdu_settings, factor);
983 } 1063 }
984 1064
985 /*
986 * NOTE: For the first rate, the ERP & AMPDU flags are directly
987 * taken from mac_control. For all fallback rate, the firmware
988 * updates the mac_control flags from the rate info field.
989 */
990 for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
991 __le32 phy_set;
992 txrate = &info->control.rates[i];
993 if (txrate->idx < 0)
994 break;
995
996 phy_set = carl9170_tx_physet(ar, info, txrate);
997 if (i == 0) {
998 /* first rate - part of the hw's frame header */
999 txc->f.phy_control = phy_set;
1000
1001 if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
1002 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1003 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
1004 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
1005 else if (carl9170_tx_cts_check(ar, txrate))
1006 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
1007
1008 } else {
1009 /* fallback rates are stored in the firmware's
1010 * retry rate set array.
1011 */
1012 txc->s.rr[i - 1] = phy_set;
1013 }
1014
1015 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
1016 txrate->count);
1017
1018 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
1019 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
1020 CARL9170_TX_SUPER_RI_ERP_PROT_S);
1021 else if (carl9170_tx_cts_check(ar, txrate))
1022 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
1023 CARL9170_TX_SUPER_RI_ERP_PROT_S);
1024
1025 if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
1026 txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
1027 }
1028
1029 txc->s.len = cpu_to_le16(skb->len); 1065 txc->s.len = cpu_to_le16(skb->len);
1030 txc->f.length = cpu_to_le16(len + FCS_LEN); 1066 txc->f.length = cpu_to_le16(len + FCS_LEN);
1031 txc->f.mac_control = mac_tmp; 1067 txc->f.mac_control = mac_tmp;
@@ -1086,31 +1122,12 @@ static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
1086 } 1122 }
1087} 1123}
1088 1124
1089static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
1090 struct sk_buff *_src)
1091{
1092 struct _carl9170_tx_superframe *dest, *src;
1093
1094 dest = (void *) _dest->data;
1095 src = (void *) _src->data;
1096
1097 /*
1098 * The mac80211 rate control algorithm expects that all MPDUs in
1099 * an AMPDU share the same tx vectors.
1100 * This is not really obvious right now, because the hardware
1101 * does the AMPDU setup according to its own rulebook.
1102 * Our nicely assembled, strictly monotonic increasing mpdu
1103 * chains will be broken up, mashed back together...
1104 */
1105
1106 return (dest->f.phy_control == src->f.phy_control);
1107}
1108
1109static void carl9170_tx_ampdu(struct ar9170 *ar) 1125static void carl9170_tx_ampdu(struct ar9170 *ar)
1110{ 1126{
1111 struct sk_buff_head agg; 1127 struct sk_buff_head agg;
1112 struct carl9170_sta_tid *tid_info; 1128 struct carl9170_sta_tid *tid_info;
1113 struct sk_buff *skb, *first; 1129 struct sk_buff *skb, *first;
1130 struct ieee80211_tx_info *tx_info_first;
1114 unsigned int i = 0, done_ampdus = 0; 1131 unsigned int i = 0, done_ampdus = 0;
1115 u16 seq, queue, tmpssn; 1132 u16 seq, queue, tmpssn;
1116 1133
@@ -1156,6 +1173,7 @@ retry:
1156 goto processed; 1173 goto processed;
1157 } 1174 }
1158 1175
1176 tx_info_first = NULL;
1159 while ((skb = skb_peek(&tid_info->queue))) { 1177 while ((skb = skb_peek(&tid_info->queue))) {
1160 /* strict 0, 1, ..., n - 1, n frame sequence order */ 1178 /* strict 0, 1, ..., n - 1, n frame sequence order */
1161 if (unlikely(carl9170_get_seq(skb) != seq)) 1179 if (unlikely(carl9170_get_seq(skb) != seq))
@@ -1166,8 +1184,13 @@ retry:
1166 (tid_info->max - 1))) 1184 (tid_info->max - 1)))
1167 break; 1185 break;
1168 1186
1169 if (!carl9170_tx_rate_check(ar, skb, first)) 1187 if (!tx_info_first) {
1170 break; 1188 carl9170_tx_get_rates(ar, tid_info->vif,
1189 tid_info->sta, first);
1190 tx_info_first = IEEE80211_SKB_CB(first);
1191 }
1192
1193 carl9170_tx_apply_rateset(ar, tx_info_first, skb);
1171 1194
1172 atomic_inc(&ar->tx_ampdu_upload); 1195 atomic_inc(&ar->tx_ampdu_upload);
1173 tid_info->snx = seq = SEQ_NEXT(seq); 1196 tid_info->snx = seq = SEQ_NEXT(seq);
@@ -1182,8 +1205,7 @@ retry:
1182 if (skb_queue_empty(&tid_info->queue) || 1205 if (skb_queue_empty(&tid_info->queue) ||
1183 carl9170_get_seq(skb_peek(&tid_info->queue)) != 1206 carl9170_get_seq(skb_peek(&tid_info->queue)) !=
1184 tid_info->snx) { 1207 tid_info->snx) {
1185 /* 1208 /* stop TID, if A-MPDU frames are still missing,
1186 * stop TID, if A-MPDU frames are still missing,
1187 * or whenever the queue is empty. 1209 * or whenever the queue is empty.
1188 */ 1210 */
1189 1211
@@ -1450,12 +1472,14 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
1450 struct ar9170 *ar = hw->priv; 1472 struct ar9170 *ar = hw->priv;
1451 struct ieee80211_tx_info *info; 1473 struct ieee80211_tx_info *info;
1452 struct ieee80211_sta *sta = control->sta; 1474 struct ieee80211_sta *sta = control->sta;
1475 struct ieee80211_vif *vif;
1453 bool run; 1476 bool run;
1454 1477
1455 if (unlikely(!IS_STARTED(ar))) 1478 if (unlikely(!IS_STARTED(ar)))
1456 goto err_free; 1479 goto err_free;
1457 1480
1458 info = IEEE80211_SKB_CB(skb); 1481 info = IEEE80211_SKB_CB(skb);
1482 vif = info->control.vif;
1459 1483
1460 if (unlikely(carl9170_tx_prepare(ar, sta, skb))) 1484 if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
1461 goto err_free; 1485 goto err_free;
@@ -1486,6 +1510,8 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
1486 } else { 1510 } else {
1487 unsigned int queue = skb_get_queue_mapping(skb); 1511 unsigned int queue = skb_get_queue_mapping(skb);
1488 1512
1513 carl9170_tx_get_rates(ar, vif, sta, skb);
1514 carl9170_tx_apply_rateset(ar, info, skb);
1489 skb_queue_tail(&ar->tx_pending[queue], skb); 1515 skb_queue_tail(&ar->tx_pending[queue], skb);
1490 } 1516 }
1491 1517
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index ccc4c718f124..7d077c752dd5 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -42,11 +42,11 @@ static int __ath_regd_init(struct ath_regulatory *reg);
42 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM) 42 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
43 43
44/* We allow IBSS on these on a case by case basis by regulatory domain */ 44/* We allow IBSS on these on a case by case basis by regulatory domain */
45#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 40, 0, 30,\ 45#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
46 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) 46 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
47#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 40, 0, 30,\ 47#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
48 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) 48 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
49#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 40, 0, 30,\ 49#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
50 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) 50 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
51 51
52#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \ 52#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index bac3d98a0cfb..ce8c0381825e 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -27,3 +27,15 @@ config WIL6210_ISR_COR
27 self-clear when accessed for debug purposes, it makes 27 self-clear when accessed for debug purposes, it makes
28 such monitoring impossible. 28 such monitoring impossible.
29 Say y unless you debug interrupts 29 Say y unless you debug interrupts
30
31config WIL6210_TRACING
32 bool "wil6210 tracing support"
33 depends on WIL6210
34 depends on EVENT_TRACING
35 default y
36 ---help---
37 Say Y here to enable tracepoints for the wil6210 driver
38 using the kernel tracing infrastructure. Select this
39 option if you are interested in debugging the driver.
40
41 If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index d288eea0a26a..f891d514d881 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -1,15 +1,20 @@
1obj-$(CONFIG_WIL6210) += wil6210.o 1obj-$(CONFIG_WIL6210) += wil6210.o
2 2
3wil6210-objs := main.o 3wil6210-y := main.o
4wil6210-objs += netdev.o 4wil6210-y += netdev.o
5wil6210-objs += cfg80211.o 5wil6210-y += cfg80211.o
6wil6210-objs += pcie_bus.o 6wil6210-y += pcie_bus.o
7wil6210-objs += debugfs.o 7wil6210-y += debugfs.o
8wil6210-objs += wmi.o 8wil6210-y += wmi.o
9wil6210-objs += interrupt.o 9wil6210-y += interrupt.o
10wil6210-objs += txrx.o 10wil6210-y += txrx.o
11wil6210-y += debug.o
12wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
11 13
12ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) 14ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
13 subdir-ccflags-y += -Werror 15 subdir-ccflags-y += -Werror
14endif 16endif
17# for tracing framework to find trace.h
18CFLAGS_trace.o := -I$(src)
19
15subdir-ccflags-y += -D__CHECK_ENDIAN__ 20subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index c5d4a87abaaf..61c302a6bdea 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -322,12 +322,16 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
322 * FW don't support scan after connection attempt 322 * FW don't support scan after connection attempt
323 */ 323 */
324 set_bit(wil_status_dontscan, &wil->status); 324 set_bit(wil_status_dontscan, &wil->status);
325 set_bit(wil_status_fwconnecting, &wil->status);
325 326
326 rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn)); 327 rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
327 if (rc == 0) { 328 if (rc == 0) {
328 /* Connect can take lots of time */ 329 /* Connect can take lots of time */
329 mod_timer(&wil->connect_timer, 330 mod_timer(&wil->connect_timer,
330 jiffies + msecs_to_jiffies(2000)); 331 jiffies + msecs_to_jiffies(2000));
332 } else {
333 clear_bit(wil_status_dontscan, &wil->status);
334 clear_bit(wil_status_fwconnecting, &wil->status);
331 } 335 }
332 336
333 out: 337 out:
@@ -398,6 +402,30 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
398 return 0; 402 return 0;
399} 403}
400 404
405static int wil_fix_bcon(struct wil6210_priv *wil,
406 struct cfg80211_beacon_data *bcon)
407{
408 struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
409 size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
410 int rc = 0;
411
412 if (bcon->probe_resp_len <= hlen)
413 return 0;
414
415 if (!bcon->proberesp_ies) {
416 bcon->proberesp_ies = f->u.probe_resp.variable;
417 bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
418 rc = 1;
419 }
420 if (!bcon->assocresp_ies) {
421 bcon->assocresp_ies = f->u.probe_resp.variable;
422 bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
423 rc = 1;
424 }
425
426 return rc;
427}
428
401static int wil_cfg80211_start_ap(struct wiphy *wiphy, 429static int wil_cfg80211_start_ap(struct wiphy *wiphy,
402 struct net_device *ndev, 430 struct net_device *ndev,
403 struct cfg80211_ap_settings *info) 431 struct cfg80211_ap_settings *info)
@@ -419,10 +447,18 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
419 print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, 447 print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
420 info->ssid, info->ssid_len); 448 info->ssid, info->ssid_len);
421 449
450 if (wil_fix_bcon(wil, bcon))
451 wil_dbg_misc(wil, "Fixed bcon\n");
452
422 rc = wil_reset(wil); 453 rc = wil_reset(wil);
423 if (rc) 454 if (rc)
424 return rc; 455 return rc;
425 456
457 /* Rx VRING. */
458 rc = wil_rx_init(wil);
459 if (rc)
460 return rc;
461
426 rc = wmi_set_ssid(wil, info->ssid_len, info->ssid); 462 rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
427 if (rc) 463 if (rc)
428 return rc; 464 return rc;
@@ -451,8 +487,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
451 if (rc) 487 if (rc)
452 return rc; 488 return rc;
453 489
454 /* Rx VRING. After MAC and beacon */
455 rc = wil_rx_init(wil);
456 490
457 netif_carrier_on(ndev); 491 netif_carrier_on(ndev);
458 492
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
new file mode 100644
index 000000000000..9eeabf4a5879
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -0,0 +1,69 @@
1/*
2 * Copyright (c) 2013 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "wil6210.h"
18#include "trace.h"
19
20int wil_err(struct wil6210_priv *wil, const char *fmt, ...)
21{
22 struct net_device *ndev = wil_to_ndev(wil);
23 struct va_format vaf = {
24 .fmt = fmt,
25 };
26 va_list args;
27 int ret;
28
29 va_start(args, fmt);
30 vaf.va = &args;
31 ret = netdev_err(ndev, "%pV", &vaf);
32 trace_wil6210_log_err(&vaf);
33 va_end(args);
34
35 return ret;
36}
37
38int wil_info(struct wil6210_priv *wil, const char *fmt, ...)
39{
40 struct net_device *ndev = wil_to_ndev(wil);
41 struct va_format vaf = {
42 .fmt = fmt,
43 };
44 va_list args;
45 int ret;
46
47 va_start(args, fmt);
48 vaf.va = &args;
49 ret = netdev_info(ndev, "%pV", &vaf);
50 trace_wil6210_log_info(&vaf);
51 va_end(args);
52
53 return ret;
54}
55
56int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...)
57{
58 struct va_format vaf = {
59 .fmt = fmt,
60 };
61 va_list args;
62
63 va_start(args, fmt);
64 vaf.va = &args;
65 trace_wil6210_log_dbg(&vaf);
66 va_end(args);
67
68 return 0;
69}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 727b1f53e6ad..e8308ec30970 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -418,9 +418,15 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
418 if (skb) { 418 if (skb) {
419 unsigned char printbuf[16 * 3 + 2]; 419 unsigned char printbuf[16 * 3 + 2];
420 int i = 0; 420 int i = 0;
421 int len = skb_headlen(skb); 421 int len = le16_to_cpu(d->dma.length);
422 void *p = skb->data; 422 void *p = skb->data;
423 423
424 if (len != skb_headlen(skb)) {
425 seq_printf(s, "!!! len: desc = %d skb = %d\n",
426 len, skb_headlen(skb));
427 len = min_t(int, len, skb_headlen(skb));
428 }
429
424 seq_printf(s, " len = %d\n", len); 430 seq_printf(s, " len = %d\n", len);
425 431
426 while (i < len) { 432 while (i < len) {
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index e3c1e7684f9c..8205d3e4ab66 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -17,6 +17,7 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18 18
19#include "wil6210.h" 19#include "wil6210.h"
20#include "trace.h"
20 21
21/** 22/**
22 * Theory of operation: 23 * Theory of operation:
@@ -103,14 +104,14 @@ static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
103 clear_bit(wil_status_irqen, &wil->status); 104 clear_bit(wil_status_irqen, &wil->status);
104} 105}
105 106
106static void wil6210_unmask_irq_tx(struct wil6210_priv *wil) 107void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
107{ 108{
108 iowrite32(WIL6210_IMC_TX, wil->csr + 109 iowrite32(WIL6210_IMC_TX, wil->csr +
109 HOSTADDR(RGF_DMA_EP_TX_ICR) + 110 HOSTADDR(RGF_DMA_EP_TX_ICR) +
110 offsetof(struct RGF_ICR, IMC)); 111 offsetof(struct RGF_ICR, IMC));
111} 112}
112 113
113static void wil6210_unmask_irq_rx(struct wil6210_priv *wil) 114void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
114{ 115{
115 iowrite32(WIL6210_IMC_RX, wil->csr + 116 iowrite32(WIL6210_IMC_RX, wil->csr +
116 HOSTADDR(RGF_DMA_EP_RX_ICR) + 117 HOSTADDR(RGF_DMA_EP_RX_ICR) +
@@ -168,6 +169,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
168 HOSTADDR(RGF_DMA_EP_RX_ICR) + 169 HOSTADDR(RGF_DMA_EP_RX_ICR) +
169 offsetof(struct RGF_ICR, ICR)); 170 offsetof(struct RGF_ICR, ICR));
170 171
172 trace_wil6210_irq_rx(isr);
171 wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); 173 wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
172 174
173 if (!isr) { 175 if (!isr) {
@@ -180,13 +182,14 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
180 if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) { 182 if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
181 wil_dbg_irq(wil, "RX done\n"); 183 wil_dbg_irq(wil, "RX done\n");
182 isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE; 184 isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
183 wil_rx_handle(wil); 185 wil_dbg_txrx(wil, "NAPI schedule\n");
186 napi_schedule(&wil->napi_rx);
184 } 187 }
185 188
186 if (isr) 189 if (isr)
187 wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr); 190 wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
188 191
189 wil6210_unmask_irq_rx(wil); 192 /* Rx IRQ will be enabled when NAPI processing finished */
190 193
191 return IRQ_HANDLED; 194 return IRQ_HANDLED;
192} 195}
@@ -198,6 +201,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
198 HOSTADDR(RGF_DMA_EP_TX_ICR) + 201 HOSTADDR(RGF_DMA_EP_TX_ICR) +
199 offsetof(struct RGF_ICR, ICR)); 202 offsetof(struct RGF_ICR, ICR));
200 203
204 trace_wil6210_irq_tx(isr);
201 wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); 205 wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
202 206
203 if (!isr) { 207 if (!isr) {
@@ -208,23 +212,17 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
208 wil6210_mask_irq_tx(wil); 212 wil6210_mask_irq_tx(wil);
209 213
210 if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) { 214 if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
211 uint i;
212 wil_dbg_irq(wil, "TX done\n"); 215 wil_dbg_irq(wil, "TX done\n");
216 napi_schedule(&wil->napi_tx);
213 isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; 217 isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
214 for (i = 0; i < 24; i++) { 218 /* clear also all VRING interrupts */
215 u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i); 219 isr &= ~(BIT(25) - 1UL);
216 if (isr & mask) {
217 isr &= ~mask;
218 wil_dbg_irq(wil, "TX done(%i)\n", i);
219 wil_tx_complete(wil, i);
220 }
221 }
222 } 220 }
223 221
224 if (isr) 222 if (isr)
225 wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); 223 wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
226 224
227 wil6210_unmask_irq_tx(wil); 225 /* Tx IRQ will be enabled when NAPI processing finished */
228 226
229 return IRQ_HANDLED; 227 return IRQ_HANDLED;
230} 228}
@@ -256,6 +254,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
256 HOSTADDR(RGF_DMA_EP_MISC_ICR) + 254 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
257 offsetof(struct RGF_ICR, ICR)); 255 offsetof(struct RGF_ICR, ICR));
258 256
257 trace_wil6210_irq_misc(isr);
259 wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr); 258 wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
260 259
261 if (!isr) { 260 if (!isr) {
@@ -301,6 +300,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
301 struct wil6210_priv *wil = cookie; 300 struct wil6210_priv *wil = cookie;
302 u32 isr = wil->isr_misc; 301 u32 isr = wil->isr_misc;
303 302
303 trace_wil6210_irq_misc_thread(isr);
304 wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr); 304 wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
305 305
306 if (isr & ISR_MISC_FW_ERROR) { 306 if (isr & ISR_MISC_FW_ERROR) {
@@ -408,6 +408,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
408 if (wil6210_debug_irq_mask(wil, pseudo_cause)) 408 if (wil6210_debug_irq_mask(wil, pseudo_cause))
409 return IRQ_NONE; 409 return IRQ_NONE;
410 410
411 trace_wil6210_irq_pseudo(pseudo_cause);
411 wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause); 412 wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause);
412 413
413 wil6210_mask_irq_pseudo(wil); 414 wil6210_mask_irq_pseudo(wil);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index a0478e2f6868..0a2844c48a60 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -56,27 +56,21 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
56{ 56{
57 uint i; 57 uint i;
58 struct net_device *ndev = wil_to_ndev(wil); 58 struct net_device *ndev = wil_to_ndev(wil);
59 struct wireless_dev *wdev = wil->wdev;
60 59
61 wil_dbg_misc(wil, "%s()\n", __func__); 60 wil_dbg_misc(wil, "%s()\n", __func__);
62 61
63 wil_link_off(wil); 62 wil_link_off(wil);
64 clear_bit(wil_status_fwconnected, &wil->status); 63 if (test_bit(wil_status_fwconnected, &wil->status)) {
65 64 clear_bit(wil_status_fwconnected, &wil->status);
66 switch (wdev->sme_state) { 65 cfg80211_disconnected(ndev,
67 case CFG80211_SME_CONNECTED: 66 WLAN_STATUS_UNSPECIFIED_FAILURE,
68 cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE,
69 NULL, 0, GFP_KERNEL); 67 NULL, 0, GFP_KERNEL);
70 break; 68 } else if (test_bit(wil_status_fwconnecting, &wil->status)) {
71 case CFG80211_SME_CONNECTING:
72 cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, 69 cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
73 WLAN_STATUS_UNSPECIFIED_FAILURE, 70 WLAN_STATUS_UNSPECIFIED_FAILURE,
74 GFP_KERNEL); 71 GFP_KERNEL);
75 break;
76 default:
77 break;
78 } 72 }
79 73 clear_bit(wil_status_fwconnecting, &wil->status);
80 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) 74 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
81 wil_vring_fini_tx(wil, i); 75 wil_vring_fini_tx(wil, i);
82 76
@@ -292,41 +286,36 @@ static int __wil_up(struct wil6210_priv *wil)
292{ 286{
293 struct net_device *ndev = wil_to_ndev(wil); 287 struct net_device *ndev = wil_to_ndev(wil);
294 struct wireless_dev *wdev = wil->wdev; 288 struct wireless_dev *wdev = wil->wdev;
295 struct ieee80211_channel *channel = wdev->preset_chandef.chan;
296 int rc; 289 int rc;
297 int bi;
298 u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
299 290
300 rc = wil_reset(wil); 291 rc = wil_reset(wil);
301 if (rc) 292 if (rc)
302 return rc; 293 return rc;
303 294
304 /* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */ 295 /* Rx VRING. After MAC and beacon */
305 wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC); 296 rc = wil_rx_init(wil);
297 if (rc)
298 return rc;
299
306 switch (wdev->iftype) { 300 switch (wdev->iftype) {
307 case NL80211_IFTYPE_STATION: 301 case NL80211_IFTYPE_STATION:
308 wil_dbg_misc(wil, "type: STATION\n"); 302 wil_dbg_misc(wil, "type: STATION\n");
309 bi = 0;
310 ndev->type = ARPHRD_ETHER; 303 ndev->type = ARPHRD_ETHER;
311 break; 304 break;
312 case NL80211_IFTYPE_AP: 305 case NL80211_IFTYPE_AP:
313 wil_dbg_misc(wil, "type: AP\n"); 306 wil_dbg_misc(wil, "type: AP\n");
314 bi = 100;
315 ndev->type = ARPHRD_ETHER; 307 ndev->type = ARPHRD_ETHER;
316 break; 308 break;
317 case NL80211_IFTYPE_P2P_CLIENT: 309 case NL80211_IFTYPE_P2P_CLIENT:
318 wil_dbg_misc(wil, "type: P2P_CLIENT\n"); 310 wil_dbg_misc(wil, "type: P2P_CLIENT\n");
319 bi = 0;
320 ndev->type = ARPHRD_ETHER; 311 ndev->type = ARPHRD_ETHER;
321 break; 312 break;
322 case NL80211_IFTYPE_P2P_GO: 313 case NL80211_IFTYPE_P2P_GO:
323 wil_dbg_misc(wil, "type: P2P_GO\n"); 314 wil_dbg_misc(wil, "type: P2P_GO\n");
324 bi = 100;
325 ndev->type = ARPHRD_ETHER; 315 ndev->type = ARPHRD_ETHER;
326 break; 316 break;
327 case NL80211_IFTYPE_MONITOR: 317 case NL80211_IFTYPE_MONITOR:
328 wil_dbg_misc(wil, "type: Monitor\n"); 318 wil_dbg_misc(wil, "type: Monitor\n");
329 bi = 0;
330 ndev->type = ARPHRD_IEEE80211_RADIOTAP; 319 ndev->type = ARPHRD_IEEE80211_RADIOTAP;
331 /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */ 320 /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
332 break; 321 break;
@@ -334,36 +323,12 @@ static int __wil_up(struct wil6210_priv *wil)
334 return -EOPNOTSUPP; 323 return -EOPNOTSUPP;
335 } 324 }
336 325
337 /* Apply profile in the following order: */
338 /* SSID and channel for the AP */
339 switch (wdev->iftype) {
340 case NL80211_IFTYPE_AP:
341 case NL80211_IFTYPE_P2P_GO:
342 if (wdev->ssid_len == 0) {
343 wil_err(wil, "SSID not set\n");
344 return -EINVAL;
345 }
346 rc = wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
347 if (rc)
348 return rc;
349 break;
350 default:
351 break;
352 }
353
354 /* MAC address - pre-requisite for other commands */ 326 /* MAC address - pre-requisite for other commands */
355 wmi_set_mac_address(wil, ndev->dev_addr); 327 wmi_set_mac_address(wil, ndev->dev_addr);
356 328
357 /* Set up beaconing if required. */
358 if (bi > 0) {
359 rc = wmi_pcp_start(wil, bi, wmi_nettype,
360 (channel ? channel->hw_value : 0));
361 if (rc)
362 return rc;
363 }
364 329
365 /* Rx VRING. After MAC and beacon */ 330 napi_enable(&wil->napi_rx);
366 wil_rx_init(wil); 331 napi_enable(&wil->napi_tx);
367 332
368 return 0; 333 return 0;
369} 334}
@@ -381,6 +346,9 @@ int wil_up(struct wil6210_priv *wil)
381 346
382static int __wil_down(struct wil6210_priv *wil) 347static int __wil_down(struct wil6210_priv *wil)
383{ 348{
349 napi_disable(&wil->napi_rx);
350 napi_disable(&wil->napi_tx);
351
384 if (wil->scan_request) { 352 if (wil->scan_request) {
385 cfg80211_scan_done(wil->scan_request, true); 353 cfg80211_scan_done(wil->scan_request, true);
386 wil->scan_request = NULL; 354 wil->scan_request = NULL;
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 098a8ec6b841..29dd1e58cb17 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -40,6 +40,55 @@ static const struct net_device_ops wil_netdev_ops = {
40 .ndo_validate_addr = eth_validate_addr, 40 .ndo_validate_addr = eth_validate_addr,
41}; 41};
42 42
43static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
44{
45 struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
46 napi_rx);
47 int quota = budget;
48 int done;
49
50 wil_rx_handle(wil, &quota);
51 done = budget - quota;
52
53 if (done <= 1) { /* burst ends - only one packet processed */
54 napi_complete(napi);
55 wil6210_unmask_irq_rx(wil);
56 wil_dbg_txrx(wil, "NAPI RX complete\n");
57 }
58
59 wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
60
61 return done;
62}
63
64static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
65{
66 struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
67 napi_tx);
68 int tx_done = 0;
69 uint i;
70
71 /* always process ALL Tx complete, regardless budget - it is fast */
72 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
73 struct vring *vring = &wil->vring_tx[i];
74
75 if (!vring->va)
76 continue;
77
78 tx_done += wil_tx_complete(wil, i);
79 }
80
81 if (tx_done <= 1) { /* burst ends - only one packet processed */
82 napi_complete(napi);
83 wil6210_unmask_irq_tx(wil);
84 wil_dbg_txrx(wil, "NAPI TX complete\n");
85 }
86
87 wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
88
89 return min(tx_done, budget);
90}
91
43void *wil_if_alloc(struct device *dev, void __iomem *csr) 92void *wil_if_alloc(struct device *dev, void __iomem *csr)
44{ 93{
45 struct net_device *ndev; 94 struct net_device *ndev;
@@ -81,6 +130,11 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
81 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); 130 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
82 wdev->netdev = ndev; 131 wdev->netdev = ndev;
83 132
133 netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
134 WIL6210_NAPI_BUDGET);
135 netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
136 WIL6210_NAPI_BUDGET);
137
84 wil_link_off(wil); 138 wil_link_off(wil);
85 139
86 return wil; 140 return wil;
diff --git a/drivers/net/wireless/ath/wil6210/trace.c b/drivers/net/wireless/ath/wil6210/trace.c
new file mode 100644
index 000000000000..cd2534b9c5aa
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/trace.c
@@ -0,0 +1,20 @@
1/*
2 * Copyright (c) 2013 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/module.h>
18
19#define CREATE_TRACE_POINTS
20#include "trace.h"
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
new file mode 100644
index 000000000000..eff1239be53a
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -0,0 +1,235 @@
1/*
2 * Copyright (c) 2013 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#undef TRACE_SYSTEM
18#define TRACE_SYSTEM wil6210
19#if !defined(WIL6210_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
20#define WIL6210_TRACE_H
21
22#include <linux/tracepoint.h>
23#include "wil6210.h"
24#include "txrx.h"
25
26/* create empty functions when tracing is disabled */
27#if !defined(CONFIG_WIL6210_TRACING) || defined(__CHECKER__)
28
29#undef TRACE_EVENT
30#define TRACE_EVENT(name, proto, ...) \
31static inline void trace_ ## name(proto) {}
32#undef DECLARE_EVENT_CLASS
33#define DECLARE_EVENT_CLASS(...)
34#undef DEFINE_EVENT
35#define DEFINE_EVENT(evt_class, name, proto, ...) \
36static inline void trace_ ## name(proto) {}
37#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
38
39DECLARE_EVENT_CLASS(wil6210_wmi,
40 TP_PROTO(u16 id, void *buf, u16 buf_len),
41
42 TP_ARGS(id, buf, buf_len),
43
44 TP_STRUCT__entry(
45 __field(u16, id)
46 __field(u16, buf_len)
47 __dynamic_array(u8, buf, buf_len)
48 ),
49
50 TP_fast_assign(
51 __entry->id = id;
52 __entry->buf_len = buf_len;
53 memcpy(__get_dynamic_array(buf), buf, buf_len);
54 ),
55
56 TP_printk(
57 "id 0x%04x len %d",
58 __entry->id, __entry->buf_len
59 )
60);
61
62DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
63 TP_PROTO(u16 id, void *buf, u16 buf_len),
64 TP_ARGS(id, buf, buf_len)
65);
66
67DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
68 TP_PROTO(u16 id, void *buf, u16 buf_len),
69 TP_ARGS(id, buf, buf_len)
70);
71
72#define WIL6210_MSG_MAX (200)
73
74DECLARE_EVENT_CLASS(wil6210_log_event,
75 TP_PROTO(struct va_format *vaf),
76 TP_ARGS(vaf),
77 TP_STRUCT__entry(
78 __dynamic_array(char, msg, WIL6210_MSG_MAX)
79 ),
80 TP_fast_assign(
81 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
82 WIL6210_MSG_MAX,
83 vaf->fmt,
84 *vaf->va) >= WIL6210_MSG_MAX);
85 ),
86 TP_printk("%s", __get_str(msg))
87);
88
89DEFINE_EVENT(wil6210_log_event, wil6210_log_err,
90 TP_PROTO(struct va_format *vaf),
91 TP_ARGS(vaf)
92);
93
94DEFINE_EVENT(wil6210_log_event, wil6210_log_info,
95 TP_PROTO(struct va_format *vaf),
96 TP_ARGS(vaf)
97);
98
99DEFINE_EVENT(wil6210_log_event, wil6210_log_dbg,
100 TP_PROTO(struct va_format *vaf),
101 TP_ARGS(vaf)
102);
103
104#define wil_pseudo_irq_cause(x) __print_flags(x, "|", \
105 {BIT_DMA_PSEUDO_CAUSE_RX, "Rx" }, \
106 {BIT_DMA_PSEUDO_CAUSE_TX, "Tx" }, \
107 {BIT_DMA_PSEUDO_CAUSE_MISC, "Misc" })
108
109TRACE_EVENT(wil6210_irq_pseudo,
110 TP_PROTO(u32 x),
111 TP_ARGS(x),
112 TP_STRUCT__entry(
113 __field(u32, x)
114 ),
115 TP_fast_assign(
116 __entry->x = x;
117 ),
118 TP_printk("cause 0x%08x : %s", __entry->x,
119 wil_pseudo_irq_cause(__entry->x))
120);
121
122DECLARE_EVENT_CLASS(wil6210_irq,
123 TP_PROTO(u32 x),
124 TP_ARGS(x),
125 TP_STRUCT__entry(
126 __field(u32, x)
127 ),
128 TP_fast_assign(
129 __entry->x = x;
130 ),
131 TP_printk("cause 0x%08x", __entry->x)
132);
133
134DEFINE_EVENT(wil6210_irq, wil6210_irq_rx,
135 TP_PROTO(u32 x),
136 TP_ARGS(x)
137);
138
139DEFINE_EVENT(wil6210_irq, wil6210_irq_tx,
140 TP_PROTO(u32 x),
141 TP_ARGS(x)
142);
143
144DEFINE_EVENT(wil6210_irq, wil6210_irq_misc,
145 TP_PROTO(u32 x),
146 TP_ARGS(x)
147);
148
149DEFINE_EVENT(wil6210_irq, wil6210_irq_misc_thread,
150 TP_PROTO(u32 x),
151 TP_ARGS(x)
152);
153
154TRACE_EVENT(wil6210_rx,
155 TP_PROTO(u16 index, struct vring_rx_desc *d),
156 TP_ARGS(index, d),
157 TP_STRUCT__entry(
158 __field(u16, index)
159 __field(unsigned int, len)
160 __field(u8, mid)
161 __field(u8, cid)
162 __field(u8, tid)
163 __field(u8, type)
164 __field(u8, subtype)
165 __field(u16, seq)
166 __field(u8, mcs)
167 ),
168 TP_fast_assign(
169 __entry->index = index;
170 __entry->len = d->dma.length;
171 __entry->mid = wil_rxdesc_mid(d);
172 __entry->cid = wil_rxdesc_cid(d);
173 __entry->tid = wil_rxdesc_tid(d);
174 __entry->type = wil_rxdesc_ftype(d);
175 __entry->subtype = wil_rxdesc_subtype(d);
176 __entry->seq = wil_rxdesc_seq(d);
177 __entry->mcs = wil_rxdesc_mcs(d);
178 ),
179 TP_printk("index %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x"
180 " type 0x%1x subtype 0x%1x", __entry->index, __entry->len,
181 __entry->mid, __entry->cid, __entry->tid, __entry->mcs,
182 __entry->seq, __entry->type, __entry->subtype)
183);
184
185TRACE_EVENT(wil6210_tx,
186 TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
187 TP_ARGS(vring, index, len, frags),
188 TP_STRUCT__entry(
189 __field(u8, vring)
190 __field(u8, frags)
191 __field(u16, index)
192 __field(unsigned int, len)
193 ),
194 TP_fast_assign(
195 __entry->vring = vring;
196 __entry->frags = frags;
197 __entry->index = index;
198 __entry->len = len;
199 ),
200 TP_printk("vring %d index %d len %d frags %d",
201 __entry->vring, __entry->index, __entry->len, __entry->frags)
202);
203
204TRACE_EVENT(wil6210_tx_done,
205 TP_PROTO(u8 vring, u16 index, unsigned int len, u8 err),
206 TP_ARGS(vring, index, len, err),
207 TP_STRUCT__entry(
208 __field(u8, vring)
209 __field(u8, err)
210 __field(u16, index)
211 __field(unsigned int, len)
212 ),
213 TP_fast_assign(
214 __entry->vring = vring;
215 __entry->index = index;
216 __entry->len = len;
217 __entry->err = err;
218 ),
219 TP_printk("vring %d index %d len %d err 0x%02x",
220 __entry->vring, __entry->index, __entry->len,
221 __entry->err)
222);
223
224#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/
225
226#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)
227/* we don't want to use include/trace/events */
228#undef TRACE_INCLUDE_PATH
229#define TRACE_INCLUDE_PATH .
230#undef TRACE_INCLUDE_FILE
231#define TRACE_INCLUDE_FILE trace
232
233/* This part must be outside protection */
234#include <trace/define_trace.h>
235#endif /* defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__) */
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 797024507c71..d240b24e1ccf 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -22,6 +22,7 @@
22#include "wil6210.h" 22#include "wil6210.h"
23#include "wmi.h" 23#include "wmi.h"
24#include "txrx.h" 24#include "txrx.h"
25#include "trace.h"
25 26
26static bool rtap_include_phy_info; 27static bool rtap_include_phy_info;
27module_param(rtap_include_phy_info, bool, S_IRUGO); 28module_param(rtap_include_phy_info, bool, S_IRUGO);
@@ -89,8 +90,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
89 * we can use any 90 * we can use any
90 */ 91 */
91 for (i = 0; i < vring->size; i++) { 92 for (i = 0; i < vring->size; i++) {
92 volatile struct vring_tx_desc *d = &(vring->va[i].tx); 93 volatile struct vring_tx_desc *_d = &(vring->va[i].tx);
93 d->dma.status = TX_DMA_STATUS_DU; 94 _d->dma.status = TX_DMA_STATUS_DU;
94 } 95 }
95 96
96 wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size, 97 wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
@@ -106,30 +107,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
106 size_t sz = vring->size * sizeof(vring->va[0]); 107 size_t sz = vring->size * sizeof(vring->va[0]);
107 108
108 while (!wil_vring_is_empty(vring)) { 109 while (!wil_vring_is_empty(vring)) {
110 dma_addr_t pa;
111 struct sk_buff *skb;
112 u16 dmalen;
113
109 if (tx) { 114 if (tx) {
110 volatile struct vring_tx_desc *d = 115 struct vring_tx_desc dd, *d = &dd;
116 volatile struct vring_tx_desc *_d =
111 &vring->va[vring->swtail].tx; 117 &vring->va[vring->swtail].tx;
112 dma_addr_t pa = d->dma.addr_low | 118
113 ((u64)d->dma.addr_high << 32); 119 *d = *_d;
114 struct sk_buff *skb = vring->ctx[vring->swtail]; 120 pa = wil_desc_addr(&d->dma.addr);
121 dmalen = le16_to_cpu(d->dma.length);
122 skb = vring->ctx[vring->swtail];
115 if (skb) { 123 if (skb) {
116 dma_unmap_single(dev, pa, d->dma.length, 124 dma_unmap_single(dev, pa, dmalen,
117 DMA_TO_DEVICE); 125 DMA_TO_DEVICE);
118 dev_kfree_skb_any(skb); 126 dev_kfree_skb_any(skb);
119 vring->ctx[vring->swtail] = NULL; 127 vring->ctx[vring->swtail] = NULL;
120 } else { 128 } else {
121 dma_unmap_page(dev, pa, d->dma.length, 129 dma_unmap_page(dev, pa, dmalen,
122 DMA_TO_DEVICE); 130 DMA_TO_DEVICE);
123 } 131 }
124 vring->swtail = wil_vring_next_tail(vring); 132 vring->swtail = wil_vring_next_tail(vring);
125 } else { /* rx */ 133 } else { /* rx */
126 volatile struct vring_rx_desc *d = 134 struct vring_rx_desc dd, *d = &dd;
135 volatile struct vring_rx_desc *_d =
127 &vring->va[vring->swtail].rx; 136 &vring->va[vring->swtail].rx;
128 dma_addr_t pa = d->dma.addr_low | 137
129 ((u64)d->dma.addr_high << 32); 138 *d = *_d;
130 struct sk_buff *skb = vring->ctx[vring->swhead]; 139 pa = wil_desc_addr(&d->dma.addr);
131 dma_unmap_single(dev, pa, d->dma.length, 140 dmalen = le16_to_cpu(d->dma.length);
132 DMA_FROM_DEVICE); 141 skb = vring->ctx[vring->swhead];
142 dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
133 kfree_skb(skb); 143 kfree_skb(skb);
134 wil_vring_advance_head(vring, 1); 144 wil_vring_advance_head(vring, 1);
135 } 145 }
@@ -151,7 +161,8 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
151{ 161{
152 struct device *dev = wil_to_dev(wil); 162 struct device *dev = wil_to_dev(wil);
153 unsigned int sz = RX_BUF_LEN; 163 unsigned int sz = RX_BUF_LEN;
154 volatile struct vring_rx_desc *d = &(vring->va[i].rx); 164 struct vring_rx_desc dd, *d = &dd;
165 volatile struct vring_rx_desc *_d = &(vring->va[i].rx);
155 dma_addr_t pa; 166 dma_addr_t pa;
156 167
157 /* TODO align */ 168 /* TODO align */
@@ -169,13 +180,13 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
169 } 180 }
170 181
171 d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT; 182 d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
172 d->dma.addr_low = lower_32_bits(pa); 183 wil_desc_addr_set(&d->dma.addr, pa);
173 d->dma.addr_high = (u16)upper_32_bits(pa);
174 /* ip_length don't care */ 184 /* ip_length don't care */
175 /* b11 don't care */ 185 /* b11 don't care */
176 /* error don't care */ 186 /* error don't care */
177 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ 187 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
178 d->dma.length = sz; 188 d->dma.length = cpu_to_le16(sz);
189 *_d = *d;
179 vring->ctx[i] = skb; 190 vring->ctx[i] = skb;
180 191
181 return 0; 192 return 0;
@@ -321,11 +332,12 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
321{ 332{
322 struct device *dev = wil_to_dev(wil); 333 struct device *dev = wil_to_dev(wil);
323 struct net_device *ndev = wil_to_ndev(wil); 334 struct net_device *ndev = wil_to_ndev(wil);
324 volatile struct vring_rx_desc *d; 335 volatile struct vring_rx_desc *_d;
325 struct vring_rx_desc *d1; 336 struct vring_rx_desc *d;
326 struct sk_buff *skb; 337 struct sk_buff *skb;
327 dma_addr_t pa; 338 dma_addr_t pa;
328 unsigned int sz = RX_BUF_LEN; 339 unsigned int sz = RX_BUF_LEN;
340 u16 dmalen;
329 u8 ftype; 341 u8 ftype;
330 u8 ds_bits; 342 u8 ds_bits;
331 343
@@ -334,32 +346,44 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
334 if (wil_vring_is_empty(vring)) 346 if (wil_vring_is_empty(vring))
335 return NULL; 347 return NULL;
336 348
337 d = &(vring->va[vring->swhead].rx); 349 _d = &(vring->va[vring->swhead].rx);
338 if (!(d->dma.status & RX_DMA_STATUS_DU)) { 350 if (!(_d->dma.status & RX_DMA_STATUS_DU)) {
339 /* it is not error, we just reached end of Rx done area */ 351 /* it is not error, we just reached end of Rx done area */
340 return NULL; 352 return NULL;
341 } 353 }
342 354
343 pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
344 skb = vring->ctx[vring->swhead]; 355 skb = vring->ctx[vring->swhead];
356 d = wil_skb_rxdesc(skb);
357 *d = *_d;
358 pa = wil_desc_addr(&d->dma.addr);
359 vring->ctx[vring->swhead] = NULL;
360 wil_vring_advance_head(vring, 1);
361
345 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE); 362 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
346 skb_trim(skb, d->dma.length); 363 dmalen = le16_to_cpu(d->dma.length);
364
365 trace_wil6210_rx(vring->swhead, d);
366 wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
367 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
368 (const void *)d, sizeof(*d), false);
369
370 if (dmalen > sz) {
371 wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
372 kfree_skb(skb);
373 return NULL;
374 }
375 skb_trim(skb, dmalen);
376
377 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
378 skb->data, skb_headlen(skb), false);
347 379
348 d1 = wil_skb_rxdesc(skb);
349 *d1 = *d;
350 380
351 wil->stats.last_mcs_rx = wil_rxdesc_mcs(d1); 381 wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
352 382
353 /* use radiotap header only if required */ 383 /* use radiotap header only if required */
354 if (ndev->type == ARPHRD_IEEE80211_RADIOTAP) 384 if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
355 wil_rx_add_radiotap_header(wil, skb); 385 wil_rx_add_radiotap_header(wil, skb);
356 386
357 wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
358 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
359 (const void *)d, sizeof(*d), false);
360
361 wil_vring_advance_head(vring, 1);
362
363 /* no extra checks if in sniffer mode */ 387 /* no extra checks if in sniffer mode */
364 if (ndev->type != ARPHRD_ETHER) 388 if (ndev->type != ARPHRD_ETHER)
365 return skb; 389 return skb;
@@ -368,7 +392,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
368 * Driver should recognize it by frame type, that is found 392 * Driver should recognize it by frame type, that is found
369 * in Rx descriptor. If type is not data, it is 802.11 frame as is 393 * in Rx descriptor. If type is not data, it is 802.11 frame as is
370 */ 394 */
371 ftype = wil_rxdesc_ftype(d1) << 2; 395 ftype = wil_rxdesc_ftype(d) << 2;
372 if (ftype != IEEE80211_FTYPE_DATA) { 396 if (ftype != IEEE80211_FTYPE_DATA) {
373 wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype); 397 wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
374 /* TODO: process it */ 398 /* TODO: process it */
@@ -383,7 +407,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
383 return NULL; 407 return NULL;
384 } 408 }
385 409
386 ds_bits = wil_rxdesc_ds_bits(d1); 410 ds_bits = wil_rxdesc_ds_bits(d);
387 if (ds_bits == 1) { 411 if (ds_bits == 1) {
388 /* 412 /*
389 * HW bug - in ToDS mode, i.e. Rx on AP side, 413 * HW bug - in ToDS mode, i.e. Rx on AP side,
@@ -425,6 +449,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
425 449
426/* 450/*
427 * Pass Rx packet to the netif. Update statistics. 451 * Pass Rx packet to the netif. Update statistics.
452 * Called in softirq context (NAPI poll).
428 */ 453 */
429static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) 454static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
430{ 455{
@@ -433,10 +458,7 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
433 458
434 skb_orphan(skb); 459 skb_orphan(skb);
435 460
436 if (in_interrupt()) 461 rc = netif_receive_skb(skb);
437 rc = netif_rx(skb);
438 else
439 rc = netif_rx_ni(skb);
440 462
441 if (likely(rc == NET_RX_SUCCESS)) { 463 if (likely(rc == NET_RX_SUCCESS)) {
442 ndev->stats.rx_packets++; 464 ndev->stats.rx_packets++;
@@ -450,9 +472,9 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
450/** 472/**
451 * Proceed all completed skb's from Rx VRING 473 * Proceed all completed skb's from Rx VRING
452 * 474 *
453 * Safe to call from IRQ 475 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
454 */ 476 */
455void wil_rx_handle(struct wil6210_priv *wil) 477void wil_rx_handle(struct wil6210_priv *wil, int *quota)
456{ 478{
457 struct net_device *ndev = wil_to_ndev(wil); 479 struct net_device *ndev = wil_to_ndev(wil);
458 struct vring *v = &wil->vring_rx; 480 struct vring *v = &wil->vring_rx;
@@ -463,9 +485,8 @@ void wil_rx_handle(struct wil6210_priv *wil)
463 return; 485 return;
464 } 486 }
465 wil_dbg_txrx(wil, "%s()\n", __func__); 487 wil_dbg_txrx(wil, "%s()\n", __func__);
466 while (NULL != (skb = wil_vring_reap_rx(wil, v))) { 488 while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
467 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, 489 (*quota)--;
468 skb->data, skb_headlen(skb), false);
469 490
470 if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { 491 if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
471 skb->dev = ndev; 492 skb->dev = ndev;
@@ -600,18 +621,17 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
600 return NULL; 621 return NULL;
601} 622}
602 623
603static int wil_tx_desc_map(volatile struct vring_tx_desc *d, 624static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
604 dma_addr_t pa, u32 len) 625 int vring_index)
605{ 626{
606 d->dma.addr_low = lower_32_bits(pa); 627 wil_desc_addr_set(&d->dma.addr, pa);
607 d->dma.addr_high = (u16)upper_32_bits(pa);
608 d->dma.ip_length = 0; 628 d->dma.ip_length = 0;
609 /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/ 629 /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
610 d->dma.b11 = 0/*14 | BIT(7)*/; 630 d->dma.b11 = 0/*14 | BIT(7)*/;
611 d->dma.error = 0; 631 d->dma.error = 0;
612 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ 632 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
613 d->dma.length = len; 633 d->dma.length = cpu_to_le16((u16)len);
614 d->dma.d0 = 0; 634 d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
615 d->mac.d[0] = 0; 635 d->mac.d[0] = 0;
616 d->mac.d[1] = 0; 636 d->mac.d[1] = 0;
617 d->mac.d[2] = 0; 637 d->mac.d[2] = 0;
@@ -630,7 +650,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
630 struct sk_buff *skb) 650 struct sk_buff *skb)
631{ 651{
632 struct device *dev = wil_to_dev(wil); 652 struct device *dev = wil_to_dev(wil);
633 volatile struct vring_tx_desc *d; 653 struct vring_tx_desc dd, *d = &dd;
654 volatile struct vring_tx_desc *_d;
634 u32 swhead = vring->swhead; 655 u32 swhead = vring->swhead;
635 int avail = wil_vring_avail_tx(vring); 656 int avail = wil_vring_avail_tx(vring);
636 int nr_frags = skb_shinfo(skb)->nr_frags; 657 int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -648,7 +669,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
648 1 + nr_frags); 669 1 + nr_frags);
649 return -ENOMEM; 670 return -ENOMEM;
650 } 671 }
651 d = &(vring->va[i].tx); 672 _d = &(vring->va[i].tx);
652 673
653 /* FIXME FW can accept only unicast frames for the peer */ 674 /* FIXME FW can accept only unicast frames for the peer */
654 memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN); 675 memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
@@ -664,28 +685,32 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
664 if (unlikely(dma_mapping_error(dev, pa))) 685 if (unlikely(dma_mapping_error(dev, pa)))
665 return -EINVAL; 686 return -EINVAL;
666 /* 1-st segment */ 687 /* 1-st segment */
667 wil_tx_desc_map(d, pa, skb_headlen(skb)); 688 wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
668 d->mac.d[2] |= ((nr_frags + 1) << 689 d->mac.d[2] |= ((nr_frags + 1) <<
669 MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); 690 MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
691 if (nr_frags)
692 *_d = *d;
693
670 /* middle segments */ 694 /* middle segments */
671 for (f = 0; f < nr_frags; f++) { 695 for (f = 0; f < nr_frags; f++) {
672 const struct skb_frag_struct *frag = 696 const struct skb_frag_struct *frag =
673 &skb_shinfo(skb)->frags[f]; 697 &skb_shinfo(skb)->frags[f];
674 int len = skb_frag_size(frag); 698 int len = skb_frag_size(frag);
675 i = (swhead + f + 1) % vring->size; 699 i = (swhead + f + 1) % vring->size;
676 d = &(vring->va[i].tx); 700 _d = &(vring->va[i].tx);
677 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), 701 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
678 DMA_TO_DEVICE); 702 DMA_TO_DEVICE);
679 if (unlikely(dma_mapping_error(dev, pa))) 703 if (unlikely(dma_mapping_error(dev, pa)))
680 goto dma_error; 704 goto dma_error;
681 wil_tx_desc_map(d, pa, len); 705 wil_tx_desc_map(d, pa, len, vring_index);
682 vring->ctx[i] = NULL; 706 vring->ctx[i] = NULL;
707 *_d = *d;
683 } 708 }
684 /* for the last seg only */ 709 /* for the last seg only */
685 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS); 710 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
686 d->dma.d0 |= BIT(9); /* BUG: undocumented bit */ 711 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
687 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); 712 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
688 d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS); 713 *_d = *d;
689 714
690 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4, 715 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
691 (const void *)d, sizeof(*d), false); 716 (const void *)d, sizeof(*d), false);
@@ -693,6 +718,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
693 /* advance swhead */ 718 /* advance swhead */
694 wil_vring_advance_head(vring, nr_frags + 1); 719 wil_vring_advance_head(vring, nr_frags + 1);
695 wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead); 720 wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
721 trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
696 iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail)); 722 iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
697 /* hold reference to skb 723 /* hold reference to skb
698 * to prevent skb release before accounting 724 * to prevent skb release before accounting
@@ -705,14 +731,18 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
705 /* unmap what we have mapped */ 731 /* unmap what we have mapped */
706 /* Note: increment @f to operate with positive index */ 732 /* Note: increment @f to operate with positive index */
707 for (f++; f > 0; f--) { 733 for (f++; f > 0; f--) {
734 u16 dmalen;
735
708 i = (swhead + f) % vring->size; 736 i = (swhead + f) % vring->size;
709 d = &(vring->va[i].tx); 737 _d = &(vring->va[i].tx);
710 d->dma.status = TX_DMA_STATUS_DU; 738 *d = *_d;
711 pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32); 739 _d->dma.status = TX_DMA_STATUS_DU;
740 pa = wil_desc_addr(&d->dma.addr);
741 dmalen = le16_to_cpu(d->dma.length);
712 if (vring->ctx[i]) 742 if (vring->ctx[i])
713 dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE); 743 dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
714 else 744 else
715 dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE); 745 dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
716 } 746 }
717 747
718 return -EINVAL; 748 return -EINVAL;
@@ -738,18 +768,16 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
738 wil_err(wil, "Xmit in monitor mode not supported\n"); 768 wil_err(wil, "Xmit in monitor mode not supported\n");
739 goto drop; 769 goto drop;
740 } 770 }
741 if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { 771
742 rc = wmi_tx_eapol(wil, skb); 772 /* find vring */
743 } else { 773 vring = wil_find_tx_vring(wil, skb);
744 /* find vring */ 774 if (!vring) {
745 vring = wil_find_tx_vring(wil, skb); 775 wil_err(wil, "No Tx VRING available\n");
746 if (!vring) { 776 goto drop;
747 wil_err(wil, "No Tx VRING available\n");
748 goto drop;
749 }
750 /* set up vring entry */
751 rc = wil_tx_vring(wil, vring, skb);
752 } 777 }
778 /* set up vring entry */
779 rc = wil_tx_vring(wil, vring, skb);
780
753 switch (rc) { 781 switch (rc) {
754 case 0: 782 case 0:
755 /* statistics will be updated on the tx_complete */ 783 /* statistics will be updated on the tx_complete */
@@ -761,7 +789,6 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
761 break; /* goto drop; */ 789 break; /* goto drop; */
762 } 790 }
763 drop: 791 drop:
764 netif_tx_stop_all_queues(ndev);
765 ndev->stats.tx_dropped++; 792 ndev->stats.tx_dropped++;
766 dev_kfree_skb_any(skb); 793 dev_kfree_skb_any(skb);
767 794
@@ -771,41 +798,48 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
771/** 798/**
772 * Clean up transmitted skb's from the Tx VRING 799 * Clean up transmitted skb's from the Tx VRING
773 * 800 *
801 * Return number of descriptors cleared
802 *
774 * Safe to call from IRQ 803 * Safe to call from IRQ
775 */ 804 */
776void wil_tx_complete(struct wil6210_priv *wil, int ringid) 805int wil_tx_complete(struct wil6210_priv *wil, int ringid)
777{ 806{
778 struct net_device *ndev = wil_to_ndev(wil); 807 struct net_device *ndev = wil_to_ndev(wil);
779 struct device *dev = wil_to_dev(wil); 808 struct device *dev = wil_to_dev(wil);
780 struct vring *vring = &wil->vring_tx[ringid]; 809 struct vring *vring = &wil->vring_tx[ringid];
810 int done = 0;
781 811
782 if (!vring->va) { 812 if (!vring->va) {
783 wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid); 813 wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
784 return; 814 return 0;
785 } 815 }
786 816
787 wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); 817 wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
788 818
789 while (!wil_vring_is_empty(vring)) { 819 while (!wil_vring_is_empty(vring)) {
790 volatile struct vring_tx_desc *d1 = 820 volatile struct vring_tx_desc *_d =
791 &vring->va[vring->swtail].tx; 821 &vring->va[vring->swtail].tx;
792 struct vring_tx_desc dd, *d = &dd; 822 struct vring_tx_desc dd, *d = &dd;
793 dma_addr_t pa; 823 dma_addr_t pa;
794 struct sk_buff *skb; 824 struct sk_buff *skb;
825 u16 dmalen;
795 826
796 dd = *d1; 827 *d = *_d;
797 828
798 if (!(d->dma.status & TX_DMA_STATUS_DU)) 829 if (!(d->dma.status & TX_DMA_STATUS_DU))
799 break; 830 break;
800 831
832 dmalen = le16_to_cpu(d->dma.length);
833 trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
834 d->dma.error);
801 wil_dbg_txrx(wil, 835 wil_dbg_txrx(wil,
802 "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n", 836 "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
803 vring->swtail, d->dma.length, d->dma.status, 837 vring->swtail, dmalen, d->dma.status,
804 d->dma.error); 838 d->dma.error);
805 wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4, 839 wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
806 (const void *)d, sizeof(*d), false); 840 (const void *)d, sizeof(*d), false);
807 841
808 pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32); 842 pa = wil_desc_addr(&d->dma.addr);
809 skb = vring->ctx[vring->swtail]; 843 skb = vring->ctx[vring->swtail];
810 if (skb) { 844 if (skb) {
811 if (d->dma.error == 0) { 845 if (d->dma.error == 0) {
@@ -815,18 +849,21 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid)
815 ndev->stats.tx_errors++; 849 ndev->stats.tx_errors++;
816 } 850 }
817 851
818 dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE); 852 dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
819 dev_kfree_skb_any(skb); 853 dev_kfree_skb_any(skb);
820 vring->ctx[vring->swtail] = NULL; 854 vring->ctx[vring->swtail] = NULL;
821 } else { 855 } else {
822 dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE); 856 dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
823 } 857 }
824 d->dma.addr_low = 0; 858 d->dma.addr.addr_low = 0;
825 d->dma.addr_high = 0; 859 d->dma.addr.addr_high = 0;
826 d->dma.length = 0; 860 d->dma.length = 0;
827 d->dma.status = TX_DMA_STATUS_DU; 861 d->dma.status = TX_DMA_STATUS_DU;
828 vring->swtail = wil_vring_next_tail(vring); 862 vring->swtail = wil_vring_next_tail(vring);
863 done++;
829 } 864 }
830 if (wil_vring_avail_tx(vring) > vring->size/4) 865 if (wil_vring_avail_tx(vring) > vring->size/4)
831 netif_tx_wake_all_queues(wil_to_ndev(wil)); 866 netif_tx_wake_all_queues(wil_to_ndev(wil));
867
868 return done;
832} 869}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index adef12fb2aee..859aea68a1fa 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -27,6 +27,28 @@
27#define WIL6210_RTAP_SIZE (128) 27#define WIL6210_RTAP_SIZE (128)
28 28
29/* Tx/Rx path */ 29/* Tx/Rx path */
30
31/*
32 * Common representation of physical address in Vring
33 */
34struct vring_dma_addr {
35 __le32 addr_low;
36 __le16 addr_high;
37} __packed;
38
39static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr)
40{
41 return le32_to_cpu(addr->addr_low) |
42 ((u64)le16_to_cpu(addr->addr_high) << 32);
43}
44
45static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
46 dma_addr_t pa)
47{
48 addr->addr_low = cpu_to_le32(lower_32_bits(pa));
49 addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
50}
51
30/* 52/*
31 * Tx descriptor - MAC part 53 * Tx descriptor - MAC part
32 * [dword 0] 54 * [dword 0]
@@ -179,6 +201,10 @@ struct vring_tx_mac {
179#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1 201#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
180#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100 202#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
181 203
204#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS 9
205#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_LEN 1
206#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_MSK 0x200
207
182#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10 208#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
183#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1 209#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
184#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400 210#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
@@ -216,13 +242,12 @@ struct vring_tx_mac {
216 242
217struct vring_tx_dma { 243struct vring_tx_dma {
218 u32 d0; 244 u32 d0;
219 u32 addr_low; 245 struct vring_dma_addr addr;
220 u16 addr_high;
221 u8 ip_length; 246 u8 ip_length;
222 u8 b11; /* 0..6: mac_length; 7:ip_version */ 247 u8 b11; /* 0..6: mac_length; 7:ip_version */
223 u8 error; /* 0..2: err; 3..7: reserved; */ 248 u8 error; /* 0..2: err; 3..7: reserved; */
224 u8 status; /* 0: used; 1..7; reserved */ 249 u8 status; /* 0: used; 1..7; reserved */
225 u16 length; 250 __le16 length;
226} __packed; 251} __packed;
227 252
228/* 253/*
@@ -315,13 +340,12 @@ struct vring_rx_mac {
315 340
316struct vring_rx_dma { 341struct vring_rx_dma {
317 u32 d0; 342 u32 d0;
318 u32 addr_low; 343 struct vring_dma_addr addr;
319 u16 addr_high;
320 u8 ip_length; 344 u8 ip_length;
321 u8 b11; 345 u8 b11;
322 u8 error; 346 u8 error;
323 u8 status; 347 u8 status;
324 u16 length; 348 __le16 length;
325} __packed; 349} __packed;
326 350
327struct vring_tx_desc { 351struct vring_tx_desc {
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 8f76ecd8a7e5..44fdab51de7e 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -34,9 +34,11 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
34 34
35#define WIL6210_MEM_SIZE (2*1024*1024UL) 35#define WIL6210_MEM_SIZE (2*1024*1024UL)
36 36
37#define WIL6210_RX_RING_SIZE (128) 37#define WIL6210_RX_RING_SIZE (128)
38#define WIL6210_TX_RING_SIZE (128) 38#define WIL6210_TX_RING_SIZE (128)
39#define WIL6210_MAX_TX_RINGS (24) 39#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
40#define WIL6210_MAX_CID (8) /* HW limit */
41#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
40 42
41/* Hardware definitions begin */ 43/* Hardware definitions begin */
42 44
@@ -184,6 +186,7 @@ struct vring {
184 186
185enum { /* for wil6210_priv.status */ 187enum { /* for wil6210_priv.status */
186 wil_status_fwready = 0, 188 wil_status_fwready = 0,
189 wil_status_fwconnecting,
187 wil_status_fwconnected, 190 wil_status_fwconnected,
188 wil_status_dontscan, 191 wil_status_dontscan,
189 wil_status_reset_done, 192 wil_status_reset_done,
@@ -239,6 +242,8 @@ struct wil6210_priv {
239 * - consumed in thread by wmi_event_worker 242 * - consumed in thread by wmi_event_worker
240 */ 243 */
241 spinlock_t wmi_ev_lock; 244 spinlock_t wmi_ev_lock;
245 struct napi_struct napi_rx;
246 struct napi_struct napi_tx;
242 /* DMA related */ 247 /* DMA related */
243 struct vring vring_rx; 248 struct vring vring_rx;
244 struct vring vring_tx[WIL6210_MAX_TX_RINGS]; 249 struct vring vring_tx[WIL6210_MAX_TX_RINGS];
@@ -267,9 +272,13 @@ struct wil6210_priv {
267#define wil_to_ndev(i) (wil_to_wdev(i)->netdev) 272#define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
268#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr)) 273#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
269 274
270#define wil_dbg(wil, fmt, arg...) netdev_dbg(wil_to_ndev(wil), fmt, ##arg) 275int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...);
271#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg) 276int wil_err(struct wil6210_priv *wil, const char *fmt, ...);
272#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg) 277int wil_info(struct wil6210_priv *wil, const char *fmt, ...);
278#define wil_dbg(wil, fmt, arg...) do { \
279 netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \
280 wil_dbg_trace(wil, fmt, ##arg); \
281} while (0)
273 282
274#define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg) 283#define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
275#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg) 284#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
@@ -320,7 +329,6 @@ int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
320int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid); 329int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
321int wmi_set_channel(struct wil6210_priv *wil, int channel); 330int wmi_set_channel(struct wil6210_priv *wil, int channel);
322int wmi_get_channel(struct wil6210_priv *wil, int *channel); 331int wmi_get_channel(struct wil6210_priv *wil, int *channel);
323int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb);
324int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, 332int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
325 const void *mac_addr); 333 const void *mac_addr);
326int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, 334int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
@@ -356,10 +364,12 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
356void wil_vring_fini_tx(struct wil6210_priv *wil, int id); 364void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
357 365
358netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev); 366netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
359void wil_tx_complete(struct wil6210_priv *wil, int ringid); 367int wil_tx_complete(struct wil6210_priv *wil, int ringid);
368void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
360 369
361/* RX API */ 370/* RX API */
362void wil_rx_handle(struct wil6210_priv *wil); 371void wil_rx_handle(struct wil6210_priv *wil, int *quota);
372void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
363 373
364int wil_iftype_nl2wmi(enum nl80211_iftype type); 374int wil_iftype_nl2wmi(enum nl80211_iftype type);
365 375
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 45b04e383f9a..dc8059ad4bab 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -20,6 +20,7 @@
20#include "wil6210.h" 20#include "wil6210.h"
21#include "txrx.h" 21#include "txrx.h"
22#include "wmi.h" 22#include "wmi.h"
23#include "trace.h"
23 24
24/** 25/**
25 * WMI event receiving - theory of operations 26 * WMI event receiving - theory of operations
@@ -74,10 +75,11 @@ static const struct {
74 {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */ 75 {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
75 {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */ 76 {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
76 {0x880000, 0x88a000, 0x880000}, /* various RGF */ 77 {0x880000, 0x88a000, 0x880000}, /* various RGF */
77 {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */ 78 {0x8c0000, 0x949000, 0x8c0000}, /* trivial mapping for upper area */
78 /* 79 /*
79 * 920000..930000 ucode code RAM 80 * 920000..930000 ucode code RAM
80 * 930000..932000 ucode data RAM 81 * 930000..932000 ucode data RAM
82 * 932000..949000 back-door debug data
81 */ 83 */
82}; 84};
83 85
@@ -246,6 +248,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
246 iowrite32(r->head = next_head, wil->csr + HOST_MBOX + 248 iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
247 offsetof(struct wil6210_mbox_ctl, tx.head)); 249 offsetof(struct wil6210_mbox_ctl, tx.head));
248 250
251 trace_wil6210_wmi_cmd(cmdid, buf, len);
252
249 /* interrupt to FW */ 253 /* interrupt to FW */
250 iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT); 254 iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
251 255
@@ -311,8 +315,8 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
311 315
312 wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n", 316 wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
313 data->info.channel, data->info.mcs, data->info.snr); 317 data->info.channel, data->info.mcs, data->info.snr);
314 wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len, 318 wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
315 le16_to_cpu(data->info.stype)); 319 le16_to_cpu(fc));
316 wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", 320 wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
317 data->info.qid, data->info.mid, data->info.cid); 321 data->info.qid, data->info.mid, data->info.cid);
318 322
@@ -406,7 +410,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
406 410
407 if ((wdev->iftype == NL80211_IFTYPE_STATION) || 411 if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
408 (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { 412 (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
409 if (wdev->sme_state != CFG80211_SME_CONNECTING) { 413 if (!test_bit(wil_status_fwconnecting, &wil->status)) {
410 wil_err(wil, "Not in connecting state\n"); 414 wil_err(wil, "Not in connecting state\n");
411 return; 415 return;
412 } 416 }
@@ -430,6 +434,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
430 434
431 cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); 435 cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
432 } 436 }
437 clear_bit(wil_status_fwconnecting, &wil->status);
433 set_bit(wil_status_fwconnected, &wil->status); 438 set_bit(wil_status_fwconnected, &wil->status);
434 439
435 /* FIXME FW can transmit only ucast frames to peer */ 440 /* FIXME FW can transmit only ucast frames to peer */
@@ -635,8 +640,9 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
635 hdr.flags); 640 hdr.flags);
636 if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && 641 if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
637 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { 642 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
638 wil_dbg_wmi(wil, "WMI event 0x%04x\n", 643 u16 id = le16_to_cpu(evt->event.wmi.id);
639 evt->event.wmi.id); 644 wil_dbg_wmi(wil, "WMI event 0x%04x\n", id);
645 trace_wil6210_wmi_event(id, &evt->event.wmi, len);
640 } 646 }
641 wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1, 647 wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
642 &evt->event.hdr, sizeof(hdr) + len, true); 648 &evt->event.hdr, sizeof(hdr) + len, true);
@@ -724,7 +730,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
724 .bcon_interval = cpu_to_le16(bi), 730 .bcon_interval = cpu_to_le16(bi),
725 .network_type = wmi_nettype, 731 .network_type = wmi_nettype,
726 .disable_sec_offload = 1, 732 .disable_sec_offload = 1,
727 .channel = chan, 733 .channel = chan - 1,
728 }; 734 };
729 struct { 735 struct {
730 struct wil6210_mbox_hdr_wmi wmi; 736 struct wil6210_mbox_hdr_wmi wmi;
@@ -734,8 +740,12 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
734 if (!wil->secure_pcp) 740 if (!wil->secure_pcp)
735 cmd.disable_sec = 1; 741 cmd.disable_sec = 1;
736 742
743 /*
744 * Processing time may be huge, in case of secure AP it takes about
745 * 3500ms for FW to start AP
746 */
737 rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd), 747 rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd),
738 WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100); 748 WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000);
739 if (rc) 749 if (rc)
740 return rc; 750 return rc;
741 751
@@ -829,40 +839,6 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
829 return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd)); 839 return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
830} 840}
831 841
832int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
833{
834 struct wmi_eapol_tx_cmd *cmd;
835 struct ethhdr *eth;
836 u16 eapol_len = skb->len - ETH_HLEN;
837 void *eapol = skb->data + ETH_HLEN;
838 uint i;
839 int rc;
840
841 skb_set_mac_header(skb, 0);
842 eth = eth_hdr(skb);
843 wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
844 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
845 if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
846 goto found_dest;
847 }
848
849 return -EINVAL;
850
851 found_dest:
852 /* find out eapol data & len */
853 cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
854 if (!cmd)
855 return -EINVAL;
856
857 memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
858 cmd->eapol_len = cpu_to_le16(eapol_len);
859 memcpy(cmd->eapol, eapol, eapol_len);
860 rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
861 kfree(cmd);
862
863 return rc;
864}
865
866int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, 842int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
867 const void *mac_addr) 843 const void *mac_addr)
868{ 844{
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 078e6f3477a9..51ff0b198d0a 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -28,18 +28,12 @@ config B43
28 28
29config B43_BCMA 29config B43_BCMA
30 bool "Support for BCMA bus" 30 bool "Support for BCMA bus"
31 depends on B43 && BCMA 31 depends on B43 && (BCMA = y || BCMA = B43)
32 default y
33
34config B43_BCMA_EXTRA
35 bool "Hardware support that overlaps with the brcmsmac driver"
36 depends on B43_BCMA
37 default n if BRCMSMAC
38 default y 32 default y
39 33
40config B43_SSB 34config B43_SSB
41 bool 35 bool
42 depends on B43 && SSB 36 depends on B43 && (SSB = y || SSB = B43)
43 default y 37 default y
44 38
45# Auto-select SSB PCI-HOST support, if possible 39# Auto-select SSB PCI-HOST support, if possible
@@ -111,6 +105,7 @@ config B43_PIO
111config B43_PHY_N 105config B43_PHY_N
112 bool "Support for 802.11n (N-PHY) devices" 106 bool "Support for 802.11n (N-PHY) devices"
113 depends on B43 107 depends on B43
108 default y
114 ---help--- 109 ---help---
115 Support for the N-PHY. 110 Support for the N-PHY.
116 111
@@ -132,6 +127,7 @@ config B43_PHY_LP
132config B43_PHY_HT 127config B43_PHY_HT
133 bool "Support for HT-PHY (high throughput) devices" 128 bool "Support for HT-PHY (high throughput) devices"
134 depends on B43 && B43_BCMA 129 depends on B43 && B43_BCMA
130 default y
135 ---help--- 131 ---help---
136 Support for the HT-PHY. 132 Support for the HT-PHY.
137 133
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index a95b77ab360e..0e933bb71543 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -113,13 +113,15 @@ static int b43_modparam_pio = 0;
113module_param_named(pio, b43_modparam_pio, int, 0644); 113module_param_named(pio, b43_modparam_pio, int, 0644);
114MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO"); 114MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
115 115
116static int modparam_allhwsupport = !IS_ENABLED(CONFIG_BRCMSMAC);
117module_param_named(allhwsupport, modparam_allhwsupport, int, 0444);
118MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if overlaps with the brcmsmac driver)");
119
116#ifdef CONFIG_B43_BCMA 120#ifdef CONFIG_B43_BCMA
117static const struct bcma_device_id b43_bcma_tbl[] = { 121static const struct bcma_device_id b43_bcma_tbl[] = {
118 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS), 122 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
119#ifdef CONFIG_B43_BCMA_EXTRA
120 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS), 123 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
121 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS), 124 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
122#endif
123 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS), 125 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS),
124 BCMA_CORETABLE_END 126 BCMA_CORETABLE_END
125}; 127};
@@ -5396,6 +5398,12 @@ static int b43_bcma_probe(struct bcma_device *core)
5396 struct b43_wl *wl; 5398 struct b43_wl *wl;
5397 int err; 5399 int err;
5398 5400
5401 if (!modparam_allhwsupport &&
5402 (core->id.rev == 0x17 || core->id.rev == 0x18)) {
5403 pr_err("Support for cores revisions 0x17 and 0x18 disabled by module param allhwsupport=0. Try b43.allhwsupport=1\n");
5404 return -ENOTSUPP;
5405 }
5406
5399 dev = b43_bus_dev_bcma_init(core); 5407 dev = b43_bus_dev_bcma_init(core);
5400 if (!dev) 5408 if (!dev)
5401 return -ENODEV; 5409 return -ENODEV;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 4891e3df2058..e3f3c48f86d4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -22,9 +22,11 @@
22#include <linux/pci_ids.h> 22#include <linux/pci_ids.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/completion.h> 24#include <linux/completion.h>
25#include <linux/scatterlist.h>
25#include <linux/mmc/sdio.h> 26#include <linux/mmc/sdio.h>
26#include <linux/mmc/sdio_func.h> 27#include <linux/mmc/sdio_func.h>
27#include <linux/mmc/card.h> 28#include <linux/mmc/card.h>
29#include <linux/mmc/host.h>
28#include <linux/platform_data/brcmfmac-sdio.h> 30#include <linux/platform_data/brcmfmac-sdio.h>
29 31
30#include <defs.h> 32#include <defs.h>
@@ -160,7 +162,7 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
160 return 0; 162 return 0;
161} 163}
162 164
163int 165static int
164brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address) 166brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
165{ 167{
166 int err = 0, i; 168 int err = 0, i;
@@ -191,12 +193,33 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
191 return err; 193 return err;
192} 194}
193 195
196static int
197brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
198{
199 uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
200 int err = 0;
201
202 if (bar0 != sdiodev->sbwad) {
203 err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
204 if (err)
205 return err;
206
207 sdiodev->sbwad = bar0;
208 }
209
210 *addr &= SBSDIO_SB_OFT_ADDR_MASK;
211
212 if (width == 4)
213 *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
214
215 return 0;
216}
217
194int 218int
195brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr, 219brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
196 void *data, bool write) 220 void *data, bool write)
197{ 221{
198 u8 func_num, reg_size; 222 u8 func_num, reg_size;
199 u32 bar;
200 s32 retry = 0; 223 s32 retry = 0;
201 int ret; 224 int ret;
202 225
@@ -216,18 +239,7 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
216 func_num = SDIO_FUNC_1; 239 func_num = SDIO_FUNC_1;
217 reg_size = 4; 240 reg_size = 4;
218 241
219 /* Set the window for SB core register */ 242 brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
220 bar = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
221 if (bar != sdiodev->sbwad) {
222 ret = brcmf_sdcard_set_sbaddr_window(sdiodev, bar);
223 if (ret != 0) {
224 memset(data, 0xFF, reg_size);
225 return ret;
226 }
227 sdiodev->sbwad = bar;
228 }
229 addr &= SBSDIO_SB_OFT_ADDR_MASK;
230 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
231 } 243 }
232 244
233 do { 245 do {
@@ -303,30 +315,207 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
303 *ret = retval; 315 *ret = retval;
304} 316}
305 317
306static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn, 318/**
307 uint flags, uint width, u32 *addr) 319 * brcmf_sdio_buffrw - SDIO interface function for block data access
320 * @sdiodev: brcmfmac sdio device
321 * @fn: SDIO function number
322 * @write: direction flag
323 * @addr: dongle memory address as source/destination
324 * @pkt: skb pointer
325 *
326 * This function takes the respbonsibility as the interface function to MMC
327 * stack for block data access. It assumes that the skb passed down by the
328 * caller has already been padded and aligned.
329 */
330static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
331 bool write, u32 addr, struct sk_buff_head *pktlist)
308{ 332{
309 uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK; 333 unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
310 int err = 0; 334 unsigned int max_blks, max_req_sz, orig_offset, dst_offset;
335 unsigned short max_seg_sz, seg_sz;
336 unsigned char *pkt_data, *orig_data, *dst_data;
337 struct sk_buff *pkt_next = NULL, *local_pkt_next;
338 struct sk_buff_head local_list, *target_list;
339 struct mmc_request mmc_req;
340 struct mmc_command mmc_cmd;
341 struct mmc_data mmc_dat;
342 struct sg_table st;
343 struct scatterlist *sgl;
344 struct mmc_host *host;
345 int ret = 0;
311 346
312 /* Async not implemented yet */ 347 if (!pktlist->qlen)
313 if (flags & SDIO_REQ_ASYNC) 348 return -EINVAL;
314 return -ENOTSUPP;
315 349
316 if (bar0 != sdiodev->sbwad) { 350 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
317 err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0); 351 if (brcmf_pm_resume_error(sdiodev))
318 if (err) 352 return -EIO;
319 return err;
320 353
321 sdiodev->sbwad = bar0; 354 /* Single skb use the standard mmc interface */
355 if (pktlist->qlen == 1) {
356 pkt_next = pktlist->next;
357 req_sz = pkt_next->len + 3;
358 req_sz &= (uint)~3;
359
360 if (write)
361 return sdio_memcpy_toio(sdiodev->func[fn], addr,
362 ((u8 *)(pkt_next->data)),
363 req_sz);
364 else if (fn == 1)
365 return sdio_memcpy_fromio(sdiodev->func[fn],
366 ((u8 *)(pkt_next->data)),
367 addr, req_sz);
368 else
369 /* function 2 read is FIFO operation */
370 return sdio_readsb(sdiodev->func[fn],
371 ((u8 *)(pkt_next->data)), addr,
372 req_sz);
322 } 373 }
323 374
324 *addr &= SBSDIO_SB_OFT_ADDR_MASK; 375 target_list = pktlist;
376 /* for host with broken sg support, prepare a page aligned list */
377 __skb_queue_head_init(&local_list);
378 if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
379 req_sz = 0;
380 skb_queue_walk(pktlist, pkt_next)
381 req_sz += pkt_next->len;
382 req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
383 while (req_sz > PAGE_SIZE) {
384 pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
385 if (pkt_next == NULL) {
386 ret = -ENOMEM;
387 goto exit;
388 }
389 __skb_queue_tail(&local_list, pkt_next);
390 req_sz -= PAGE_SIZE;
391 }
392 pkt_next = brcmu_pkt_buf_get_skb(req_sz);
393 if (pkt_next == NULL) {
394 ret = -ENOMEM;
395 goto exit;
396 }
397 __skb_queue_tail(&local_list, pkt_next);
398 target_list = &local_list;
399 }
325 400
326 if (width == 4) 401 host = sdiodev->func[fn]->card->host;
327 *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; 402 func_blk_sz = sdiodev->func[fn]->cur_blksize;
403 /* Blocks per command is limited by host count, host transfer
404 * size and the maximum for IO_RW_EXTENDED of 511 blocks.
405 */
406 max_blks = min_t(unsigned int, host->max_blk_count, 511u);
407 max_req_sz = min_t(unsigned int, host->max_req_size,
408 max_blks * func_blk_sz);
409 max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
410 max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen);
411 seg_sz = target_list->qlen;
412 pkt_offset = 0;
413 pkt_next = target_list->next;
414
415 if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) {
416 ret = -ENOMEM;
417 goto exit;
418 }
328 419
329 return 0; 420 while (seg_sz) {
421 req_sz = 0;
422 sg_cnt = 0;
423 memset(&mmc_req, 0, sizeof(struct mmc_request));
424 memset(&mmc_cmd, 0, sizeof(struct mmc_command));
425 memset(&mmc_dat, 0, sizeof(struct mmc_data));
426 sgl = st.sgl;
427 /* prep sg table */
428 while (pkt_next != (struct sk_buff *)target_list) {
429 pkt_data = pkt_next->data + pkt_offset;
430 sg_data_sz = pkt_next->len - pkt_offset;
431 if (sg_data_sz > host->max_seg_size)
432 sg_data_sz = host->max_seg_size;
433 if (sg_data_sz > max_req_sz - req_sz)
434 sg_data_sz = max_req_sz - req_sz;
435
436 sg_set_buf(sgl, pkt_data, sg_data_sz);
437
438 sg_cnt++;
439 sgl = sg_next(sgl);
440 req_sz += sg_data_sz;
441 pkt_offset += sg_data_sz;
442 if (pkt_offset == pkt_next->len) {
443 pkt_offset = 0;
444 pkt_next = pkt_next->next;
445 }
446
447 if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz)
448 break;
449 }
450 seg_sz -= sg_cnt;
451
452 if (req_sz % func_blk_sz != 0) {
453 brcmf_err("sg request length %u is not %u aligned\n",
454 req_sz, func_blk_sz);
455 ret = -ENOTBLK;
456 goto exit;
457 }
458 mmc_dat.sg = st.sgl;
459 mmc_dat.sg_len = sg_cnt;
460 mmc_dat.blksz = func_blk_sz;
461 mmc_dat.blocks = req_sz / func_blk_sz;
462 mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
463 mmc_cmd.opcode = SD_IO_RW_EXTENDED;
464 mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
465 mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
466 mmc_cmd.arg |= 1<<27; /* block mode */
467 /* incrementing addr for function 1 */
468 mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
469 mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
470 mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
471 mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
472 mmc_req.cmd = &mmc_cmd;
473 mmc_req.data = &mmc_dat;
474 if (fn == 1)
475 addr += req_sz;
476
477 mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
478 mmc_wait_for_req(host, &mmc_req);
479
480 ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
481 if (ret != 0) {
482 brcmf_err("CMD53 sg block %s failed %d\n",
483 write ? "write" : "read", ret);
484 ret = -EIO;
485 break;
486 }
487 }
488
489 if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
490 local_pkt_next = local_list.next;
491 orig_offset = 0;
492 skb_queue_walk(pktlist, pkt_next) {
493 dst_offset = 0;
494 do {
495 req_sz = local_pkt_next->len - orig_offset;
496 req_sz = min_t(uint, pkt_next->len - dst_offset,
497 req_sz);
498 orig_data = local_pkt_next->data + orig_offset;
499 dst_data = pkt_next->data + dst_offset;
500 memcpy(dst_data, orig_data, req_sz);
501 orig_offset += req_sz;
502 dst_offset += req_sz;
503 if (orig_offset == local_pkt_next->len) {
504 orig_offset = 0;
505 local_pkt_next = local_pkt_next->next;
506 }
507 if (dst_offset == pkt_next->len)
508 break;
509 } while (!skb_queue_empty(&local_list));
510 }
511 }
512
513exit:
514 sg_free_table(&st);
515 while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
516 brcmu_pkt_buf_free_skb(pkt_next);
517
518 return ret;
330} 519}
331 520
332int 521int
@@ -355,21 +544,22 @@ int
355brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 544brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
356 uint flags, struct sk_buff *pkt) 545 uint flags, struct sk_buff *pkt)
357{ 546{
358 uint incr_fix;
359 uint width; 547 uint width;
360 int err = 0; 548 int err = 0;
549 struct sk_buff_head pkt_list;
361 550
362 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n", 551 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
363 fn, addr, pkt->len); 552 fn, addr, pkt->len);
364 553
365 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 554 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
366 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); 555 err = brcmf_sdio_addrprep(sdiodev, width, &addr);
367 if (err) 556 if (err)
368 goto done; 557 goto done;
369 558
370 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; 559 skb_queue_head_init(&pkt_list);
371 err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ, 560 skb_queue_tail(&pkt_list, pkt);
372 fn, addr, pkt); 561 err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
562 skb_dequeue_tail(&pkt_list);
373 563
374done: 564done:
375 return err; 565 return err;
@@ -386,13 +576,12 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
386 fn, addr, pktq->qlen); 576 fn, addr, pktq->qlen);
387 577
388 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 578 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
389 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); 579 err = brcmf_sdio_addrprep(sdiodev, width, &addr);
390 if (err) 580 if (err)
391 goto done; 581 goto done;
392 582
393 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; 583 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
394 err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr, 584 err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq);
395 pktq);
396 585
397done: 586done:
398 return err; 587 return err;
@@ -424,37 +613,21 @@ int
424brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 613brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
425 uint flags, struct sk_buff *pkt) 614 uint flags, struct sk_buff *pkt)
426{ 615{
427 uint incr_fix;
428 uint width; 616 uint width;
429 uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
430 int err = 0; 617 int err = 0;
618 struct sk_buff_head pkt_list;
431 619
432 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n", 620 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
433 fn, addr, pkt->len); 621 fn, addr, pkt->len);
434 622
435 /* Async not implemented yet */
436 if (flags & SDIO_REQ_ASYNC)
437 return -ENOTSUPP;
438
439 if (bar0 != sdiodev->sbwad) {
440 err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
441 if (err)
442 goto done;
443
444 sdiodev->sbwad = bar0;
445 }
446
447 addr &= SBSDIO_SB_OFT_ADDR_MASK;
448
449 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
450 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 623 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
451 if (width == 4) 624 brcmf_sdio_addrprep(sdiodev, width, &addr);
452 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
453 625
454 err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn, 626 skb_queue_head_init(&pkt_list);
455 addr, pkt); 627 skb_queue_tail(&pkt_list, pkt);
628 err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list);
629 skb_dequeue_tail(&pkt_list);
456 630
457done:
458 return err; 631 return err;
459} 632}
460 633
@@ -466,6 +639,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
466 struct sk_buff *pkt; 639 struct sk_buff *pkt;
467 u32 sdaddr; 640 u32 sdaddr;
468 uint dsize; 641 uint dsize;
642 struct sk_buff_head pkt_list;
469 643
470 dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size); 644 dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
471 pkt = dev_alloc_skb(dsize); 645 pkt = dev_alloc_skb(dsize);
@@ -474,6 +648,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
474 return -EIO; 648 return -EIO;
475 } 649 }
476 pkt->priority = 0; 650 pkt->priority = 0;
651 skb_queue_head_init(&pkt_list);
477 652
478 /* Determine initial transfer parameters */ 653 /* Determine initial transfer parameters */
479 sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK; 654 sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
@@ -501,9 +676,10 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
501 skb_put(pkt, dsize); 676 skb_put(pkt, dsize);
502 if (write) 677 if (write)
503 memcpy(pkt->data, data, dsize); 678 memcpy(pkt->data, data, dsize);
504 bcmerror = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, 679 skb_queue_tail(&pkt_list, pkt);
505 write, SDIO_FUNC_1, 680 bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
506 sdaddr, pkt); 681 sdaddr, &pkt_list);
682 skb_dequeue_tail(&pkt_list);
507 if (bcmerror) { 683 if (bcmerror) {
508 brcmf_err("membytes transfer failed\n"); 684 brcmf_err("membytes transfer failed\n");
509 break; 685 break;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 44fa0cdbf97b..289e386f01f6 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -66,7 +66,7 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
66static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata; 66static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
67 67
68 68
69static bool 69bool
70brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev) 70brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
71{ 71{
72 bool is_err = false; 72 bool is_err = false;
@@ -76,7 +76,7 @@ brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
76 return is_err; 76 return is_err;
77} 77}
78 78
79static void 79void
80brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq) 80brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq)
81{ 81{
82#ifdef CONFIG_PM_SLEEP 82#ifdef CONFIG_PM_SLEEP
@@ -211,115 +211,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
211 return err_ret; 211 return err_ret;
212} 212}
213 213
214/* precondition: host controller is claimed */
215static int
216brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
217 uint func, uint addr, struct sk_buff *pkt, uint pktlen)
218{
219 int err_ret = 0;
220
221 if ((write) && (!fifo)) {
222 err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
223 ((u8 *) (pkt->data)), pktlen);
224 } else if (write) {
225 err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
226 ((u8 *) (pkt->data)), pktlen);
227 } else if (fifo) {
228 err_ret = sdio_readsb(sdiodev->func[func],
229 ((u8 *) (pkt->data)), addr, pktlen);
230 } else {
231 err_ret = sdio_memcpy_fromio(sdiodev->func[func],
232 ((u8 *) (pkt->data)),
233 addr, pktlen);
234 }
235
236 return err_ret;
237}
238
239/*
240 * This function takes a queue of packets. The packets on the queue
241 * are assumed to be properly aligned by the caller.
242 */
243int
244brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
245 uint write, uint func, uint addr,
246 struct sk_buff_head *pktq)
247{
248 bool fifo = (fix_inc == SDIOH_DATA_FIX);
249 u32 SGCount = 0;
250 int err_ret = 0;
251
252 struct sk_buff *pkt;
253
254 brcmf_dbg(SDIO, "Enter\n");
255
256 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
257 if (brcmf_pm_resume_error(sdiodev))
258 return -EIO;
259
260 skb_queue_walk(pktq, pkt) {
261 uint pkt_len = pkt->len;
262 pkt_len += 3;
263 pkt_len &= 0xFFFFFFFC;
264
265 err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
266 addr, pkt, pkt_len);
267 if (err_ret) {
268 brcmf_err("%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
269 write ? "TX" : "RX", pkt, SGCount, addr,
270 pkt_len, err_ret);
271 } else {
272 brcmf_dbg(SDIO, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
273 write ? "TX" : "RX", pkt, SGCount, addr,
274 pkt_len);
275 }
276 if (!fifo)
277 addr += pkt_len;
278
279 SGCount++;
280 }
281
282 brcmf_dbg(SDIO, "Exit\n");
283 return err_ret;
284}
285
286/*
287 * This function takes a single DMA-able packet.
288 */
289int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
290 uint fix_inc, uint write, uint func, uint addr,
291 struct sk_buff *pkt)
292{
293 int status;
294 uint pkt_len;
295 bool fifo = (fix_inc == SDIOH_DATA_FIX);
296
297 brcmf_dbg(SDIO, "Enter\n");
298
299 if (pkt == NULL)
300 return -EINVAL;
301 pkt_len = pkt->len;
302
303 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
304 if (brcmf_pm_resume_error(sdiodev))
305 return -EIO;
306
307 pkt_len += 3;
308 pkt_len &= (uint)~3;
309
310 status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
311 addr, pkt, pkt_len);
312 if (status) {
313 brcmf_err("%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
314 write ? "TX" : "RX", pkt, addr, pkt_len, status);
315 } else {
316 brcmf_dbg(SDIO, "%s xfr'd %p, addr=0x%05x, len=%d\n",
317 write ? "TX" : "RX", pkt, addr, pkt_len);
318 }
319
320 return status;
321}
322
323static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr) 214static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
324{ 215{
325 /* read 24 bits and return valid 17 bit addr */ 216 /* read 24 bits and return valid 17 bit addr */
@@ -468,7 +359,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
468 atomic_set(&sdiodev->suspend, false); 359 atomic_set(&sdiodev->suspend, false);
469 init_waitqueue_head(&sdiodev->request_byte_wait); 360 init_waitqueue_head(&sdiodev->request_byte_wait);
470 init_waitqueue_head(&sdiodev->request_word_wait); 361 init_waitqueue_head(&sdiodev->request_word_wait);
471 init_waitqueue_head(&sdiodev->request_chain_wait);
472 init_waitqueue_head(&sdiodev->request_buffer_wait); 362 init_waitqueue_head(&sdiodev->request_buffer_wait);
473 363
474 brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n"); 364 brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n");
@@ -606,7 +496,8 @@ static int brcmf_sdio_pd_remove(struct platform_device *pdev)
606static struct platform_driver brcmf_sdio_pd = { 496static struct platform_driver brcmf_sdio_pd = {
607 .remove = brcmf_sdio_pd_remove, 497 .remove = brcmf_sdio_pd_remove,
608 .driver = { 498 .driver = {
609 .name = BRCMFMAC_SDIO_PDATA_NAME 499 .name = BRCMFMAC_SDIO_PDATA_NAME,
500 .owner = THIS_MODULE,
610 } 501 }
611}; 502};
612 503
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 28db9cf39672..86cbfe2c7c6c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -583,6 +583,7 @@ enum brcmf_netif_stop_reason {
583 * @bssidx: index of bss associated with this interface. 583 * @bssidx: index of bss associated with this interface.
584 * @mac_addr: assigned mac address. 584 * @mac_addr: assigned mac address.
585 * @netif_stop: bitmap indicates reason why netif queues are stopped. 585 * @netif_stop: bitmap indicates reason why netif queues are stopped.
586 * @netif_stop_lock: spinlock for update netif_stop from multiple sources.
586 * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. 587 * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
587 * @pend_8021x_wait: used for signalling change in count. 588 * @pend_8021x_wait: used for signalling change in count.
588 */ 589 */
@@ -598,6 +599,7 @@ struct brcmf_if {
598 s32 bssidx; 599 s32 bssidx;
599 u8 mac_addr[ETH_ALEN]; 600 u8 mac_addr[ETH_ALEN];
600 u8 netif_stop; 601 u8 netif_stop;
602 spinlock_t netif_stop_lock;
601 atomic_t pend_8021x_cnt; 603 atomic_t pend_8021x_cnt;
602 wait_queue_head_t pend_8021x_wait; 604 wait_queue_head_t pend_8021x_wait;
603}; 605};
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index 59c77aa3b959..dd85401063cb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -30,6 +30,7 @@
30#include "dhd_bus.h" 30#include "dhd_bus.h"
31#include "fwsignal.h" 31#include "fwsignal.h"
32#include "dhd_dbg.h" 32#include "dhd_dbg.h"
33#include "tracepoint.h"
33 34
34struct brcmf_proto_cdc_dcmd { 35struct brcmf_proto_cdc_dcmd {
35 __le32 cmd; /* dongle command value */ 36 __le32 cmd; /* dongle command value */
@@ -292,6 +293,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
292 h->flags2 = 0; 293 h->flags2 = 0;
293 h->data_offset = offset; 294 h->data_offset = offset;
294 BDC_SET_IF_IDX(h, ifidx); 295 BDC_SET_IF_IDX(h, ifidx);
296 trace_brcmf_bdchdr(pktbuf->data);
295} 297}
296 298
297int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx, 299int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
@@ -309,6 +311,7 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
309 return -EBADE; 311 return -EBADE;
310 } 312 }
311 313
314 trace_brcmf_bdchdr(pktbuf->data);
312 h = (struct brcmf_proto_bdc_header *)(pktbuf->data); 315 h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
313 316
314 *ifidx = BDC_GET_IF_IDX(h); 317 *ifidx = BDC_GET_IF_IDX(h);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 202869cd0932..c37b9d68e458 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -156,8 +156,11 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
156 "txs_suppr_core: %u\n" 156 "txs_suppr_core: %u\n"
157 "txs_suppr_ps: %u\n" 157 "txs_suppr_ps: %u\n"
158 "txs_tossed: %u\n" 158 "txs_tossed: %u\n"
159 "txs_host_tossed: %u\n"
160 "bus_flow_block: %u\n"
161 "fws_flow_block: %u\n"
159 "send_pkts: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n" 162 "send_pkts: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n"
160 "fifo_credits_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n", 163 "requested_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n",
161 fwstats->header_pulls, 164 fwstats->header_pulls,
162 fwstats->header_only_pkt, 165 fwstats->header_only_pkt,
163 fwstats->tlv_parse_failed, 166 fwstats->tlv_parse_failed,
@@ -176,14 +179,17 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
176 fwstats->txs_supp_core, 179 fwstats->txs_supp_core,
177 fwstats->txs_supp_ps, 180 fwstats->txs_supp_ps,
178 fwstats->txs_tossed, 181 fwstats->txs_tossed,
182 fwstats->txs_host_tossed,
183 fwstats->bus_flow_block,
184 fwstats->fws_flow_block,
179 fwstats->send_pkts[0], fwstats->send_pkts[1], 185 fwstats->send_pkts[0], fwstats->send_pkts[1],
180 fwstats->send_pkts[2], fwstats->send_pkts[3], 186 fwstats->send_pkts[2], fwstats->send_pkts[3],
181 fwstats->send_pkts[4], 187 fwstats->send_pkts[4],
182 fwstats->fifo_credits_sent[0], 188 fwstats->requested_sent[0],
183 fwstats->fifo_credits_sent[1], 189 fwstats->requested_sent[1],
184 fwstats->fifo_credits_sent[2], 190 fwstats->requested_sent[2],
185 fwstats->fifo_credits_sent[3], 191 fwstats->requested_sent[3],
186 fwstats->fifo_credits_sent[4]); 192 fwstats->requested_sent[4]);
187 193
188 return simple_read_from_buffer(data, count, ppos, buf, res); 194 return simple_read_from_buffer(data, count, ppos, buf, res);
189} 195}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index 009c87bfd9ae..0af1f5dc583a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -141,8 +141,7 @@ struct brcmf_fws_stats {
141 u32 header_pulls; 141 u32 header_pulls;
142 u32 pkt2bus; 142 u32 pkt2bus;
143 u32 send_pkts[5]; 143 u32 send_pkts[5];
144 u32 fifo_credits_sent[5]; 144 u32 requested_sent[5];
145 u32 fifo_credits_back[6];
146 u32 generic_error; 145 u32 generic_error;
147 u32 mac_update_failed; 146 u32 mac_update_failed;
148 u32 mac_ps_update_failed; 147 u32 mac_ps_update_failed;
@@ -158,6 +157,9 @@ struct brcmf_fws_stats {
158 u32 txs_supp_core; 157 u32 txs_supp_core;
159 u32 txs_supp_ps; 158 u32 txs_supp_ps;
160 u32 txs_tossed; 159 u32 txs_tossed;
160 u32 txs_host_tossed;
161 u32 bus_flow_block;
162 u32 fws_flow_block;
161}; 163};
162 164
163struct brcmf_pub; 165struct brcmf_pub;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 2c593570497c..8e8975562ec3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -179,7 +179,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
179 struct brcmf_pub *drvr = ifp->drvr; 179 struct brcmf_pub *drvr = ifp->drvr;
180 struct ethhdr *eh; 180 struct ethhdr *eh;
181 181
182 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); 182 brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
183 183
184 /* Can the device send data? */ 184 /* Can the device send data? */
185 if (drvr->bus_if->state != BRCMF_BUS_DATA) { 185 if (drvr->bus_if->state != BRCMF_BUS_DATA) {
@@ -240,11 +240,15 @@ done:
240void brcmf_txflowblock_if(struct brcmf_if *ifp, 240void brcmf_txflowblock_if(struct brcmf_if *ifp,
241 enum brcmf_netif_stop_reason reason, bool state) 241 enum brcmf_netif_stop_reason reason, bool state)
242{ 242{
243 unsigned long flags;
244
243 if (!ifp) 245 if (!ifp)
244 return; 246 return;
245 247
246 brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n", 248 brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
247 ifp->bssidx, ifp->netif_stop, reason, state); 249 ifp->bssidx, ifp->netif_stop, reason, state);
250
251 spin_lock_irqsave(&ifp->netif_stop_lock, flags);
248 if (state) { 252 if (state) {
249 if (!ifp->netif_stop) 253 if (!ifp->netif_stop)
250 netif_stop_queue(ifp->ndev); 254 netif_stop_queue(ifp->ndev);
@@ -254,6 +258,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
254 if (!ifp->netif_stop) 258 if (!ifp->netif_stop)
255 netif_wake_queue(ifp->ndev); 259 netif_wake_queue(ifp->ndev);
256 } 260 }
261 spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
257} 262}
258 263
259void brcmf_txflowblock(struct device *dev, bool state) 264void brcmf_txflowblock(struct device *dev, bool state)
@@ -264,15 +269,18 @@ void brcmf_txflowblock(struct device *dev, bool state)
264 269
265 brcmf_dbg(TRACE, "Enter\n"); 270 brcmf_dbg(TRACE, "Enter\n");
266 271
267 for (i = 0; i < BRCMF_MAX_IFS; i++) 272 if (brcmf_fws_fc_active(drvr->fws)) {
268 brcmf_txflowblock_if(drvr->iflist[i], 273 brcmf_fws_bus_blocked(drvr, state);
269 BRCMF_NETIF_STOP_REASON_BLOCK_BUS, state); 274 } else {
275 for (i = 0; i < BRCMF_MAX_IFS; i++)
276 brcmf_txflowblock_if(drvr->iflist[i],
277 BRCMF_NETIF_STOP_REASON_BLOCK_BUS,
278 state);
279 }
270} 280}
271 281
272void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) 282void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
273{ 283{
274 unsigned char *eth;
275 uint len;
276 struct sk_buff *skb, *pnext; 284 struct sk_buff *skb, *pnext;
277 struct brcmf_if *ifp; 285 struct brcmf_if *ifp;
278 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 286 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -280,7 +288,7 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
280 u8 ifidx; 288 u8 ifidx;
281 int ret; 289 int ret;
282 290
283 brcmf_dbg(TRACE, "Enter\n"); 291 brcmf_dbg(DATA, "Enter\n");
284 292
285 skb_queue_walk_safe(skb_list, skb, pnext) { 293 skb_queue_walk_safe(skb_list, skb, pnext) {
286 skb_unlink(skb, skb_list); 294 skb_unlink(skb, skb_list);
@@ -296,33 +304,12 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
296 continue; 304 continue;
297 } 305 }
298 306
299 /* Get the protocol, maintain skb around eth_type_trans()
300 * The main reason for this hack is for the limitation of
301 * Linux 2.4 where 'eth_type_trans' uses the
302 * 'net->hard_header_len'
303 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
304 * coping of the packet coming from the network stack to add
305 * BDC, Hardware header etc, during network interface
306 * registration
307 * we set the 'net->hard_header_len' to ETH_HLEN + extra space
308 * required
309 * for BDC, Hardware header etc. and not just the ETH_HLEN
310 */
311 eth = skb->data;
312 len = skb->len;
313
314 skb->dev = ifp->ndev; 307 skb->dev = ifp->ndev;
315 skb->protocol = eth_type_trans(skb, skb->dev); 308 skb->protocol = eth_type_trans(skb, skb->dev);
316 309
317 if (skb->pkt_type == PACKET_MULTICAST) 310 if (skb->pkt_type == PACKET_MULTICAST)
318 ifp->stats.multicast++; 311 ifp->stats.multicast++;
319 312
320 skb->data = eth;
321 skb->len = len;
322
323 /* Strip header, count, deliver upward */
324 skb_pull(skb, ETH_HLEN);
325
326 /* Process special event packets */ 313 /* Process special event packets */
327 brcmf_fweh_process_skb(drvr, skb); 314 brcmf_fweh_process_skb(drvr, skb);
328 315
@@ -338,10 +325,8 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
338 netif_rx(skb); 325 netif_rx(skb);
339 else 326 else
340 /* If the receive is not processed inside an ISR, 327 /* If the receive is not processed inside an ISR,
341 * the softirqd must be woken explicitly to service 328 * the softirqd must be woken explicitly to service the
342 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled 329 * NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
343 * by netif_rx_ni(), but in earlier kernels, we need
344 * to do it manually.
345 */ 330 */
346 netif_rx_ni(skb); 331 netif_rx_ni(skb);
347 } 332 }
@@ -630,7 +615,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
630 /* set appropriate operations */ 615 /* set appropriate operations */
631 ndev->netdev_ops = &brcmf_netdev_ops_pri; 616 ndev->netdev_ops = &brcmf_netdev_ops_pri;
632 617
633 ndev->hard_header_len = ETH_HLEN + drvr->hdrlen; 618 ndev->hard_header_len += drvr->hdrlen;
634 ndev->ethtool_ops = &brcmf_ethtool_ops; 619 ndev->ethtool_ops = &brcmf_ethtool_ops;
635 620
636 drvr->rxsz = ndev->mtu + ndev->hard_header_len + 621 drvr->rxsz = ndev->mtu + ndev->hard_header_len +
@@ -779,6 +764,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
779 ifp->bssidx = bssidx; 764 ifp->bssidx = bssidx;
780 765
781 init_waitqueue_head(&ifp->pend_8021x_wait); 766 init_waitqueue_head(&ifp->pend_8021x_wait);
767 spin_lock_init(&ifp->netif_stop_lock);
782 768
783 if (mac_addr != NULL) 769 if (mac_addr != NULL)
784 memcpy(ifp->mac_addr, mac_addr, ETH_ALEN); 770 memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index d2487518bd2a..264111968320 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -448,8 +448,6 @@ struct brcmf_sdio {
448 uint rxblen; /* Allocated length of rxbuf */ 448 uint rxblen; /* Allocated length of rxbuf */
449 u8 *rxctl; /* Aligned pointer into rxbuf */ 449 u8 *rxctl; /* Aligned pointer into rxbuf */
450 u8 *rxctl_orig; /* pointer for freeing rxctl */ 450 u8 *rxctl_orig; /* pointer for freeing rxctl */
451 u8 *databuf; /* Buffer for receiving big glom packet */
452 u8 *dataptr; /* Aligned pointer into databuf */
453 uint rxlen; /* Length of valid data in buffer */ 451 uint rxlen; /* Length of valid data in buffer */
454 spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */ 452 spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */
455 453
@@ -473,8 +471,6 @@ struct brcmf_sdio {
473 s32 idletime; /* Control for activity timeout */ 471 s32 idletime; /* Control for activity timeout */
474 s32 idlecount; /* Activity timeout counter */ 472 s32 idlecount; /* Activity timeout counter */
475 s32 idleclock; /* How to set bus driver when idle */ 473 s32 idleclock; /* How to set bus driver when idle */
476 s32 sd_rxchain;
477 bool use_rxchain; /* If brcmf should use PKT chains */
478 bool rxflow_mode; /* Rx flow control mode */ 474 bool rxflow_mode; /* Rx flow control mode */
479 bool rxflow; /* Is rx flow control on */ 475 bool rxflow; /* Is rx flow control on */
480 bool alp_only; /* Don't use HT clock (ALP only) */ 476 bool alp_only; /* Don't use HT clock (ALP only) */
@@ -495,8 +491,7 @@ struct brcmf_sdio {
495 491
496 struct workqueue_struct *brcmf_wq; 492 struct workqueue_struct *brcmf_wq;
497 struct work_struct datawork; 493 struct work_struct datawork;
498 struct list_head dpc_tsklst; 494 atomic_t dpc_tskcnt;
499 spinlock_t dpc_tl_lock;
500 495
501 const struct firmware *firmware; 496 const struct firmware *firmware;
502 u32 fw_ptr; 497 u32 fw_ptr;
@@ -1026,29 +1021,6 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1026 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 1021 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
1027} 1022}
1028 1023
1029/* copy a buffer into a pkt buffer chain */
1030static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
1031{
1032 uint n, ret = 0;
1033 struct sk_buff *p;
1034 u8 *buf;
1035
1036 buf = bus->dataptr;
1037
1038 /* copy the data */
1039 skb_queue_walk(&bus->glom, p) {
1040 n = min_t(uint, p->len, len);
1041 memcpy(p->data, buf, n);
1042 buf += n;
1043 len -= n;
1044 ret += n;
1045 if (!len)
1046 break;
1047 }
1048
1049 return ret;
1050}
1051
1052/* return total length of buffer chain */ 1024/* return total length of buffer chain */
1053static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus) 1025static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
1054{ 1026{
@@ -1202,8 +1174,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1202 int errcode; 1174 int errcode;
1203 u8 doff, sfdoff; 1175 u8 doff, sfdoff;
1204 1176
1205 bool usechain = bus->use_rxchain;
1206
1207 struct brcmf_sdio_read rd_new; 1177 struct brcmf_sdio_read rd_new;
1208 1178
1209 /* If packets, issue read(s) and send up packet chain */ 1179 /* If packets, issue read(s) and send up packet chain */
@@ -1238,7 +1208,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1238 if (sublen % BRCMF_SDALIGN) { 1208 if (sublen % BRCMF_SDALIGN) {
1239 brcmf_err("sublen %d not multiple of %d\n", 1209 brcmf_err("sublen %d not multiple of %d\n",
1240 sublen, BRCMF_SDALIGN); 1210 sublen, BRCMF_SDALIGN);
1241 usechain = false;
1242 } 1211 }
1243 totlen += sublen; 1212 totlen += sublen;
1244 1213
@@ -1305,27 +1274,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1305 * packet and and copy into the chain. 1274 * packet and and copy into the chain.
1306 */ 1275 */
1307 sdio_claim_host(bus->sdiodev->func[1]); 1276 sdio_claim_host(bus->sdiodev->func[1]);
1308 if (usechain) { 1277 errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
1309 errcode = brcmf_sdcard_recv_chain(bus->sdiodev, 1278 bus->sdiodev->sbwad,
1310 bus->sdiodev->sbwad, 1279 SDIO_FUNC_2, F2SYNC, &bus->glom);
1311 SDIO_FUNC_2, F2SYNC, &bus->glom);
1312 } else if (bus->dataptr) {
1313 errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
1314 bus->sdiodev->sbwad,
1315 SDIO_FUNC_2, F2SYNC,
1316 bus->dataptr, dlen);
1317 sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
1318 if (sublen != dlen) {
1319 brcmf_err("FAILED TO COPY, dlen %d sublen %d\n",
1320 dlen, sublen);
1321 errcode = -1;
1322 }
1323 pnext = NULL;
1324 } else {
1325 brcmf_err("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
1326 dlen);
1327 errcode = -1;
1328 }
1329 sdio_release_host(bus->sdiodev->func[1]); 1280 sdio_release_host(bus->sdiodev->func[1]);
1330 bus->sdcnt.f2rxdata++; 1281 bus->sdcnt.f2rxdata++;
1331 1282
@@ -2061,23 +2012,6 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2061 } 2012 }
2062} 2013}
2063 2014
2064static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
2065{
2066 struct list_head *new_hd;
2067 unsigned long flags;
2068
2069 if (in_interrupt())
2070 new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
2071 else
2072 new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
2073 if (new_hd == NULL)
2074 return;
2075
2076 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2077 list_add_tail(new_hd, &bus->dpc_tsklst);
2078 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2079}
2080
2081static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) 2015static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2082{ 2016{
2083 u8 idx; 2017 u8 idx;
@@ -2312,7 +2246,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2312 (!atomic_read(&bus->fcstate) && 2246 (!atomic_read(&bus->fcstate) &&
2313 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && 2247 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2314 data_ok(bus)) || PKT_AVAILABLE()) { 2248 data_ok(bus)) || PKT_AVAILABLE()) {
2315 brcmf_sdbrcm_adddpctsk(bus); 2249 atomic_inc(&bus->dpc_tskcnt);
2316 } 2250 }
2317 2251
2318 /* If we're done for now, turn off clock request. */ 2252 /* If we're done for now, turn off clock request. */
@@ -2342,7 +2276,6 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2342 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 2276 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2343 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 2277 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2344 struct brcmf_sdio *bus = sdiodev->bus; 2278 struct brcmf_sdio *bus = sdiodev->bus;
2345 unsigned long flags;
2346 2279
2347 brcmf_dbg(TRACE, "Enter\n"); 2280 brcmf_dbg(TRACE, "Enter\n");
2348 2281
@@ -2369,26 +2302,21 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2369 } else { 2302 } else {
2370 ret = 0; 2303 ret = 0;
2371 } 2304 }
2372 spin_unlock_bh(&bus->txqlock);
2373 2305
2374 if (pktq_len(&bus->txq) >= TXHI) { 2306 if (pktq_len(&bus->txq) >= TXHI) {
2375 bus->txoff = true; 2307 bus->txoff = true;
2376 brcmf_txflowblock(bus->sdiodev->dev, true); 2308 brcmf_txflowblock(bus->sdiodev->dev, true);
2377 } 2309 }
2310 spin_unlock_bh(&bus->txqlock);
2378 2311
2379#ifdef DEBUG 2312#ifdef DEBUG
2380 if (pktq_plen(&bus->txq, prec) > qcount[prec]) 2313 if (pktq_plen(&bus->txq, prec) > qcount[prec])
2381 qcount[prec] = pktq_plen(&bus->txq, prec); 2314 qcount[prec] = pktq_plen(&bus->txq, prec);
2382#endif 2315#endif
2383 2316
2384 spin_lock_irqsave(&bus->dpc_tl_lock, flags); 2317 if (atomic_read(&bus->dpc_tskcnt) == 0) {
2385 if (list_empty(&bus->dpc_tsklst)) { 2318 atomic_inc(&bus->dpc_tskcnt);
2386 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2387
2388 brcmf_sdbrcm_adddpctsk(bus);
2389 queue_work(bus->brcmf_wq, &bus->datawork); 2319 queue_work(bus->brcmf_wq, &bus->datawork);
2390 } else {
2391 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2392 } 2320 }
2393 2321
2394 return ret; 2322 return ret;
@@ -2525,7 +2453,6 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2525 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 2453 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2526 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 2454 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2527 struct brcmf_sdio *bus = sdiodev->bus; 2455 struct brcmf_sdio *bus = sdiodev->bus;
2528 unsigned long flags;
2529 2456
2530 brcmf_dbg(TRACE, "Enter\n"); 2457 brcmf_dbg(TRACE, "Enter\n");
2531 2458
@@ -2612,18 +2539,13 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2612 } while (ret < 0 && retries++ < TXRETRIES); 2539 } while (ret < 0 && retries++ < TXRETRIES);
2613 } 2540 }
2614 2541
2615 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2616 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && 2542 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
2617 list_empty(&bus->dpc_tsklst)) { 2543 atomic_read(&bus->dpc_tskcnt) == 0) {
2618 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2619
2620 bus->activity = false; 2544 bus->activity = false;
2621 sdio_claim_host(bus->sdiodev->func[1]); 2545 sdio_claim_host(bus->sdiodev->func[1]);
2622 brcmf_dbg(INFO, "idle\n"); 2546 brcmf_dbg(INFO, "idle\n");
2623 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true); 2547 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
2624 sdio_release_host(bus->sdiodev->func[1]); 2548 sdio_release_host(bus->sdiodev->func[1]);
2625 } else {
2626 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2627 } 2549 }
2628 2550
2629 if (ret) 2551 if (ret)
@@ -3451,7 +3373,7 @@ void brcmf_sdbrcm_isr(void *arg)
3451 if (!bus->intr) 3373 if (!bus->intr)
3452 brcmf_err("isr w/o interrupt configured!\n"); 3374 brcmf_err("isr w/o interrupt configured!\n");
3453 3375
3454 brcmf_sdbrcm_adddpctsk(bus); 3376 atomic_inc(&bus->dpc_tskcnt);
3455 queue_work(bus->brcmf_wq, &bus->datawork); 3377 queue_work(bus->brcmf_wq, &bus->datawork);
3456} 3378}
3457 3379
@@ -3460,7 +3382,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3460#ifdef DEBUG 3382#ifdef DEBUG
3461 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev); 3383 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
3462#endif /* DEBUG */ 3384#endif /* DEBUG */
3463 unsigned long flags;
3464 3385
3465 brcmf_dbg(TIMER, "Enter\n"); 3386 brcmf_dbg(TIMER, "Enter\n");
3466 3387
@@ -3476,11 +3397,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3476 if (!bus->intr || 3397 if (!bus->intr ||
3477 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) { 3398 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3478 3399
3479 spin_lock_irqsave(&bus->dpc_tl_lock, flags); 3400 if (atomic_read(&bus->dpc_tskcnt) == 0) {
3480 if (list_empty(&bus->dpc_tsklst)) {
3481 u8 devpend; 3401 u8 devpend;
3482 spin_unlock_irqrestore(&bus->dpc_tl_lock, 3402
3483 flags);
3484 sdio_claim_host(bus->sdiodev->func[1]); 3403 sdio_claim_host(bus->sdiodev->func[1]);
3485 devpend = brcmf_sdio_regrb(bus->sdiodev, 3404 devpend = brcmf_sdio_regrb(bus->sdiodev,
3486 SDIO_CCCR_INTx, 3405 SDIO_CCCR_INTx,
@@ -3489,9 +3408,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3489 intstatus = 3408 intstatus =
3490 devpend & (INTR_STATUS_FUNC1 | 3409 devpend & (INTR_STATUS_FUNC1 |
3491 INTR_STATUS_FUNC2); 3410 INTR_STATUS_FUNC2);
3492 } else {
3493 spin_unlock_irqrestore(&bus->dpc_tl_lock,
3494 flags);
3495 } 3411 }
3496 3412
3497 /* If there is something, make like the ISR and 3413 /* If there is something, make like the ISR and
@@ -3500,7 +3416,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3500 bus->sdcnt.pollcnt++; 3416 bus->sdcnt.pollcnt++;
3501 atomic_set(&bus->ipend, 1); 3417 atomic_set(&bus->ipend, 1);
3502 3418
3503 brcmf_sdbrcm_adddpctsk(bus); 3419 atomic_inc(&bus->dpc_tskcnt);
3504 queue_work(bus->brcmf_wq, &bus->datawork); 3420 queue_work(bus->brcmf_wq, &bus->datawork);
3505 } 3421 }
3506 } 3422 }
@@ -3545,41 +3461,15 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3545 return (atomic_read(&bus->ipend) > 0); 3461 return (atomic_read(&bus->ipend) > 0);
3546} 3462}
3547 3463
3548static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3549{
3550 if (chipid == BCM43143_CHIP_ID)
3551 return true;
3552 if (chipid == BCM43241_CHIP_ID)
3553 return true;
3554 if (chipid == BCM4329_CHIP_ID)
3555 return true;
3556 if (chipid == BCM4330_CHIP_ID)
3557 return true;
3558 if (chipid == BCM4334_CHIP_ID)
3559 return true;
3560 if (chipid == BCM4335_CHIP_ID)
3561 return true;
3562 return false;
3563}
3564
3565static void brcmf_sdio_dataworker(struct work_struct *work) 3464static void brcmf_sdio_dataworker(struct work_struct *work)
3566{ 3465{
3567 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio, 3466 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3568 datawork); 3467 datawork);
3569 struct list_head *cur_hd, *tmp_hd;
3570 unsigned long flags;
3571
3572 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3573 list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
3574 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
3575 3468
3469 while (atomic_read(&bus->dpc_tskcnt)) {
3576 brcmf_sdbrcm_dpc(bus); 3470 brcmf_sdbrcm_dpc(bus);
3577 3471 atomic_dec(&bus->dpc_tskcnt);
3578 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3579 list_del(cur_hd);
3580 kfree(cur_hd);
3581 } 3472 }
3582 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
3583} 3473}
3584 3474
3585static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus) 3475static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
@@ -3589,9 +3479,6 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
3589 kfree(bus->rxbuf); 3479 kfree(bus->rxbuf);
3590 bus->rxctl = bus->rxbuf = NULL; 3480 bus->rxctl = bus->rxbuf = NULL;
3591 bus->rxlen = 0; 3481 bus->rxlen = 0;
3592
3593 kfree(bus->databuf);
3594 bus->databuf = NULL;
3595} 3482}
3596 3483
3597static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus) 3484static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
@@ -3604,29 +3491,10 @@ static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
3604 ALIGNMENT) + BRCMF_SDALIGN; 3491 ALIGNMENT) + BRCMF_SDALIGN;
3605 bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC); 3492 bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
3606 if (!(bus->rxbuf)) 3493 if (!(bus->rxbuf))
3607 goto fail; 3494 return false;
3608 }
3609
3610 /* Allocate buffer to receive glomed packet */
3611 bus->databuf = kmalloc(MAX_DATA_BUF, GFP_ATOMIC);
3612 if (!(bus->databuf)) {
3613 /* release rxbuf which was already located as above */
3614 if (!bus->rxblen)
3615 kfree(bus->rxbuf);
3616 goto fail;
3617 } 3495 }
3618 3496
3619 /* Align the buffer */
3620 if ((unsigned long)bus->databuf % BRCMF_SDALIGN)
3621 bus->dataptr = bus->databuf + (BRCMF_SDALIGN -
3622 ((unsigned long)bus->databuf % BRCMF_SDALIGN));
3623 else
3624 bus->dataptr = bus->databuf;
3625
3626 return true; 3497 return true;
3627
3628fail:
3629 return false;
3630} 3498}
3631 3499
3632static bool 3500static bool
@@ -3667,11 +3535,6 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3667 goto fail; 3535 goto fail;
3668 } 3536 }
3669 3537
3670 if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
3671 brcmf_err("unsupported chip: 0x%04x\n", bus->ci->chip);
3672 goto fail;
3673 }
3674
3675 if (brcmf_sdbrcm_kso_init(bus)) { 3538 if (brcmf_sdbrcm_kso_init(bus)) {
3676 brcmf_err("error enabling KSO\n"); 3539 brcmf_err("error enabling KSO\n");
3677 goto fail; 3540 goto fail;
@@ -3770,10 +3633,6 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
3770 bus->blocksize = bus->sdiodev->func[2]->cur_blksize; 3633 bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
3771 bus->roundup = min(max_roundup, bus->blocksize); 3634 bus->roundup = min(max_roundup, bus->blocksize);
3772 3635
3773 /* bus module does not support packet chaining */
3774 bus->use_rxchain = false;
3775 bus->sd_rxchain = false;
3776
3777 /* SR state */ 3636 /* SR state */
3778 bus->sleeping = false; 3637 bus->sleeping = false;
3779 bus->sr_enabled = false; 3638 bus->sr_enabled = false;
@@ -3927,8 +3786,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3927 bus->watchdog_tsk = NULL; 3786 bus->watchdog_tsk = NULL;
3928 } 3787 }
3929 /* Initialize DPC thread */ 3788 /* Initialize DPC thread */
3930 INIT_LIST_HEAD(&bus->dpc_tsklst); 3789 atomic_set(&bus->dpc_tskcnt, 0);
3931 spin_lock_init(&bus->dpc_tl_lock);
3932 3790
3933 /* Assign bus interface call back */ 3791 /* Assign bus interface call back */
3934 bus->sdiodev->bus_if->dev = bus->sdiodev->dev; 3792 bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index 6ec5db9c60a5..e679214b3c98 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -101,7 +101,8 @@ struct brcmf_event;
101 BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \ 101 BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \
102 BRCMF_ENUM_DEF(DCS_REQUEST, 73) \ 102 BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
103 BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \ 103 BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
104 BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) 104 BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
105 BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
105 106
106#define BRCMF_ENUM_DEF(id, val) \ 107#define BRCMF_ENUM_DEF(id, val) \
107 BRCMF_E_##id = (val), 108 BRCMF_E_##id = (val),
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 5352dc1fdf3c..f0d9f7f6c83d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -22,7 +22,6 @@
22#include <linux/etherdevice.h> 22#include <linux/etherdevice.h>
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/jiffies.h> 24#include <linux/jiffies.h>
25#include <uapi/linux/nl80211.h>
26#include <net/cfg80211.h> 25#include <net/cfg80211.h>
27 26
28#include <brcmu_utils.h> 27#include <brcmu_utils.h>
@@ -142,7 +141,7 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
142#define BRCMF_FWS_FLOWCONTROL_HIWATER 128 141#define BRCMF_FWS_FLOWCONTROL_HIWATER 128
143#define BRCMF_FWS_FLOWCONTROL_LOWATER 64 142#define BRCMF_FWS_FLOWCONTROL_LOWATER 64
144 143
145#define BRCMF_FWS_PSQ_PREC_COUNT ((NL80211_NUM_ACS + 1) * 2) 144#define BRCMF_FWS_PSQ_PREC_COUNT ((BRCMF_FWS_FIFO_COUNT + 1) * 2)
146#define BRCMF_FWS_PSQ_LEN 256 145#define BRCMF_FWS_PSQ_LEN 256
147 146
148#define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST 0x01 147#define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST 0x01
@@ -157,11 +156,13 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
157 * @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver. 156 * @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver.
158 * @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue. 157 * @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue.
159 * @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware. 158 * @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware.
159 * @BRCMF_FWS_SKBSTATE_TIM: allocated for TIM update info.
160 */ 160 */
161enum brcmf_fws_skb_state { 161enum brcmf_fws_skb_state {
162 BRCMF_FWS_SKBSTATE_NEW, 162 BRCMF_FWS_SKBSTATE_NEW,
163 BRCMF_FWS_SKBSTATE_DELAYED, 163 BRCMF_FWS_SKBSTATE_DELAYED,
164 BRCMF_FWS_SKBSTATE_SUPPRESSED 164 BRCMF_FWS_SKBSTATE_SUPPRESSED,
165 BRCMF_FWS_SKBSTATE_TIM
165}; 166};
166 167
167/** 168/**
@@ -193,9 +194,8 @@ struct brcmf_skbuff_cb {
193 * b[11] - packet sent upon firmware request. 194 * b[11] - packet sent upon firmware request.
194 * b[10] - packet only contains signalling data. 195 * b[10] - packet only contains signalling data.
195 * b[9] - packet is a tx packet. 196 * b[9] - packet is a tx packet.
196 * b[8] - packet uses FIFO credit (non-pspoll). 197 * b[8] - packet used requested credit
197 * b[7] - interface in AP mode. 198 * b[7] - interface in AP mode.
198 * b[6:4] - AC FIFO number.
199 * b[3:0] - interface index. 199 * b[3:0] - interface index.
200 */ 200 */
201#define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK 0x0800 201#define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK 0x0800
@@ -204,12 +204,10 @@ struct brcmf_skbuff_cb {
204#define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT 10 204#define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT 10
205#define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK 0x0200 205#define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK 0x0200
206#define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT 9 206#define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT 9
207#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK 0x0100 207#define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_MASK 0x0100
208#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_SHIFT 8 208#define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_SHIFT 8
209#define BRCMF_SKB_IF_FLAGS_IF_AP_MASK 0x0080 209#define BRCMF_SKB_IF_FLAGS_IF_AP_MASK 0x0080
210#define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT 7 210#define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT 7
211#define BRCMF_SKB_IF_FLAGS_FIFO_MASK 0x0070
212#define BRCMF_SKB_IF_FLAGS_FIFO_SHIFT 4
213#define BRCMF_SKB_IF_FLAGS_INDEX_MASK 0x000f 211#define BRCMF_SKB_IF_FLAGS_INDEX_MASK 0x000f
214#define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT 0 212#define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT 0
215 213
@@ -246,7 +244,7 @@ struct brcmf_skbuff_cb {
246#define BRCMF_SKB_HTOD_TAG_HSLOT_MASK 0x00ffff00 244#define BRCMF_SKB_HTOD_TAG_HSLOT_MASK 0x00ffff00
247#define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT 8 245#define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT 8
248#define BRCMF_SKB_HTOD_TAG_FREERUN_MASK 0x000000ff 246#define BRCMF_SKB_HTOD_TAG_FREERUN_MASK 0x000000ff
249#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT 0 247#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT 0
250 248
251#define brcmf_skb_htod_tag_set_field(skb, field, value) \ 249#define brcmf_skb_htod_tag_set_field(skb, field, value) \
252 brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \ 250 brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \
@@ -278,6 +276,7 @@ struct brcmf_skbuff_cb {
278/** 276/**
279 * enum brcmf_fws_fifo - fifo indices used by dongle firmware. 277 * enum brcmf_fws_fifo - fifo indices used by dongle firmware.
280 * 278 *
279 * @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background.
281 * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic. 280 * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
282 * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic. 281 * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
283 * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic. 282 * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
@@ -287,7 +286,8 @@ struct brcmf_skbuff_cb {
287 * @BRCMF_FWS_FIFO_COUNT: number of fifos. 286 * @BRCMF_FWS_FIFO_COUNT: number of fifos.
288 */ 287 */
289enum brcmf_fws_fifo { 288enum brcmf_fws_fifo {
290 BRCMF_FWS_FIFO_AC_BK, 289 BRCMF_FWS_FIFO_FIRST,
290 BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST,
291 BRCMF_FWS_FIFO_AC_BE, 291 BRCMF_FWS_FIFO_AC_BE,
292 BRCMF_FWS_FIFO_AC_VI, 292 BRCMF_FWS_FIFO_AC_VI,
293 BRCMF_FWS_FIFO_AC_VO, 293 BRCMF_FWS_FIFO_AC_VO,
@@ -307,12 +307,15 @@ enum brcmf_fws_fifo {
307 * firmware suppress the packet as device is already in PS mode. 307 * firmware suppress the packet as device is already in PS mode.
308 * @BRCMF_FWS_TXSTATUS_FW_TOSSED: 308 * @BRCMF_FWS_TXSTATUS_FW_TOSSED:
309 * firmware tossed the packet. 309 * firmware tossed the packet.
310 * @BRCMF_FWS_TXSTATUS_HOST_TOSSED:
311 * host tossed the packet.
310 */ 312 */
311enum brcmf_fws_txstatus { 313enum brcmf_fws_txstatus {
312 BRCMF_FWS_TXSTATUS_DISCARD, 314 BRCMF_FWS_TXSTATUS_DISCARD,
313 BRCMF_FWS_TXSTATUS_CORE_SUPPRESS, 315 BRCMF_FWS_TXSTATUS_CORE_SUPPRESS,
314 BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS, 316 BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS,
315 BRCMF_FWS_TXSTATUS_FW_TOSSED 317 BRCMF_FWS_TXSTATUS_FW_TOSSED,
318 BRCMF_FWS_TXSTATUS_HOST_TOSSED
316}; 319};
317 320
318enum brcmf_fws_fcmode { 321enum brcmf_fws_fcmode {
@@ -343,6 +346,7 @@ enum brcmf_fws_mac_desc_state {
343 * @transit_count: packet in transit to firmware. 346 * @transit_count: packet in transit to firmware.
344 */ 347 */
345struct brcmf_fws_mac_descriptor { 348struct brcmf_fws_mac_descriptor {
349 char name[16];
346 u8 occupied; 350 u8 occupied;
347 u8 mac_handle; 351 u8 mac_handle;
348 u8 interface_id; 352 u8 interface_id;
@@ -356,7 +360,6 @@ struct brcmf_fws_mac_descriptor {
356 u8 seq[BRCMF_FWS_FIFO_COUNT]; 360 u8 seq[BRCMF_FWS_FIFO_COUNT];
357 struct pktq psq; 361 struct pktq psq;
358 int transit_count; 362 int transit_count;
359 int suppress_count;
360 int suppr_transit_count; 363 int suppr_transit_count;
361 bool send_tim_signal; 364 bool send_tim_signal;
362 u8 traffic_pending_bmp; 365 u8 traffic_pending_bmp;
@@ -383,12 +386,10 @@ enum brcmf_fws_hanger_item_state {
383 * struct brcmf_fws_hanger_item - single entry for tx pending packet. 386 * struct brcmf_fws_hanger_item - single entry for tx pending packet.
384 * 387 *
385 * @state: entry is either free or occupied. 388 * @state: entry is either free or occupied.
386 * @gen: generation.
387 * @pkt: packet itself. 389 * @pkt: packet itself.
388 */ 390 */
389struct brcmf_fws_hanger_item { 391struct brcmf_fws_hanger_item {
390 enum brcmf_fws_hanger_item_state state; 392 enum brcmf_fws_hanger_item_state state;
391 u8 gen;
392 struct sk_buff *pkt; 393 struct sk_buff *pkt;
393}; 394};
394 395
@@ -424,6 +425,7 @@ struct brcmf_fws_info {
424 struct brcmf_fws_stats stats; 425 struct brcmf_fws_stats stats;
425 struct brcmf_fws_hanger hanger; 426 struct brcmf_fws_hanger hanger;
426 enum brcmf_fws_fcmode fcmode; 427 enum brcmf_fws_fcmode fcmode;
428 bool bcmc_credit_check;
427 struct brcmf_fws_macdesc_table desc; 429 struct brcmf_fws_macdesc_table desc;
428 struct workqueue_struct *fws_wq; 430 struct workqueue_struct *fws_wq;
429 struct work_struct fws_dequeue_work; 431 struct work_struct fws_dequeue_work;
@@ -434,6 +436,8 @@ struct brcmf_fws_info {
434 u32 fifo_credit_map; 436 u32 fifo_credit_map;
435 u32 fifo_delay_map; 437 u32 fifo_delay_map;
436 unsigned long borrow_defer_timestamp; 438 unsigned long borrow_defer_timestamp;
439 bool bus_flow_blocked;
440 bool creditmap_received;
437}; 441};
438 442
439/* 443/*
@@ -507,7 +511,6 @@ static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
507{ 511{
508 int i; 512 int i;
509 513
510 brcmf_dbg(TRACE, "enter\n");
511 memset(hanger, 0, sizeof(*hanger)); 514 memset(hanger, 0, sizeof(*hanger));
512 for (i = 0; i < ARRAY_SIZE(hanger->items); i++) 515 for (i = 0; i < ARRAY_SIZE(hanger->items); i++)
513 hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; 516 hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
@@ -517,7 +520,6 @@ static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
517{ 520{
518 u32 i; 521 u32 i;
519 522
520 brcmf_dbg(TRACE, "enter\n");
521 i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS; 523 i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS;
522 524
523 while (i != h->slot_pos) { 525 while (i != h->slot_pos) {
@@ -533,14 +535,12 @@ static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
533 h->failed_slotfind++; 535 h->failed_slotfind++;
534 i = BRCMF_FWS_HANGER_MAXITEMS; 536 i = BRCMF_FWS_HANGER_MAXITEMS;
535done: 537done:
536 brcmf_dbg(TRACE, "exit: %d\n", i);
537 return i; 538 return i;
538} 539}
539 540
540static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h, 541static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
541 struct sk_buff *pkt, u32 slot_id) 542 struct sk_buff *pkt, u32 slot_id)
542{ 543{
543 brcmf_dbg(TRACE, "enter\n");
544 if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) 544 if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
545 return -ENOENT; 545 return -ENOENT;
546 546
@@ -560,7 +560,6 @@ static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
560 u32 slot_id, struct sk_buff **pktout, 560 u32 slot_id, struct sk_buff **pktout,
561 bool remove_item) 561 bool remove_item)
562{ 562{
563 brcmf_dbg(TRACE, "enter\n");
564 if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) 563 if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
565 return -ENOENT; 564 return -ENOENT;
566 565
@@ -574,23 +573,18 @@ static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
574 if (remove_item) { 573 if (remove_item) {
575 h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; 574 h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
576 h->items[slot_id].pkt = NULL; 575 h->items[slot_id].pkt = NULL;
577 h->items[slot_id].gen = 0xff;
578 h->popped++; 576 h->popped++;
579 } 577 }
580 return 0; 578 return 0;
581} 579}
582 580
583static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h, 581static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
584 u32 slot_id, u8 gen) 582 u32 slot_id)
585{ 583{
586 brcmf_dbg(TRACE, "enter\n");
587
588 if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) 584 if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
589 return -ENOENT; 585 return -ENOENT;
590 586
591 h->items[slot_id].gen = gen; 587 if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
592
593 if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_INUSE) {
594 brcmf_err("entry not in use\n"); 588 brcmf_err("entry not in use\n");
595 return -EINVAL; 589 return -EINVAL;
596 } 590 }
@@ -599,25 +593,6 @@ static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
599 return 0; 593 return 0;
600} 594}
601 595
602static int brcmf_fws_hanger_get_genbit(struct brcmf_fws_hanger *hanger,
603 struct sk_buff *pkt, u32 slot_id,
604 int *gen)
605{
606 brcmf_dbg(TRACE, "enter\n");
607 *gen = 0xff;
608
609 if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
610 return -ENOENT;
611
612 if (hanger->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
613 brcmf_err("slot not in use\n");
614 return -EINVAL;
615 }
616
617 *gen = hanger->items[slot_id].gen;
618 return 0;
619}
620
621static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws, 596static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
622 bool (*fn)(struct sk_buff *, void *), 597 bool (*fn)(struct sk_buff *, void *),
623 int ifidx) 598 int ifidx)
@@ -627,7 +602,6 @@ static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
627 int i; 602 int i;
628 enum brcmf_fws_hanger_item_state s; 603 enum brcmf_fws_hanger_item_state s;
629 604
630 brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
631 for (i = 0; i < ARRAY_SIZE(h->items); i++) { 605 for (i = 0; i < ARRAY_SIZE(h->items); i++) {
632 s = h->items[i].state; 606 s = h->items[i].state;
633 if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE || 607 if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE ||
@@ -644,14 +618,28 @@ static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
644 } 618 }
645} 619}
646 620
647static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc, 621static void brcmf_fws_macdesc_set_name(struct brcmf_fws_info *fws,
648 u8 *addr, u8 ifidx) 622 struct brcmf_fws_mac_descriptor *desc)
623{
624 if (desc == &fws->desc.other)
625 strlcpy(desc->name, "MAC-OTHER", sizeof(desc->name));
626 else if (desc->mac_handle)
627 scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d",
628 desc->mac_handle, desc->interface_id);
629 else
630 scnprintf(desc->name, sizeof(desc->name), "MACIF:%d",
631 desc->interface_id);
632}
633
634static void brcmf_fws_macdesc_init(struct brcmf_fws_mac_descriptor *desc,
635 u8 *addr, u8 ifidx)
649{ 636{
650 brcmf_dbg(TRACE, 637 brcmf_dbg(TRACE,
651 "enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx); 638 "enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx);
652 desc->occupied = 1; 639 desc->occupied = 1;
653 desc->state = BRCMF_FWS_STATE_OPEN; 640 desc->state = BRCMF_FWS_STATE_OPEN;
654 desc->requested_credit = 0; 641 desc->requested_credit = 0;
642 desc->requested_packet = 0;
655 /* depending on use may need ifp->bssidx instead */ 643 /* depending on use may need ifp->bssidx instead */
656 desc->interface_id = ifidx; 644 desc->interface_id = ifidx;
657 desc->ac_bitmap = 0xff; /* update this when handling APSD */ 645 desc->ac_bitmap = 0xff; /* update this when handling APSD */
@@ -660,22 +648,22 @@ static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc,
660} 648}
661 649
662static 650static
663void brcmf_fws_clear_mac_descriptor(struct brcmf_fws_mac_descriptor *desc) 651void brcmf_fws_macdesc_deinit(struct brcmf_fws_mac_descriptor *desc)
664{ 652{
665 brcmf_dbg(TRACE, 653 brcmf_dbg(TRACE,
666 "enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id); 654 "enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id);
667 desc->occupied = 0; 655 desc->occupied = 0;
668 desc->state = BRCMF_FWS_STATE_CLOSE; 656 desc->state = BRCMF_FWS_STATE_CLOSE;
669 desc->requested_credit = 0; 657 desc->requested_credit = 0;
658 desc->requested_packet = 0;
670} 659}
671 660
672static struct brcmf_fws_mac_descriptor * 661static struct brcmf_fws_mac_descriptor *
673brcmf_fws_mac_descriptor_lookup(struct brcmf_fws_info *fws, u8 *ea) 662brcmf_fws_macdesc_lookup(struct brcmf_fws_info *fws, u8 *ea)
674{ 663{
675 struct brcmf_fws_mac_descriptor *entry; 664 struct brcmf_fws_mac_descriptor *entry;
676 int i; 665 int i;
677 666
678 brcmf_dbg(TRACE, "enter: ea=%pM\n", ea);
679 if (ea == NULL) 667 if (ea == NULL)
680 return ERR_PTR(-EINVAL); 668 return ERR_PTR(-EINVAL);
681 669
@@ -690,42 +678,33 @@ brcmf_fws_mac_descriptor_lookup(struct brcmf_fws_info *fws, u8 *ea)
690} 678}
691 679
692static struct brcmf_fws_mac_descriptor* 680static struct brcmf_fws_mac_descriptor*
693brcmf_fws_find_mac_desc(struct brcmf_fws_info *fws, struct brcmf_if *ifp, 681brcmf_fws_macdesc_find(struct brcmf_fws_info *fws, struct brcmf_if *ifp, u8 *da)
694 u8 *da)
695{ 682{
696 struct brcmf_fws_mac_descriptor *entry = &fws->desc.other; 683 struct brcmf_fws_mac_descriptor *entry = &fws->desc.other;
697 bool multicast; 684 bool multicast;
698 enum nl80211_iftype iftype;
699
700 brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
701 685
702 multicast = is_multicast_ether_addr(da); 686 multicast = is_multicast_ether_addr(da);
703 iftype = brcmf_cfg80211_get_iftype(ifp);
704 687
705 /* Multicast destination and P2P clients get the interface entry. 688 /* Multicast destination, STA and P2P clients get the interface entry.
706 * STA gets the interface entry if there is no exact match. For 689 * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations
707 * example, TDLS destinations have their own entry. 690 * have their own entry.
708 */ 691 */
709 entry = NULL; 692 if (multicast && ifp->fws_desc) {
710 if ((multicast || iftype == NL80211_IFTYPE_STATION ||
711 iftype == NL80211_IFTYPE_P2P_CLIENT) && ifp->fws_desc)
712 entry = ifp->fws_desc; 693 entry = ifp->fws_desc;
713
714 if (entry != NULL && iftype != NL80211_IFTYPE_STATION)
715 goto done; 694 goto done;
695 }
716 696
717 entry = brcmf_fws_mac_descriptor_lookup(fws, da); 697 entry = brcmf_fws_macdesc_lookup(fws, da);
718 if (IS_ERR(entry)) 698 if (IS_ERR(entry))
719 entry = &fws->desc.other; 699 entry = ifp->fws_desc;
720 700
721done: 701done:
722 brcmf_dbg(TRACE, "exit: entry=%p\n", entry);
723 return entry; 702 return entry;
724} 703}
725 704
726static bool brcmf_fws_mac_desc_closed(struct brcmf_fws_info *fws, 705static bool brcmf_fws_macdesc_closed(struct brcmf_fws_info *fws,
727 struct brcmf_fws_mac_descriptor *entry, 706 struct brcmf_fws_mac_descriptor *entry,
728 int fifo) 707 int fifo)
729{ 708{
730 struct brcmf_fws_mac_descriptor *if_entry; 709 struct brcmf_fws_mac_descriptor *if_entry;
731 bool closed; 710 bool closed;
@@ -748,15 +727,11 @@ static bool brcmf_fws_mac_desc_closed(struct brcmf_fws_info *fws,
748 return closed || !(entry->ac_bitmap & BIT(fifo)); 727 return closed || !(entry->ac_bitmap & BIT(fifo));
749} 728}
750 729
751static void brcmf_fws_mac_desc_cleanup(struct brcmf_fws_info *fws, 730static void brcmf_fws_macdesc_cleanup(struct brcmf_fws_info *fws,
752 struct brcmf_fws_mac_descriptor *entry, 731 struct brcmf_fws_mac_descriptor *entry,
753 int ifidx) 732 int ifidx)
754{ 733{
755 brcmf_dbg(TRACE, "enter: entry=(ea=%pM, ifid=%d), ifidx=%d\n",
756 entry->ea, entry->interface_id, ifidx);
757 if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) { 734 if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) {
758 brcmf_dbg(TRACE, "flush psq: ifidx=%d, qlen=%d\n",
759 ifidx, entry->psq.len);
760 brcmf_fws_psq_flush(fws, &entry->psq, ifidx); 735 brcmf_fws_psq_flush(fws, &entry->psq, ifidx);
761 entry->occupied = !!(entry->psq.len); 736 entry->occupied = !!(entry->psq.len);
762 } 737 }
@@ -772,7 +747,6 @@ static void brcmf_fws_bus_txq_cleanup(struct brcmf_fws_info *fws,
772 int prec; 747 int prec;
773 u32 hslot; 748 u32 hslot;
774 749
775 brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
776 txq = brcmf_bus_gettxq(fws->drvr->bus_if); 750 txq = brcmf_bus_gettxq(fws->drvr->bus_if);
777 if (IS_ERR(txq)) { 751 if (IS_ERR(txq)) {
778 brcmf_dbg(TRACE, "no txq to clean up\n"); 752 brcmf_dbg(TRACE, "no txq to clean up\n");
@@ -798,7 +772,6 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
798 struct brcmf_fws_mac_descriptor *table; 772 struct brcmf_fws_mac_descriptor *table;
799 bool (*matchfn)(struct sk_buff *, void *) = NULL; 773 bool (*matchfn)(struct sk_buff *, void *) = NULL;
800 774
801 brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
802 if (fws == NULL) 775 if (fws == NULL)
803 return; 776 return;
804 777
@@ -808,51 +781,121 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
808 /* cleanup individual nodes */ 781 /* cleanup individual nodes */
809 table = &fws->desc.nodes[0]; 782 table = &fws->desc.nodes[0];
810 for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++) 783 for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++)
811 brcmf_fws_mac_desc_cleanup(fws, &table[i], ifidx); 784 brcmf_fws_macdesc_cleanup(fws, &table[i], ifidx);
812 785
813 brcmf_fws_mac_desc_cleanup(fws, &fws->desc.other, ifidx); 786 brcmf_fws_macdesc_cleanup(fws, &fws->desc.other, ifidx);
814 brcmf_fws_bus_txq_cleanup(fws, matchfn, ifidx); 787 brcmf_fws_bus_txq_cleanup(fws, matchfn, ifidx);
815 brcmf_fws_hanger_cleanup(fws, matchfn, ifidx); 788 brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
816} 789}
817 790
818static void brcmf_fws_tim_update(struct brcmf_fws_info *ctx, 791static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
819 struct brcmf_fws_mac_descriptor *entry,
820 int prec)
821{ 792{
822 brcmf_dbg(TRACE, "enter: ea=%pM\n", entry->ea); 793 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
823 if (entry->state == BRCMF_FWS_STATE_CLOSE) { 794 u8 *wlh;
824 /* check delayedQ and suppressQ in one call using bitmap */ 795 u16 data_offset = 0;
825 if (brcmu_pktq_mlen(&entry->psq, 3 << (prec * 2)) == 0) 796 u8 fillers;
826 entry->traffic_pending_bmp = 797 __le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
827 entry->traffic_pending_bmp & ~NBITVAL(prec); 798
828 else 799 brcmf_dbg(TRACE, "enter: %s, idx=%d pkttag=0x%08X, hslot=%d\n",
829 entry->traffic_pending_bmp = 800 entry->name, brcmf_skb_if_flags_get_field(skb, INDEX),
830 entry->traffic_pending_bmp | NBITVAL(prec); 801 le32_to_cpu(pkttag), (le32_to_cpu(pkttag) >> 8) & 0xffff);
802 if (entry->send_tim_signal)
803 data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
804
805 /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
806 data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
807 fillers = round_up(data_offset, 4) - data_offset;
808 data_offset += fillers;
809
810 skb_push(skb, data_offset);
811 wlh = skb->data;
812
813 wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
814 wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
815 memcpy(&wlh[2], &pkttag, sizeof(pkttag));
816 wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2;
817
818 if (entry->send_tim_signal) {
819 entry->send_tim_signal = 0;
820 wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
821 wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
822 wlh[2] = entry->mac_handle;
823 wlh[3] = entry->traffic_pending_bmp;
824 brcmf_dbg(TRACE, "adding TIM info: handle %d bmp 0x%X\n",
825 entry->mac_handle, entry->traffic_pending_bmp);
826 wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
827 entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
831 } 828 }
832 /* request a TIM update to firmware at the next piggyback opportunity */ 829 if (fillers)
830 memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
831
832 brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
833 data_offset >> 2, skb);
834 return 0;
835}
836
837static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
838 struct brcmf_fws_mac_descriptor *entry,
839 int fifo, bool send_immediately)
840{
841 struct sk_buff *skb;
842 struct brcmf_bus *bus;
843 struct brcmf_skbuff_cb *skcb;
844 s32 err;
845 u32 len;
846
847 /* check delayedQ and suppressQ in one call using bitmap */
848 if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0)
849 entry->traffic_pending_bmp &= ~NBITVAL(fifo);
850 else
851 entry->traffic_pending_bmp |= NBITVAL(fifo);
852
853 entry->send_tim_signal = false;
833 if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) 854 if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp)
834 entry->send_tim_signal = true; 855 entry->send_tim_signal = true;
856 if (send_immediately && entry->send_tim_signal &&
857 entry->state == BRCMF_FWS_STATE_CLOSE) {
858 /* create a dummy packet and sent that. The traffic */
859 /* bitmap info will automatically be attached to that packet */
860 len = BRCMF_FWS_TYPE_PKTTAG_LEN + 2 +
861 BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 +
862 4 + fws->drvr->hdrlen;
863 skb = brcmu_pkt_buf_get_skb(len);
864 if (skb == NULL)
865 return false;
866 skb_pull(skb, len);
867 skcb = brcmf_skbcb(skb);
868 skcb->mac = entry;
869 skcb->state = BRCMF_FWS_SKBSTATE_TIM;
870 bus = fws->drvr->bus_if;
871 err = brcmf_fws_hdrpush(fws, skb);
872 if (err == 0)
873 err = brcmf_bus_txdata(bus, skb);
874 if (err)
875 brcmu_pkt_buf_free_skb(skb);
876 return true;
877 }
878 return false;
835} 879}
836 880
837static void 881static void
838brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq, 882brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq,
839 u8 if_id) 883 u8 if_id)
840{ 884{
841 struct brcmf_if *ifp = fws->drvr->iflist[if_id]; 885 struct brcmf_if *ifp = fws->drvr->iflist[!if_id ? 0 : if_id + 1];
842 886
843 if (WARN_ON(!ifp)) 887 if (WARN_ON(!ifp))
844 return; 888 return;
845 889
846 brcmf_dbg(TRACE,
847 "enter: bssidx=%d, ifidx=%d\n", ifp->bssidx, ifp->ifidx);
848
849 if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) && 890 if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
850 pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER) 891 pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER)
851 brcmf_txflowblock_if(ifp, 892 brcmf_txflowblock_if(ifp,
852 BRCMF_NETIF_STOP_REASON_FWS_FC, false); 893 BRCMF_NETIF_STOP_REASON_FWS_FC, false);
853 if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) && 894 if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
854 pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) 895 pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) {
896 fws->stats.fws_flow_block++;
855 brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true); 897 brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true);
898 }
856 return; 899 return;
857} 900}
858 901
@@ -862,10 +905,26 @@ static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
862 return 0; 905 return 0;
863} 906}
864 907
908/* using macro so sparse checking does not complain
909 * about locking imbalance.
910 */
911#define brcmf_fws_lock(drvr, flags) \
912do { \
913 flags = 0; \
914 spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \
915} while (0)
916
917/* using macro so sparse checking does not complain
918 * about locking imbalance.
919 */
920#define brcmf_fws_unlock(drvr, flags) \
921 spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
922
865static 923static
866int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data) 924int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
867{ 925{
868 struct brcmf_fws_mac_descriptor *entry, *existing; 926 struct brcmf_fws_mac_descriptor *entry, *existing;
927 ulong flags;
869 u8 mac_handle; 928 u8 mac_handle;
870 u8 ifidx; 929 u8 ifidx;
871 u8 *addr; 930 u8 *addr;
@@ -876,34 +935,44 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
876 935
877 entry = &fws->desc.nodes[mac_handle & 0x1F]; 936 entry = &fws->desc.nodes[mac_handle & 0x1F];
878 if (type == BRCMF_FWS_TYPE_MACDESC_DEL) { 937 if (type == BRCMF_FWS_TYPE_MACDESC_DEL) {
879 brcmf_dbg(TRACE, "deleting mac %pM idx %d\n", addr, ifidx);
880 if (entry->occupied) { 938 if (entry->occupied) {
881 brcmf_fws_mac_desc_cleanup(fws, entry, -1); 939 brcmf_dbg(TRACE, "deleting %s mac %pM\n",
882 brcmf_fws_clear_mac_descriptor(entry); 940 entry->name, addr);
941 brcmf_fws_lock(fws->drvr, flags);
942 brcmf_fws_macdesc_cleanup(fws, entry, -1);
943 brcmf_fws_macdesc_deinit(entry);
944 brcmf_fws_unlock(fws->drvr, flags);
883 } else 945 } else
884 fws->stats.mac_update_failed++; 946 fws->stats.mac_update_failed++;
885 return 0; 947 return 0;
886 } 948 }
887 949
888 brcmf_dbg(TRACE, 950 existing = brcmf_fws_macdesc_lookup(fws, addr);
889 "add mac %pM handle %u idx %d\n", addr, mac_handle, ifidx);
890 existing = brcmf_fws_mac_descriptor_lookup(fws, addr);
891 if (IS_ERR(existing)) { 951 if (IS_ERR(existing)) {
892 if (!entry->occupied) { 952 if (!entry->occupied) {
953 brcmf_fws_lock(fws->drvr, flags);
893 entry->mac_handle = mac_handle; 954 entry->mac_handle = mac_handle;
894 brcmf_fws_init_mac_descriptor(entry, addr, ifidx); 955 brcmf_fws_macdesc_init(entry, addr, ifidx);
956 brcmf_fws_macdesc_set_name(fws, entry);
895 brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, 957 brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
896 BRCMF_FWS_PSQ_LEN); 958 BRCMF_FWS_PSQ_LEN);
959 brcmf_fws_unlock(fws->drvr, flags);
960 brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
897 } else { 961 } else {
898 fws->stats.mac_update_failed++; 962 fws->stats.mac_update_failed++;
899 } 963 }
900 } else { 964 } else {
901 if (entry != existing) { 965 if (entry != existing) {
902 brcmf_dbg(TRACE, "relocate mac\n"); 966 brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
967 brcmf_fws_lock(fws->drvr, flags);
903 memcpy(entry, existing, 968 memcpy(entry, existing,
904 offsetof(struct brcmf_fws_mac_descriptor, psq)); 969 offsetof(struct brcmf_fws_mac_descriptor, psq));
905 entry->mac_handle = mac_handle; 970 entry->mac_handle = mac_handle;
906 brcmf_fws_clear_mac_descriptor(existing); 971 brcmf_fws_macdesc_deinit(existing);
972 brcmf_fws_macdesc_set_name(fws, entry);
973 brcmf_fws_unlock(fws->drvr, flags);
974 brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
975 addr);
907 } else { 976 } else {
908 brcmf_dbg(TRACE, "use existing\n"); 977 brcmf_dbg(TRACE, "use existing\n");
909 WARN_ON(entry->mac_handle != mac_handle); 978 WARN_ON(entry->mac_handle != mac_handle);
@@ -917,8 +986,9 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
917 u8 type, u8 *data) 986 u8 type, u8 *data)
918{ 987{
919 struct brcmf_fws_mac_descriptor *entry; 988 struct brcmf_fws_mac_descriptor *entry;
989 ulong flags;
920 u8 mac_handle; 990 u8 mac_handle;
921 int i; 991 int ret;
922 992
923 mac_handle = data[0]; 993 mac_handle = data[0];
924 entry = &fws->desc.nodes[mac_handle & 0x1F]; 994 entry = &fws->desc.nodes[mac_handle & 0x1F];
@@ -926,30 +996,35 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
926 fws->stats.mac_ps_update_failed++; 996 fws->stats.mac_ps_update_failed++;
927 return -ESRCH; 997 return -ESRCH;
928 } 998 }
929 999 brcmf_fws_lock(fws->drvr, flags);
930 /* a state update should wipe old credits? */ 1000 /* a state update should wipe old credits */
931 entry->requested_credit = 0; 1001 entry->requested_credit = 0;
1002 entry->requested_packet = 0;
932 if (type == BRCMF_FWS_TYPE_MAC_OPEN) { 1003 if (type == BRCMF_FWS_TYPE_MAC_OPEN) {
933 entry->state = BRCMF_FWS_STATE_OPEN; 1004 entry->state = BRCMF_FWS_STATE_OPEN;
934 return BRCMF_FWS_RET_OK_SCHEDULE; 1005 ret = BRCMF_FWS_RET_OK_SCHEDULE;
935 } else { 1006 } else {
936 entry->state = BRCMF_FWS_STATE_CLOSE; 1007 entry->state = BRCMF_FWS_STATE_CLOSE;
937 for (i = BRCMF_FWS_FIFO_AC_BE; i < NL80211_NUM_ACS; i++) 1008 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BK, false);
938 brcmf_fws_tim_update(fws, entry, i); 1009 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BE, false);
1010 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VI, false);
1011 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
1012 ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
939 } 1013 }
940 return BRCMF_FWS_RET_OK_NOSCHEDULE; 1014 brcmf_fws_unlock(fws->drvr, flags);
1015 return ret;
941} 1016}
942 1017
943static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws, 1018static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
944 u8 type, u8 *data) 1019 u8 type, u8 *data)
945{ 1020{
946 struct brcmf_fws_mac_descriptor *entry; 1021 struct brcmf_fws_mac_descriptor *entry;
1022 ulong flags;
947 u8 ifidx; 1023 u8 ifidx;
948 int ret; 1024 int ret;
949 1025
950 ifidx = data[0]; 1026 ifidx = data[0];
951 1027
952 brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
953 if (ifidx >= BRCMF_MAX_IFS) { 1028 if (ifidx >= BRCMF_MAX_IFS) {
954 ret = -ERANGE; 1029 ret = -ERANGE;
955 goto fail; 1030 goto fail;
@@ -961,17 +1036,26 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
961 goto fail; 1036 goto fail;
962 } 1037 }
963 1038
1039 brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
1040 entry->name);
1041 brcmf_fws_lock(fws->drvr, flags);
964 switch (type) { 1042 switch (type) {
965 case BRCMF_FWS_TYPE_INTERFACE_OPEN: 1043 case BRCMF_FWS_TYPE_INTERFACE_OPEN:
966 entry->state = BRCMF_FWS_STATE_OPEN; 1044 entry->state = BRCMF_FWS_STATE_OPEN;
967 return BRCMF_FWS_RET_OK_SCHEDULE; 1045 ret = BRCMF_FWS_RET_OK_SCHEDULE;
1046 break;
968 case BRCMF_FWS_TYPE_INTERFACE_CLOSE: 1047 case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
969 entry->state = BRCMF_FWS_STATE_CLOSE; 1048 entry->state = BRCMF_FWS_STATE_CLOSE;
970 return BRCMF_FWS_RET_OK_NOSCHEDULE; 1049 ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
1050 break;
971 default: 1051 default:
972 ret = -EINVAL; 1052 ret = -EINVAL;
973 break; 1053 brcmf_fws_unlock(fws->drvr, flags);
1054 goto fail;
974 } 1055 }
1056 brcmf_fws_unlock(fws->drvr, flags);
1057 return ret;
1058
975fail: 1059fail:
976 fws->stats.if_update_failed++; 1060 fws->stats.if_update_failed++;
977 return ret; 1061 return ret;
@@ -981,6 +1065,7 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
981 u8 *data) 1065 u8 *data)
982{ 1066{
983 struct brcmf_fws_mac_descriptor *entry; 1067 struct brcmf_fws_mac_descriptor *entry;
1068 ulong flags;
984 1069
985 entry = &fws->desc.nodes[data[1] & 0x1F]; 1070 entry = &fws->desc.nodes[data[1] & 0x1F];
986 if (!entry->occupied) { 1071 if (!entry->occupied) {
@@ -991,15 +1076,51 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
991 return -ESRCH; 1076 return -ESRCH;
992 } 1077 }
993 1078
1079 brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
1080 brcmf_fws_get_tlv_name(type), type, entry->name,
1081 data[0], data[2]);
1082 brcmf_fws_lock(fws->drvr, flags);
994 if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT) 1083 if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
995 entry->requested_credit = data[0]; 1084 entry->requested_credit = data[0];
996 else 1085 else
997 entry->requested_packet = data[0]; 1086 entry->requested_packet = data[0];
998 1087
999 entry->ac_bitmap = data[2]; 1088 entry->ac_bitmap = data[2];
1089 brcmf_fws_unlock(fws->drvr, flags);
1000 return BRCMF_FWS_RET_OK_SCHEDULE; 1090 return BRCMF_FWS_RET_OK_SCHEDULE;
1001} 1091}
1002 1092
1093static void
1094brcmf_fws_macdesc_use_req_credit(struct brcmf_fws_mac_descriptor *entry,
1095 struct sk_buff *skb)
1096{
1097 if (entry->requested_credit > 0) {
1098 entry->requested_credit--;
1099 brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
1100 brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 1);
1101 if (entry->state != BRCMF_FWS_STATE_CLOSE)
1102 brcmf_err("requested credit set while mac not closed!\n");
1103 } else if (entry->requested_packet > 0) {
1104 entry->requested_packet--;
1105 brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
1106 brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
1107 if (entry->state != BRCMF_FWS_STATE_CLOSE)
1108 brcmf_err("requested packet set while mac not closed!\n");
1109 } else {
1110 brcmf_skb_if_flags_set_field(skb, REQUESTED, 0);
1111 brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
1112 }
1113}
1114
1115static void brcmf_fws_macdesc_return_req_credit(struct sk_buff *skb)
1116{
1117 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
1118
1119 if ((brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) &&
1120 (entry->state == BRCMF_FWS_STATE_CLOSE))
1121 entry->requested_credit++;
1122}
1123
1003static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, 1124static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
1004 u8 fifo, u8 credits) 1125 u8 fifo, u8 credits)
1005{ 1126{
@@ -1010,6 +1131,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
1010 if (!credits) 1131 if (!credits)
1011 return; 1132 return;
1012 1133
1134 fws->fifo_credit_map |= 1 << fifo;
1135
1013 if ((fifo == BRCMF_FWS_FIFO_AC_BE) && 1136 if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
1014 (fws->credits_borrowed[0])) { 1137 (fws->credits_borrowed[0])) {
1015 for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0; 1138 for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0;
@@ -1031,7 +1154,6 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
1031 } 1154 }
1032 } 1155 }
1033 1156
1034 fws->fifo_credit_map |= 1 << fifo;
1035 fws->fifo_credit[fifo] += credits; 1157 fws->fifo_credit[fifo] += credits;
1036} 1158}
1037 1159
@@ -1042,27 +1164,6 @@ static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
1042 queue_work(fws->fws_wq, &fws->fws_dequeue_work); 1164 queue_work(fws->fws_wq, &fws->fws_dequeue_work);
1043} 1165}
1044 1166
1045static void brcmf_skb_pick_up_credit(struct brcmf_fws_info *fws, int fifo,
1046 struct sk_buff *p)
1047{
1048 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(p)->mac;
1049
1050 if (brcmf_skbcb(p)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
1051 if (fws->fcmode != BRCMF_FWS_FCMODE_IMPLIED_CREDIT)
1052 return;
1053 brcmf_fws_return_credits(fws, fifo, 1);
1054 } else {
1055 /*
1056 * if this packet did not count against FIFO credit, it
1057 * must have taken a requested_credit from the destination
1058 * entry (for pspoll etc.)
1059 */
1060 if (!brcmf_skb_if_flags_get_field(p, REQUESTED))
1061 entry->requested_credit++;
1062 }
1063 brcmf_fws_schedule_deq(fws);
1064}
1065
1066static int brcmf_fws_enq(struct brcmf_fws_info *fws, 1167static int brcmf_fws_enq(struct brcmf_fws_info *fws,
1067 enum brcmf_fws_skb_state state, int fifo, 1168 enum brcmf_fws_skb_state state, int fifo,
1068 struct sk_buff *p) 1169 struct sk_buff *p)
@@ -1078,7 +1179,7 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
1078 return -ENOENT; 1179 return -ENOENT;
1079 } 1180 }
1080 1181
1081 brcmf_dbg(TRACE, "enter: ea=%pM, qlen=%d\n", entry->ea, entry->psq.len); 1182 brcmf_dbg(DATA, "enter: fifo %d skb %p\n", fifo, p);
1082 if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) { 1183 if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
1083 prec += 1; 1184 prec += 1;
1084 qfull_stat = &fws->stats.supprq_full_error; 1185 qfull_stat = &fws->stats.supprq_full_error;
@@ -1095,14 +1196,12 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
1095 1196
1096 /* update the sk_buff state */ 1197 /* update the sk_buff state */
1097 brcmf_skbcb(p)->state = state; 1198 brcmf_skbcb(p)->state = state;
1098 if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
1099 entry->suppress_count++;
1100 1199
1101 /* 1200 /*
1102 * A packet has been pushed so update traffic 1201 * A packet has been pushed so update traffic
1103 * availability bitmap, if applicable 1202 * availability bitmap, if applicable
1104 */ 1203 */
1105 brcmf_fws_tim_update(fws, entry, fifo); 1204 brcmf_fws_tim_update(fws, entry, fifo, true);
1106 brcmf_fws_flow_control_check(fws, &entry->psq, 1205 brcmf_fws_flow_control_check(fws, &entry->psq,
1107 brcmf_skb_if_flags_get_field(p, INDEX)); 1206 brcmf_skb_if_flags_get_field(p, INDEX));
1108 return 0; 1207 return 0;
@@ -1113,7 +1212,6 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
1113 struct brcmf_fws_mac_descriptor *table; 1212 struct brcmf_fws_mac_descriptor *table;
1114 struct brcmf_fws_mac_descriptor *entry; 1213 struct brcmf_fws_mac_descriptor *entry;
1115 struct sk_buff *p; 1214 struct sk_buff *p;
1116 int use_credit = 1;
1117 int num_nodes; 1215 int num_nodes;
1118 int node_pos; 1216 int node_pos;
1119 int prec_out; 1217 int prec_out;
@@ -1127,7 +1225,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
1127 for (i = 0; i < num_nodes; i++) { 1225 for (i = 0; i < num_nodes; i++) {
1128 entry = &table[(node_pos + i) % num_nodes]; 1226 entry = &table[(node_pos + i) % num_nodes];
1129 if (!entry->occupied || 1227 if (!entry->occupied ||
1130 brcmf_fws_mac_desc_closed(fws, entry, fifo)) 1228 brcmf_fws_macdesc_closed(fws, entry, fifo))
1131 continue; 1229 continue;
1132 1230
1133 if (entry->suppressed) 1231 if (entry->suppressed)
@@ -1137,9 +1235,8 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
1137 p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out); 1235 p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out);
1138 if (p == NULL) { 1236 if (p == NULL) {
1139 if (entry->suppressed) { 1237 if (entry->suppressed) {
1140 if (entry->suppr_transit_count > 1238 if (entry->suppr_transit_count)
1141 entry->suppress_count) 1239 continue;
1142 return NULL;
1143 entry->suppressed = false; 1240 entry->suppressed = false;
1144 p = brcmu_pktq_mdeq(&entry->psq, 1241 p = brcmu_pktq_mdeq(&entry->psq,
1145 1 << (fifo * 2), &prec_out); 1242 1 << (fifo * 2), &prec_out);
@@ -1148,26 +1245,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
1148 if (p == NULL) 1245 if (p == NULL)
1149 continue; 1246 continue;
1150 1247
1151 /* did the packet come from suppress sub-queue? */ 1248 brcmf_fws_macdesc_use_req_credit(entry, p);
1152 if (entry->requested_credit > 0) {
1153 entry->requested_credit--;
1154 /*
1155 * if the packet was pulled out while destination is in
1156 * closed state but had a non-zero packets requested,
1157 * then this should not count against the FIFO credit.
1158 * That is due to the fact that the firmware will
1159 * most likely hold onto this packet until a suitable
1160 * time later to push it to the appropriate AC FIFO.
1161 */
1162 if (entry->state == BRCMF_FWS_STATE_CLOSE)
1163 use_credit = 0;
1164 } else if (entry->requested_packet > 0) {
1165 entry->requested_packet--;
1166 brcmf_skb_if_flags_set_field(p, REQUESTED, 1);
1167 if (entry->state == BRCMF_FWS_STATE_CLOSE)
1168 use_credit = 0;
1169 }
1170 brcmf_skb_if_flags_set_field(p, CREDITCHECK, use_credit);
1171 1249
1172 /* move dequeue position to ensure fair round-robin */ 1250 /* move dequeue position to ensure fair round-robin */
1173 fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes; 1251 fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes;
@@ -1179,7 +1257,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
1179 * A packet has been picked up, update traffic 1257 * A packet has been picked up, update traffic
1180 * availability bitmap, if applicable 1258 * availability bitmap, if applicable
1181 */ 1259 */
1182 brcmf_fws_tim_update(fws, entry, fifo); 1260 brcmf_fws_tim_update(fws, entry, fifo, false);
1183 1261
1184 /* 1262 /*
1185 * decrement total enqueued fifo packets and 1263 * decrement total enqueued fifo packets and
@@ -1192,7 +1270,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
1192 } 1270 }
1193 p = NULL; 1271 p = NULL;
1194done: 1272done:
1195 brcmf_dbg(TRACE, "exit: fifo %d skb %p\n", fifo, p); 1273 brcmf_dbg(DATA, "exit: fifo %d skb %p\n", fifo, p);
1196 return p; 1274 return p;
1197} 1275}
1198 1276
@@ -1202,22 +1280,26 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
1202 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; 1280 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
1203 u32 hslot; 1281 u32 hslot;
1204 int ret; 1282 int ret;
1283 u8 ifidx;
1205 1284
1206 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); 1285 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
1207 1286
1208 /* this packet was suppressed */ 1287 /* this packet was suppressed */
1209 if (!entry->suppressed || entry->generation != genbit) { 1288 if (!entry->suppressed) {
1210 entry->suppressed = true; 1289 entry->suppressed = true;
1211 entry->suppress_count = brcmu_pktq_mlen(&entry->psq,
1212 1 << (fifo * 2 + 1));
1213 entry->suppr_transit_count = entry->transit_count; 1290 entry->suppr_transit_count = entry->transit_count;
1291 brcmf_dbg(DATA, "suppress %s: transit %d\n",
1292 entry->name, entry->transit_count);
1214 } 1293 }
1215 1294
1216 entry->generation = genbit; 1295 entry->generation = genbit;
1217 1296
1218 ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb); 1297 ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
1298 if (ret == 0)
1299 ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
1300 skb);
1219 if (ret != 0) { 1301 if (ret != 0) {
1220 /* suppress q is full, drop this packet */ 1302 /* suppress q is full or hdrpull failed, drop this packet */
1221 brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, 1303 brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
1222 true); 1304 true);
1223 } else { 1305 } else {
@@ -1225,26 +1307,24 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
1225 * Mark suppressed to avoid a double free during 1307 * Mark suppressed to avoid a double free during
1226 * wlfc cleanup 1308 * wlfc cleanup
1227 */ 1309 */
1228 brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot, 1310 brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
1229 genbit);
1230 entry->suppress_count++;
1231 } 1311 }
1232 1312
1233 return ret; 1313 return ret;
1234} 1314}
1235 1315
1236static int 1316static int
1237brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, 1317brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
1238 u32 genbit) 1318 u32 genbit)
1239{ 1319{
1240 u32 fifo; 1320 u32 fifo;
1241 int ret; 1321 int ret;
1242 bool remove_from_hanger = true; 1322 bool remove_from_hanger = true;
1243 struct sk_buff *skb; 1323 struct sk_buff *skb;
1324 struct brcmf_skbuff_cb *skcb;
1244 struct brcmf_fws_mac_descriptor *entry = NULL; 1325 struct brcmf_fws_mac_descriptor *entry = NULL;
1245 1326
1246 brcmf_dbg(TRACE, "status: flags=0x%X, hslot=%d\n", 1327 brcmf_dbg(DATA, "flags %d\n", flags);
1247 flags, hslot);
1248 1328
1249 if (flags == BRCMF_FWS_TXSTATUS_DISCARD) 1329 if (flags == BRCMF_FWS_TXSTATUS_DISCARD)
1250 fws->stats.txs_discard++; 1330 fws->stats.txs_discard++;
@@ -1256,6 +1336,8 @@ brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
1256 remove_from_hanger = false; 1336 remove_from_hanger = false;
1257 } else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED) 1337 } else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED)
1258 fws->stats.txs_tossed++; 1338 fws->stats.txs_tossed++;
1339 else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)
1340 fws->stats.txs_host_tossed++;
1259 else 1341 else
1260 brcmf_err("unexpected txstatus\n"); 1342 brcmf_err("unexpected txstatus\n");
1261 1343
@@ -1266,32 +1348,42 @@ brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
1266 return ret; 1348 return ret;
1267 } 1349 }
1268 1350
1269 entry = brcmf_skbcb(skb)->mac; 1351 skcb = brcmf_skbcb(skb);
1352 entry = skcb->mac;
1270 if (WARN_ON(!entry)) { 1353 if (WARN_ON(!entry)) {
1271 brcmu_pkt_buf_free_skb(skb); 1354 brcmu_pkt_buf_free_skb(skb);
1272 return -EINVAL; 1355 return -EINVAL;
1273 } 1356 }
1357 entry->transit_count--;
1358 if (entry->suppressed && entry->suppr_transit_count)
1359 entry->suppr_transit_count--;
1360
1361 brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags,
1362 skcb->htod);
1274 1363
1275 /* pick up the implicit credit from this packet */ 1364 /* pick up the implicit credit from this packet */
1276 fifo = brcmf_skb_htod_tag_get_field(skb, FIFO); 1365 fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
1277 brcmf_skb_pick_up_credit(fws, fifo, skb); 1366 if ((fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT) ||
1367 (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) ||
1368 (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)) {
1369 brcmf_fws_return_credits(fws, fifo, 1);
1370 brcmf_fws_schedule_deq(fws);
1371 }
1372 brcmf_fws_macdesc_return_req_credit(skb);
1278 1373
1279 if (!remove_from_hanger) 1374 if (!remove_from_hanger)
1280 ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit); 1375 ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit);
1281 1376
1282 if (remove_from_hanger || ret) { 1377 if (remove_from_hanger || ret)
1283 entry->transit_count--;
1284 if (entry->suppressed)
1285 entry->suppr_transit_count--;
1286
1287 brcmf_txfinalize(fws->drvr, skb, true); 1378 brcmf_txfinalize(fws->drvr, skb, true);
1288 } 1379
1289 return 0; 1380 return 0;
1290} 1381}
1291 1382
1292static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws, 1383static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
1293 u8 *data) 1384 u8 *data)
1294{ 1385{
1386 ulong flags;
1295 int i; 1387 int i;
1296 1388
1297 if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) { 1389 if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
@@ -1299,17 +1391,20 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
1299 return BRCMF_FWS_RET_OK_NOSCHEDULE; 1391 return BRCMF_FWS_RET_OK_NOSCHEDULE;
1300 } 1392 }
1301 1393
1302 brcmf_dbg(TRACE, "enter: data %pM\n", data); 1394 brcmf_dbg(DATA, "enter: data %pM\n", data);
1395 brcmf_fws_lock(fws->drvr, flags);
1303 for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++) 1396 for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
1304 brcmf_fws_return_credits(fws, i, data[i]); 1397 brcmf_fws_return_credits(fws, i, data[i]);
1305 1398
1306 brcmf_dbg(INFO, "map: credit %x delay %x\n", fws->fifo_credit_map, 1399 brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
1307 fws->fifo_delay_map); 1400 fws->fifo_delay_map);
1401 brcmf_fws_unlock(fws->drvr, flags);
1308 return BRCMF_FWS_RET_OK_SCHEDULE; 1402 return BRCMF_FWS_RET_OK_SCHEDULE;
1309} 1403}
1310 1404
1311static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data) 1405static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
1312{ 1406{
1407 ulong lflags;
1313 __le32 status_le; 1408 __le32 status_le;
1314 u32 status; 1409 u32 status;
1315 u32 hslot; 1410 u32 hslot;
@@ -1323,7 +1418,10 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
1323 hslot = brcmf_txstatus_get_field(status, HSLOT); 1418 hslot = brcmf_txstatus_get_field(status, HSLOT);
1324 genbit = brcmf_txstatus_get_field(status, GENERATION); 1419 genbit = brcmf_txstatus_get_field(status, GENERATION);
1325 1420
1326 return brcmf_fws_txstatus_process(fws, flags, hslot, genbit); 1421 brcmf_fws_lock(fws->drvr, lflags);
1422 brcmf_fws_txs_process(fws, flags, hslot, genbit);
1423 brcmf_fws_unlock(fws->drvr, lflags);
1424 return BRCMF_FWS_RET_OK_NOSCHEDULE;
1327} 1425}
1328 1426
1329static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data) 1427static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
@@ -1331,26 +1429,11 @@ static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
1331 __le32 timestamp; 1429 __le32 timestamp;
1332 1430
1333 memcpy(&timestamp, &data[2], sizeof(timestamp)); 1431 memcpy(&timestamp, &data[2], sizeof(timestamp));
1334 brcmf_dbg(INFO, "received: seq %d, timestamp %d\n", data[1], 1432 brcmf_dbg(CTL, "received: seq %d, timestamp %d\n", data[1],
1335 le32_to_cpu(timestamp)); 1433 le32_to_cpu(timestamp));
1336 return 0; 1434 return 0;
1337} 1435}
1338 1436
1339/* using macro so sparse checking does not complain
1340 * about locking imbalance.
1341 */
1342#define brcmf_fws_lock(drvr, flags) \
1343do { \
1344 flags = 0; \
1345 spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \
1346} while (0)
1347
1348/* using macro so sparse checking does not complain
1349 * about locking imbalance.
1350 */
1351#define brcmf_fws_unlock(drvr, flags) \
1352 spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
1353
1354static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp, 1437static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
1355 const struct brcmf_event_msg *e, 1438 const struct brcmf_event_msg *e,
1356 void *data) 1439 void *data)
@@ -1364,6 +1447,10 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
1364 brcmf_err("event payload too small (%d)\n", e->datalen); 1447 brcmf_err("event payload too small (%d)\n", e->datalen);
1365 return -EINVAL; 1448 return -EINVAL;
1366 } 1449 }
1450 if (fws->creditmap_received)
1451 return 0;
1452
1453 fws->creditmap_received = true;
1367 1454
1368 brcmf_dbg(TRACE, "enter: credits %pM\n", credits); 1455 brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
1369 brcmf_fws_lock(ifp->drvr, flags); 1456 brcmf_fws_lock(ifp->drvr, flags);
@@ -1379,11 +1466,24 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
1379 return 0; 1466 return 0;
1380} 1467}
1381 1468
1469static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
1470 const struct brcmf_event_msg *e,
1471 void *data)
1472{
1473 struct brcmf_fws_info *fws = ifp->drvr->fws;
1474 ulong flags;
1475
1476 brcmf_fws_lock(ifp->drvr, flags);
1477 if (fws)
1478 fws->bcmc_credit_check = true;
1479 brcmf_fws_unlock(ifp->drvr, flags);
1480 return 0;
1481}
1482
1382int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, 1483int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
1383 struct sk_buff *skb) 1484 struct sk_buff *skb)
1384{ 1485{
1385 struct brcmf_fws_info *fws = drvr->fws; 1486 struct brcmf_fws_info *fws = drvr->fws;
1386 ulong flags;
1387 u8 *signal_data; 1487 u8 *signal_data;
1388 s16 data_len; 1488 s16 data_len;
1389 u8 type; 1489 u8 type;
@@ -1392,7 +1492,7 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
1392 s32 status; 1492 s32 status;
1393 s32 err; 1493 s32 err;
1394 1494
1395 brcmf_dbg(TRACE, "enter: ifidx %d, skblen %u, sig %d\n", 1495 brcmf_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n",
1396 ifidx, skb->len, signal_len); 1496 ifidx, skb->len, signal_len);
1397 1497
1398 WARN_ON(signal_len > skb->len); 1498 WARN_ON(signal_len > skb->len);
@@ -1403,9 +1503,6 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
1403 return 0; 1503 return 0;
1404 } 1504 }
1405 1505
1406 /* lock during tlv parsing */
1407 brcmf_fws_lock(drvr, flags);
1408
1409 fws->stats.header_pulls++; 1506 fws->stats.header_pulls++;
1410 data_len = signal_len; 1507 data_len = signal_len;
1411 signal_data = skb->data; 1508 signal_data = skb->data;
@@ -1426,14 +1523,15 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
1426 len = signal_data[1]; 1523 len = signal_data[1];
1427 data = signal_data + 2; 1524 data = signal_data + 2;
1428 1525
1429 brcmf_dbg(INFO, "tlv type=%d (%s), len=%d, data[0]=%d\n", type, 1526 brcmf_dbg(HDRS, "tlv type=%s (%d), len=%d (%d)\n",
1430 brcmf_fws_get_tlv_name(type), len, *data); 1527 brcmf_fws_get_tlv_name(type), type, len,
1528 brcmf_fws_get_tlv_len(fws, type));
1431 1529
1432 /* abort parsing when length invalid */ 1530 /* abort parsing when length invalid */
1433 if (data_len < len + 2) 1531 if (data_len < len + 2)
1434 break; 1532 break;
1435 1533
1436 if (len != brcmf_fws_get_tlv_len(fws, type)) 1534 if (len < brcmf_fws_get_tlv_len(fws, type))
1437 break; 1535 break;
1438 1536
1439 err = BRCMF_FWS_RET_OK_NOSCHEDULE; 1537 err = BRCMF_FWS_RET_OK_NOSCHEDULE;
@@ -1498,203 +1596,74 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
1498 if (skb->len == 0) 1596 if (skb->len == 0)
1499 fws->stats.header_only_pkt++; 1597 fws->stats.header_only_pkt++;
1500 1598
1501 brcmf_fws_unlock(drvr, flags);
1502 return 0;
1503}
1504
1505static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
1506{
1507 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
1508 u8 *wlh;
1509 u16 data_offset = 0;
1510 u8 fillers;
1511 __le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
1512
1513 brcmf_dbg(TRACE, "enter: ea=%pM, ifidx=%u, pkttag=0x%08X\n",
1514 entry->ea, entry->interface_id, le32_to_cpu(pkttag));
1515 if (entry->send_tim_signal)
1516 data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
1517
1518 /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
1519 data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
1520 fillers = round_up(data_offset, 4) - data_offset;
1521 data_offset += fillers;
1522
1523 skb_push(skb, data_offset);
1524 wlh = skb->data;
1525
1526 wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
1527 wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
1528 memcpy(&wlh[2], &pkttag, sizeof(pkttag));
1529 wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2;
1530
1531 if (entry->send_tim_signal) {
1532 entry->send_tim_signal = 0;
1533 wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
1534 wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
1535 wlh[2] = entry->mac_handle;
1536 wlh[3] = entry->traffic_pending_bmp;
1537 wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
1538 entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
1539 }
1540 if (fillers)
1541 memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
1542
1543 brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
1544 data_offset >> 2, skb);
1545 return 0; 1599 return 0;
1546} 1600}
1547 1601
1548static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo, 1602static void brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
1549 struct sk_buff *p) 1603 struct sk_buff *p)
1550{ 1604{
1551 struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p); 1605 struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
1552 struct brcmf_fws_mac_descriptor *entry = skcb->mac; 1606 struct brcmf_fws_mac_descriptor *entry = skcb->mac;
1553 int rc = 0;
1554 bool header_needed;
1555 int hslot = BRCMF_FWS_HANGER_MAXITEMS;
1556 u8 free_ctr;
1557 u8 ifidx;
1558 u8 flags; 1607 u8 flags;
1559 1608
1560 header_needed = skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED;
1561
1562 if (header_needed) {
1563 /* obtaining free slot may fail, but that will be caught
1564 * by the hanger push. This assures the packet has a BDC
1565 * header upon return.
1566 */
1567 hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
1568 free_ctr = entry->seq[fifo];
1569 brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
1570 brcmf_skb_htod_tag_set_field(p, FREERUN, free_ctr);
1571 brcmf_skb_htod_tag_set_field(p, GENERATION, 1);
1572 entry->transit_count++;
1573 }
1574 brcmf_skb_if_flags_set_field(p, TRANSMIT, 1); 1609 brcmf_skb_if_flags_set_field(p, TRANSMIT, 1);
1575 brcmf_skb_htod_tag_set_field(p, FIFO, fifo); 1610 brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
1576
1577 flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST; 1611 flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
1578 if (!(skcb->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)) { 1612 if (brcmf_skb_if_flags_get_field(p, REQUESTED)) {
1579 /* 1613 /*
1580 Indicate that this packet is being sent in response to an 1614 * Indicate that this packet is being sent in response to an
1581 explicit request from the firmware side. 1615 * explicit request from the firmware side.
1582 */ 1616 */
1583 flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED; 1617 flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
1584 } 1618 }
1585 brcmf_skb_htod_tag_set_field(p, FLAGS, flags); 1619 brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
1586 if (header_needed) { 1620 brcmf_fws_hdrpush(fws, p);
1587 brcmf_fws_hdrpush(fws, p);
1588 rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
1589 if (rc)
1590 brcmf_err("hanger push failed: rc=%d\n", rc);
1591 } else {
1592 int gen;
1593
1594 /* remove old header */
1595 rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, p);
1596 if (rc == 0) {
1597 hslot = brcmf_skb_htod_tag_get_field(p, HSLOT);
1598 brcmf_fws_hanger_get_genbit(&fws->hanger, p,
1599 hslot, &gen);
1600 brcmf_skb_htod_tag_set_field(p, GENERATION, gen);
1601
1602 /* push new header */
1603 brcmf_fws_hdrpush(fws, p);
1604 }
1605 }
1606
1607 return rc;
1608} 1621}
1609 1622
1610static void 1623static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
1611brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb) 1624 struct sk_buff *skb, int fifo)
1612{ 1625{
1613 /*
1614 put the packet back to the head of queue
1615
1616 - suppressed packet goes back to suppress sub-queue
1617 - pull out the header, if new or delayed packet
1618
1619 Note: hslot is used only when header removal is done.
1620 */
1621 struct brcmf_fws_mac_descriptor *entry; 1626 struct brcmf_fws_mac_descriptor *entry;
1622 enum brcmf_fws_skb_state state;
1623 struct sk_buff *pktout; 1627 struct sk_buff *pktout;
1628 int qidx, hslot;
1624 int rc = 0; 1629 int rc = 0;
1625 int fifo;
1626 int hslot;
1627 u8 ifidx;
1628 1630
1629 fifo = brcmf_skb_if_flags_get_field(skb, FIFO);
1630 state = brcmf_skbcb(skb)->state;
1631 entry = brcmf_skbcb(skb)->mac; 1631 entry = brcmf_skbcb(skb)->mac;
1632 1632 if (entry->occupied) {
1633 if (entry != NULL) { 1633 qidx = 2 * fifo;
1634 if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) { 1634 if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
1635 /* wl-header is saved for suppressed packets */ 1635 qidx++;
1636 pktout = brcmu_pktq_penq_head(&entry->psq, 2 * fifo + 1, 1636
1637 skb); 1637 pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb);
1638 if (pktout == NULL) { 1638 if (pktout == NULL) {
1639 brcmf_err("suppress queue full\n"); 1639 brcmf_err("%s queue %d full\n", entry->name, qidx);
1640 rc = -ENOSPC; 1640 rc = -ENOSPC;
1641 }
1642 } else {
1643 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
1644
1645 /* remove header first */
1646 rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
1647 if (rc) {
1648 brcmf_err("header removal failed\n");
1649 /* free the hanger slot */
1650 brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
1651 &pktout, true);
1652 rc = -EINVAL;
1653 goto fail;
1654 }
1655
1656 /* delay-q packets are going to delay-q */
1657 pktout = brcmu_pktq_penq_head(&entry->psq,
1658 2 * fifo, skb);
1659 if (pktout == NULL) {
1660 brcmf_err("delay queue full\n");
1661 rc = -ENOSPC;
1662 }
1663
1664 /* free the hanger slot */
1665 brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &pktout,
1666 true);
1667
1668 /* decrement sequence count */
1669 entry->seq[fifo]--;
1670 } 1641 }
1671 /*
1672 if this packet did not count against FIFO credit, it must have
1673 taken a requested_credit from the firmware (for pspoll etc.)
1674 */
1675 if (!(brcmf_skbcb(skb)->if_flags &
1676 BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK))
1677 entry->requested_credit++;
1678 } else { 1642 } else {
1679 brcmf_err("no mac entry linked\n"); 1643 brcmf_err("%s entry removed\n", entry->name);
1680 rc = -ENOENT; 1644 rc = -ENOENT;
1681 } 1645 }
1682 1646
1683
1684fail:
1685 if (rc) { 1647 if (rc) {
1686 brcmf_txfinalize(fws->drvr, skb, false);
1687 fws->stats.rollback_failed++; 1648 fws->stats.rollback_failed++;
1688 } else 1649 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
1650 brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED,
1651 hslot, 0);
1652 } else {
1689 fws->stats.rollback_success++; 1653 fws->stats.rollback_success++;
1654 brcmf_fws_return_credits(fws, fifo, 1);
1655 brcmf_fws_macdesc_return_req_credit(skb);
1656 }
1690} 1657}
1691 1658
1692static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws) 1659static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
1693{ 1660{
1694 int lender_ac; 1661 int lender_ac;
1695 1662
1696 if (time_after(fws->borrow_defer_timestamp, jiffies)) 1663 if (time_after(fws->borrow_defer_timestamp, jiffies)) {
1664 fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
1697 return -ENAVAIL; 1665 return -ENAVAIL;
1666 }
1698 1667
1699 for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) { 1668 for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) {
1700 if (fws->fifo_credit[lender_ac]) { 1669 if (fws->fifo_credit[lender_ac]) {
@@ -1702,66 +1671,15 @@ static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
1702 fws->fifo_credit[lender_ac]--; 1671 fws->fifo_credit[lender_ac]--;
1703 if (fws->fifo_credit[lender_ac] == 0) 1672 if (fws->fifo_credit[lender_ac] == 0)
1704 fws->fifo_credit_map &= ~(1 << lender_ac); 1673 fws->fifo_credit_map &= ~(1 << lender_ac);
1705 brcmf_dbg(TRACE, "borrow credit from: %d\n", lender_ac); 1674 fws->fifo_credit_map |= (1 << BRCMF_FWS_FIFO_AC_BE);
1675 brcmf_dbg(DATA, "borrow credit from: %d\n", lender_ac);
1706 return 0; 1676 return 0;
1707 } 1677 }
1708 } 1678 }
1679 fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
1709 return -ENAVAIL; 1680 return -ENAVAIL;
1710} 1681}
1711 1682
1712static int brcmf_fws_consume_credit(struct brcmf_fws_info *fws, int fifo,
1713 struct sk_buff *skb)
1714{
1715 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
1716 int *credit = &fws->fifo_credit[fifo];
1717 int use_credit = 1;
1718
1719 brcmf_dbg(TRACE, "enter: ac=%d, credits=%d\n", fifo, *credit);
1720
1721 if (entry->requested_credit > 0) {
1722 /*
1723 * if the packet was pulled out while destination is in
1724 * closed state but had a non-zero packets requested,
1725 * then this should not count against the FIFO credit.
1726 * That is due to the fact that the firmware will
1727 * most likely hold onto this packet until a suitable
1728 * time later to push it to the appropriate AC FIFO.
1729 */
1730 entry->requested_credit--;
1731 if (entry->state == BRCMF_FWS_STATE_CLOSE)
1732 use_credit = 0;
1733 } else if (entry->requested_packet > 0) {
1734 entry->requested_packet--;
1735 brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
1736 if (entry->state == BRCMF_FWS_STATE_CLOSE)
1737 use_credit = 0;
1738 }
1739 brcmf_skb_if_flags_set_field(skb, CREDITCHECK, use_credit);
1740 if (!use_credit) {
1741 brcmf_dbg(TRACE, "exit: no creditcheck set\n");
1742 return 0;
1743 }
1744
1745 if (fifo != BRCMF_FWS_FIFO_AC_BE)
1746 fws->borrow_defer_timestamp = jiffies +
1747 BRCMF_FWS_BORROW_DEFER_PERIOD;
1748
1749 if (!(*credit)) {
1750 /* Try to borrow a credit from other queue */
1751 if (fifo == BRCMF_FWS_FIFO_AC_BE &&
1752 brcmf_fws_borrow_credit(fws) == 0)
1753 return 0;
1754
1755 brcmf_dbg(TRACE, "exit: ac=%d, credits depleted\n", fifo);
1756 return -ENAVAIL;
1757 }
1758 (*credit)--;
1759 if (!(*credit))
1760 fws->fifo_credit_map &= ~(1 << fifo);
1761 brcmf_dbg(TRACE, "exit: ac=%d, credits=%d\n", fifo, *credit);
1762 return 0;
1763}
1764
1765static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo, 1683static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
1766 struct sk_buff *skb) 1684 struct sk_buff *skb)
1767{ 1685{
@@ -1769,32 +1687,51 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
1769 struct brcmf_fws_mac_descriptor *entry; 1687 struct brcmf_fws_mac_descriptor *entry;
1770 struct brcmf_bus *bus = fws->drvr->bus_if; 1688 struct brcmf_bus *bus = fws->drvr->bus_if;
1771 int rc; 1689 int rc;
1690 u8 ifidx;
1772 1691
1773 entry = skcb->mac; 1692 entry = skcb->mac;
1774 if (IS_ERR(entry)) 1693 if (IS_ERR(entry))
1775 return PTR_ERR(entry); 1694 return PTR_ERR(entry);
1776 1695
1777 rc = brcmf_fws_precommit_skb(fws, fifo, skb); 1696 brcmf_fws_precommit_skb(fws, fifo, skb);
1697 rc = brcmf_bus_txdata(bus, skb);
1698 brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
1699 skcb->if_flags, skcb->htod, rc);
1778 if (rc < 0) { 1700 if (rc < 0) {
1779 fws->stats.generic_error++; 1701 brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
1780 goto rollback; 1702 goto rollback;
1781 } 1703 }
1782 1704
1783 rc = brcmf_bus_txdata(bus, skb); 1705 entry->transit_count++;
1784 if (rc < 0) 1706 if (entry->suppressed)
1785 goto rollback; 1707 entry->suppr_transit_count++;
1786
1787 entry->seq[fifo]++;
1788 fws->stats.pkt2bus++; 1708 fws->stats.pkt2bus++;
1789 if (brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) { 1709 fws->stats.send_pkts[fifo]++;
1790 fws->stats.send_pkts[fifo]++; 1710 if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
1791 fws->stats.fifo_credits_sent[fifo]++; 1711 fws->stats.requested_sent[fifo]++;
1792 }
1793 1712
1794 return rc; 1713 return rc;
1795 1714
1796rollback: 1715rollback:
1797 brcmf_fws_rollback_toq(fws, skb); 1716 brcmf_fws_rollback_toq(fws, skb, fifo);
1717 return rc;
1718}
1719
1720static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
1721 int fifo)
1722{
1723 struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
1724 int rc, hslot;
1725
1726 hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
1727 brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
1728 brcmf_skb_htod_tag_set_field(p, FREERUN, skcb->mac->seq[fifo]);
1729 brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
1730 rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
1731 if (!rc)
1732 skcb->mac->seq[fifo]++;
1733 else
1734 fws->stats.generic_error++;
1798 return rc; 1735 return rc;
1799} 1736}
1800 1737
@@ -1826,29 +1763,25 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1826 1763
1827 /* set control buffer information */ 1764 /* set control buffer information */
1828 skcb->if_flags = 0; 1765 skcb->if_flags = 0;
1829 skcb->mac = brcmf_fws_find_mac_desc(fws, ifp, eh->h_dest);
1830 skcb->state = BRCMF_FWS_SKBSTATE_NEW; 1766 skcb->state = BRCMF_FWS_SKBSTATE_NEW;
1831 brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx); 1767 brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
1832 if (!multicast) 1768 if (!multicast)
1833 fifo = brcmf_fws_prio2fifo[skb->priority]; 1769 fifo = brcmf_fws_prio2fifo[skb->priority];
1834 brcmf_skb_if_flags_set_field(skb, FIFO, fifo);
1835
1836 brcmf_dbg(TRACE, "ea=%pM, multi=%d, fifo=%d\n", eh->h_dest,
1837 multicast, fifo);
1838 1770
1839 brcmf_fws_lock(drvr, flags); 1771 brcmf_fws_lock(drvr, flags);
1840 if (skcb->mac->suppressed || 1772 if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
1841 brcmf_fws_mac_desc_closed(fws, skcb->mac, fifo) || 1773 fws->borrow_defer_timestamp = jiffies +
1842 brcmu_pktq_mlen(&skcb->mac->psq, 3 << (fifo * 2)) || 1774 BRCMF_FWS_BORROW_DEFER_PERIOD;
1843 (!multicast && 1775
1844 brcmf_fws_consume_credit(fws, fifo, skb) < 0)) { 1776 skcb->mac = brcmf_fws_macdesc_find(fws, ifp, eh->h_dest);
1845 /* enqueue the packet in delayQ */ 1777 brcmf_dbg(DATA, "%s mac %pM multi %d fifo %d\n", skcb->mac->name,
1846 drvr->fws->fifo_delay_map |= 1 << fifo; 1778 eh->h_dest, multicast, fifo);
1779 if (!brcmf_fws_assign_htod(fws, skb, fifo)) {
1847 brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb); 1780 brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
1781 brcmf_fws_schedule_deq(fws);
1848 } else { 1782 } else {
1849 if (brcmf_fws_commit_skb(fws, fifo, skb)) 1783 brcmf_err("drop skb: no hanger slot\n");
1850 if (!multicast) 1784 brcmu_pkt_buf_free_skb(skb);
1851 brcmf_skb_pick_up_credit(fws, fifo, skb);
1852 } 1785 }
1853 brcmf_fws_unlock(drvr, flags); 1786 brcmf_fws_unlock(drvr, flags);
1854 return 0; 1787 return 0;
@@ -1862,7 +1795,7 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp)
1862 if (!entry) 1795 if (!entry)
1863 return; 1796 return;
1864 1797
1865 brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx); 1798 brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
1866} 1799}
1867 1800
1868void brcmf_fws_add_interface(struct brcmf_if *ifp) 1801void brcmf_fws_add_interface(struct brcmf_if *ifp)
@@ -1870,16 +1803,16 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
1870 struct brcmf_fws_info *fws = ifp->drvr->fws; 1803 struct brcmf_fws_info *fws = ifp->drvr->fws;
1871 struct brcmf_fws_mac_descriptor *entry; 1804 struct brcmf_fws_mac_descriptor *entry;
1872 1805
1873 brcmf_dbg(TRACE, "enter: idx=%d, mac=%pM\n",
1874 ifp->bssidx, ifp->mac_addr);
1875 if (!ifp->ndev || !ifp->drvr->fw_signals) 1806 if (!ifp->ndev || !ifp->drvr->fw_signals)
1876 return; 1807 return;
1877 1808
1878 entry = &fws->desc.iface[ifp->ifidx]; 1809 entry = &fws->desc.iface[ifp->ifidx];
1879 ifp->fws_desc = entry; 1810 ifp->fws_desc = entry;
1880 brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx); 1811 brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
1812 brcmf_fws_macdesc_set_name(fws, entry);
1881 brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, 1813 brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
1882 BRCMF_FWS_PSQ_LEN); 1814 BRCMF_FWS_PSQ_LEN);
1815 brcmf_dbg(TRACE, "added %s\n", entry->name);
1883} 1816}
1884 1817
1885void brcmf_fws_del_interface(struct brcmf_if *ifp) 1818void brcmf_fws_del_interface(struct brcmf_if *ifp)
@@ -1887,13 +1820,13 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp)
1887 struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; 1820 struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
1888 ulong flags; 1821 ulong flags;
1889 1822
1890 brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
1891 if (!entry) 1823 if (!entry)
1892 return; 1824 return;
1893 1825
1894 brcmf_fws_lock(ifp->drvr, flags); 1826 brcmf_fws_lock(ifp->drvr, flags);
1895 ifp->fws_desc = NULL; 1827 ifp->fws_desc = NULL;
1896 brcmf_fws_clear_mac_descriptor(entry); 1828 brcmf_dbg(TRACE, "deleting %s\n", entry->name);
1829 brcmf_fws_macdesc_deinit(entry);
1897 brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx); 1830 brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
1898 brcmf_fws_unlock(ifp->drvr, flags); 1831 brcmf_fws_unlock(ifp->drvr, flags);
1899} 1832}
@@ -1904,39 +1837,37 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
1904 struct sk_buff *skb; 1837 struct sk_buff *skb;
1905 ulong flags; 1838 ulong flags;
1906 int fifo; 1839 int fifo;
1907 int credit;
1908 1840
1909 fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work); 1841 fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
1910 1842
1911 brcmf_dbg(TRACE, "enter: fws=%p\n", fws);
1912 brcmf_fws_lock(fws->drvr, flags); 1843 brcmf_fws_lock(fws->drvr, flags);
1913 for (fifo = NL80211_NUM_ACS; fifo >= 0; fifo--) { 1844 for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked;
1914 brcmf_dbg(TRACE, "fifo %d credit %d\n", fifo, 1845 fifo--) {
1915 fws->fifo_credit[fifo]); 1846 while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
1916 for (credit = 0; credit < fws->fifo_credit[fifo]; /* nop */) { 1847 (fifo == BRCMF_FWS_FIFO_BCMC))) {
1917 skb = brcmf_fws_deq(fws, fifo); 1848 skb = brcmf_fws_deq(fws, fifo);
1918 if (!skb || brcmf_fws_commit_skb(fws, fifo, skb)) 1849 if (!skb)
1850 break;
1851 fws->fifo_credit[fifo]--;
1852 if (brcmf_fws_commit_skb(fws, fifo, skb))
1853 break;
1854 if (fws->bus_flow_blocked)
1919 break; 1855 break;
1920 if (brcmf_skbcb(skb)->if_flags &
1921 BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)
1922 credit++;
1923 } 1856 }
1924 if ((fifo == BRCMF_FWS_FIFO_AC_BE) && 1857 if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
1925 (credit == fws->fifo_credit[fifo])) { 1858 (fws->fifo_credit[fifo] == 0) &&
1926 fws->fifo_credit[fifo] -= credit; 1859 (!fws->bus_flow_blocked)) {
1927 while (brcmf_fws_borrow_credit(fws) == 0) { 1860 while (brcmf_fws_borrow_credit(fws) == 0) {
1928 skb = brcmf_fws_deq(fws, fifo); 1861 skb = brcmf_fws_deq(fws, fifo);
1929 if (!skb) { 1862 if (!skb) {
1930 brcmf_fws_return_credits(fws, fifo, 1); 1863 brcmf_fws_return_credits(fws, fifo, 1);
1931 break; 1864 break;
1932 } 1865 }
1933 if (brcmf_fws_commit_skb(fws, fifo, skb)) { 1866 if (brcmf_fws_commit_skb(fws, fifo, skb))
1934 brcmf_fws_return_credits(fws, fifo, 1); 1867 break;
1868 if (fws->bus_flow_blocked)
1935 break; 1869 break;
1936 }
1937 } 1870 }
1938 } else {
1939 fws->fifo_credit[fifo] -= credit;
1940 } 1871 }
1941 } 1872 }
1942 brcmf_fws_unlock(fws->drvr, flags); 1873 brcmf_fws_unlock(fws->drvr, flags);
@@ -1982,6 +1913,13 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
1982 brcmf_err("register credit map handler failed\n"); 1913 brcmf_err("register credit map handler failed\n");
1983 goto fail; 1914 goto fail;
1984 } 1915 }
1916 rc = brcmf_fweh_register(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT,
1917 brcmf_fws_notify_bcmc_credit_support);
1918 if (rc < 0) {
1919 brcmf_err("register bcmc credit handler failed\n");
1920 brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
1921 goto fail;
1922 }
1985 1923
1986 /* setting the iovar may fail if feature is unsupported 1924 /* setting the iovar may fail if feature is unsupported
1987 * so leave the rc as is so driver initialization can 1925 * so leave the rc as is so driver initialization can
@@ -1993,19 +1931,20 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
1993 } 1931 }
1994 1932
1995 brcmf_fws_hanger_init(&drvr->fws->hanger); 1933 brcmf_fws_hanger_init(&drvr->fws->hanger);
1996 brcmf_fws_init_mac_descriptor(&drvr->fws->desc.other, NULL, 0); 1934 brcmf_fws_macdesc_init(&drvr->fws->desc.other, NULL, 0);
1935 brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other);
1997 brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT, 1936 brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
1998 BRCMF_FWS_PSQ_LEN); 1937 BRCMF_FWS_PSQ_LEN);
1999 1938
2000 /* create debugfs file for statistics */ 1939 /* create debugfs file for statistics */
2001 brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats); 1940 brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats);
2002 1941
2003 /* TODO: remove upon feature delivery */ 1942 brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
2004 brcmf_err("%s bdcv2 tlv signaling [%x]\n",
2005 drvr->fw_signals ? "enabled" : "disabled", tlv); 1943 drvr->fw_signals ? "enabled" : "disabled", tlv);
2006 return 0; 1944 return 0;
2007 1945
2008fail_event: 1946fail_event:
1947 brcmf_fweh_unregister(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT);
2009 brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP); 1948 brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
2010fail: 1949fail:
2011 brcmf_fws_deinit(drvr); 1950 brcmf_fws_deinit(drvr);
@@ -2043,25 +1982,31 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
2043 if (!fws) 1982 if (!fws)
2044 return false; 1983 return false;
2045 1984
2046 brcmf_dbg(TRACE, "enter: mode=%d\n", fws->fcmode);
2047 return fws->fcmode != BRCMF_FWS_FCMODE_NONE; 1985 return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
2048} 1986}
2049 1987
2050void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb) 1988void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
2051{ 1989{
2052 ulong flags; 1990 ulong flags;
1991 u32 hslot;
2053 1992
2054 brcmf_fws_lock(fws->drvr, flags); 1993 if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
2055 brcmf_fws_txstatus_process(fws, BRCMF_FWS_TXSTATUS_FW_TOSSED, 1994 brcmu_pkt_buf_free_skb(skb);
2056 brcmf_skb_htod_tag_get_field(skb, HSLOT), 0); 1995 return;
2057 /* the packet never reached firmware so reclaim credit */
2058 if (fws->fcmode == BRCMF_FWS_FCMODE_EXPLICIT_CREDIT &&
2059 brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
2060 brcmf_fws_return_credits(fws,
2061 brcmf_skb_htod_tag_get_field(skb,
2062 FIFO),
2063 1);
2064 brcmf_fws_schedule_deq(fws);
2065 } 1996 }
1997 brcmf_fws_lock(fws->drvr, flags);
1998 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
1999 brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0);
2066 brcmf_fws_unlock(fws->drvr, flags); 2000 brcmf_fws_unlock(fws->drvr, flags);
2067} 2001}
2002
2003void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
2004{
2005 struct brcmf_fws_info *fws = drvr->fws;
2006
2007 fws->bus_flow_blocked = flow_blocked;
2008 if (!flow_blocked)
2009 brcmf_fws_schedule_deq(fws);
2010 else
2011 fws->stats.bus_flow_block++;
2012}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
index fbe483d23752..9fc860910bd8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
@@ -29,5 +29,6 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp);
29void brcmf_fws_add_interface(struct brcmf_if *ifp); 29void brcmf_fws_add_interface(struct brcmf_if *ifp);
30void brcmf_fws_del_interface(struct brcmf_if *ifp); 30void brcmf_fws_del_interface(struct brcmf_if *ifp);
31void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb); 31void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
32void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
32 33
33#endif /* FWSIGNAL_H_ */ 34#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 7c1b6332747e..09786a539950 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -170,7 +170,6 @@ struct brcmf_sdio_dev {
170 atomic_t suspend; /* suspend flag */ 170 atomic_t suspend; /* suspend flag */
171 wait_queue_head_t request_byte_wait; 171 wait_queue_head_t request_byte_wait;
172 wait_queue_head_t request_word_wait; 172 wait_queue_head_t request_word_wait;
173 wait_queue_head_t request_chain_wait;
174 wait_queue_head_t request_buffer_wait; 173 wait_queue_head_t request_buffer_wait;
175 struct device *dev; 174 struct device *dev;
176 struct brcmf_bus *bus_if; 175 struct brcmf_bus *bus_if;
@@ -230,8 +229,6 @@ brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
230#define SDIO_REQ_4BYTE 0x1 229#define SDIO_REQ_4BYTE 0x1
231/* Fixed address (FIFO) (vs. incrementing address) */ 230/* Fixed address (FIFO) (vs. incrementing address) */
232#define SDIO_REQ_FIXED 0x2 231#define SDIO_REQ_FIXED 0x2
233/* Async request (vs. sync request) */
234#define SDIO_REQ_ASYNC 0x4
235 232
236/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only). 233/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
237 * rw: read or write (0/1) 234 * rw: read or write (0/1)
@@ -252,9 +249,6 @@ extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
252extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev); 249extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
253extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev); 250extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
254 251
255extern int brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev,
256 u32 address);
257
258/* attach, return handler on success, NULL if failed. 252/* attach, return handler on success, NULL if failed.
259 * The handler shall be provided by all subsequent calls. No local cache 253 * The handler shall be provided by all subsequent calls. No local cache
260 * cfghdl points to the starting address of pci device mapped memory 254 * cfghdl points to the starting address of pci device mapped memory
@@ -272,16 +266,6 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
272 uint rw, uint fnc, uint addr, 266 uint rw, uint fnc, uint addr,
273 u32 *word, uint nbyte); 267 u32 *word, uint nbyte);
274 268
275/* read or write any buffer using cmd53 */
276extern int
277brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
278 uint fix_inc, uint rw, uint fnc_num, u32 addr,
279 struct sk_buff *pkt);
280extern int
281brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
282 uint write, uint func, uint addr,
283 struct sk_buff_head *pktq);
284
285/* Watchdog timer interface for pm ops */ 269/* Watchdog timer interface for pm ops */
286extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, 270extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
287 bool enable); 271 bool enable);
@@ -291,4 +275,8 @@ extern void brcmf_sdbrcm_disconnect(void *ptr);
291extern void brcmf_sdbrcm_isr(void *arg); 275extern void brcmf_sdbrcm_isr(void *arg);
292 276
293extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick); 277extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
278
279extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
280 wait_queue_head_t *wq);
281extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
294#endif /* _BRCM_SDH_H_ */ 282#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
index 9df1f7a681e0..bc2917112899 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -87,6 +87,27 @@ TRACE_EVENT(brcmf_hexdump,
87 TP_printk("hexdump [length=%lu]", __entry->len) 87 TP_printk("hexdump [length=%lu]", __entry->len)
88); 88);
89 89
90TRACE_EVENT(brcmf_bdchdr,
91 TP_PROTO(void *data),
92 TP_ARGS(data),
93 TP_STRUCT__entry(
94 __field(u8, flags)
95 __field(u8, prio)
96 __field(u8, flags2)
97 __field(u32, siglen)
98 __dynamic_array(u8, signal, *((u8 *)data + 3) * 4)
99 ),
100 TP_fast_assign(
101 __entry->flags = *(u8 *)data;
102 __entry->prio = *((u8 *)data + 1);
103 __entry->flags2 = *((u8 *)data + 2);
104 __entry->siglen = *((u8 *)data + 3) * 4;
105 memcpy(__get_dynamic_array(signal),
106 (u8 *)data + 4, __entry->siglen);
107 ),
108 TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
109);
110
90#ifdef CONFIG_BRCM_TRACING 111#ifdef CONFIG_BRCM_TRACING
91 112
92#undef TRACE_INCLUDE_PATH 113#undef TRACE_INCLUDE_PATH
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 01aed7ad6bec..322cadc51ded 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -82,6 +82,7 @@ struct brcmf_usbdev_info {
82 int tx_high_watermark; 82 int tx_high_watermark;
83 int tx_freecount; 83 int tx_freecount;
84 bool tx_flowblock; 84 bool tx_flowblock;
85 spinlock_t tx_flowblock_lock;
85 86
86 struct brcmf_usbreq *tx_reqs; 87 struct brcmf_usbreq *tx_reqs;
87 struct brcmf_usbreq *rx_reqs; 88 struct brcmf_usbreq *rx_reqs;
@@ -411,6 +412,7 @@ static void brcmf_usb_tx_complete(struct urb *urb)
411{ 412{
412 struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context; 413 struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
413 struct brcmf_usbdev_info *devinfo = req->devinfo; 414 struct brcmf_usbdev_info *devinfo = req->devinfo;
415 unsigned long flags;
414 416
415 brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status, 417 brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
416 req->skb); 418 req->skb);
@@ -419,11 +421,13 @@ static void brcmf_usb_tx_complete(struct urb *urb)
419 brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0); 421 brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
420 req->skb = NULL; 422 req->skb = NULL;
421 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount); 423 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
424 spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
422 if (devinfo->tx_freecount > devinfo->tx_high_watermark && 425 if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
423 devinfo->tx_flowblock) { 426 devinfo->tx_flowblock) {
424 brcmf_txflowblock(devinfo->dev, false); 427 brcmf_txflowblock(devinfo->dev, false);
425 devinfo->tx_flowblock = false; 428 devinfo->tx_flowblock = false;
426 } 429 }
430 spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
427} 431}
428 432
429static void brcmf_usb_rx_complete(struct urb *urb) 433static void brcmf_usb_rx_complete(struct urb *urb)
@@ -568,6 +572,7 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
568 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 572 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
569 struct brcmf_usbreq *req; 573 struct brcmf_usbreq *req;
570 int ret; 574 int ret;
575 unsigned long flags;
571 576
572 brcmf_dbg(USB, "Enter, skb=%p\n", skb); 577 brcmf_dbg(USB, "Enter, skb=%p\n", skb);
573 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) { 578 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
@@ -599,11 +604,13 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
599 goto fail; 604 goto fail;
600 } 605 }
601 606
607 spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
602 if (devinfo->tx_freecount < devinfo->tx_low_watermark && 608 if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
603 !devinfo->tx_flowblock) { 609 !devinfo->tx_flowblock) {
604 brcmf_txflowblock(dev, true); 610 brcmf_txflowblock(dev, true);
605 devinfo->tx_flowblock = true; 611 devinfo->tx_flowblock = true;
606 } 612 }
613 spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
607 return 0; 614 return 0;
608 615
609fail: 616fail:
@@ -1164,6 +1171,7 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
1164 1171
1165 /* Initialize the spinlocks */ 1172 /* Initialize the spinlocks */
1166 spin_lock_init(&devinfo->qlock); 1173 spin_lock_init(&devinfo->qlock);
1174 spin_lock_init(&devinfo->tx_flowblock_lock);
1167 1175
1168 INIT_LIST_HEAD(&devinfo->rx_freeq); 1176 INIT_LIST_HEAD(&devinfo->rx_freeq);
1169 INIT_LIST_HEAD(&devinfo->rx_postq); 1177 INIT_LIST_HEAD(&devinfo->rx_postq);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 301e572e8923..277b37ae7126 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -3982,6 +3982,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3982 struct brcmf_fil_af_params_le *af_params; 3982 struct brcmf_fil_af_params_le *af_params;
3983 bool ack; 3983 bool ack;
3984 s32 chan_nr; 3984 s32 chan_nr;
3985 u32 freq;
3985 3986
3986 brcmf_dbg(TRACE, "Enter\n"); 3987 brcmf_dbg(TRACE, "Enter\n");
3987 3988
@@ -3994,6 +3995,8 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3994 return -EPERM; 3995 return -EPERM;
3995 } 3996 }
3996 3997
3998 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
3999
3997 if (ieee80211_is_probe_resp(mgmt->frame_control)) { 4000 if (ieee80211_is_probe_resp(mgmt->frame_control)) {
3998 /* Right now the only reason to get a probe response */ 4001 /* Right now the only reason to get a probe response */
3999 /* is for p2p listen response or for p2p GO from */ 4002 /* is for p2p listen response or for p2p GO from */
@@ -4009,7 +4012,6 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
4009 ie_offset = DOT11_MGMT_HDR_LEN + 4012 ie_offset = DOT11_MGMT_HDR_LEN +
4010 DOT11_BCN_PRB_FIXED_LEN; 4013 DOT11_BCN_PRB_FIXED_LEN;
4011 ie_len = len - ie_offset; 4014 ie_len = len - ie_offset;
4012 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
4013 if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) 4015 if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif)
4014 vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; 4016 vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
4015 err = brcmf_vif_set_mgmt_ie(vif, 4017 err = brcmf_vif_set_mgmt_ie(vif,
@@ -4033,16 +4035,22 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
4033 memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN); 4035 memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
4034 /* Add the length exepted for 802.11 header */ 4036 /* Add the length exepted for 802.11 header */
4035 action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN); 4037 action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
4036 /* Add the channel */ 4038 /* Add the channel. Use the one specified as parameter if any or
4037 chan_nr = ieee80211_frequency_to_channel(chan->center_freq); 4039 * the current one (got from the firmware) otherwise
4040 */
4041 if (chan)
4042 freq = chan->center_freq;
4043 else
4044 brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL,
4045 &freq);
4046 chan_nr = ieee80211_frequency_to_channel(freq);
4038 af_params->channel = cpu_to_le32(chan_nr); 4047 af_params->channel = cpu_to_le32(chan_nr);
4039 4048
4040 memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], 4049 memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
4041 le16_to_cpu(action_frame->len)); 4050 le16_to_cpu(action_frame->len));
4042 4051
4043 brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n", 4052 brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n",
4044 *cookie, le16_to_cpu(action_frame->len), 4053 *cookie, le16_to_cpu(action_frame->len), freq);
4045 chan->center_freq);
4046 4054
4047 ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg), 4055 ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
4048 af_params); 4056 af_params);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 1585cc5bf866..bd982856d385 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -900,7 +900,7 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
900 if (supr_status) { 900 if (supr_status) {
901 update_rate = false; 901 update_rate = false;
902 if (supr_status == TX_STATUS_SUPR_BADCH) { 902 if (supr_status == TX_STATUS_SUPR_BADCH) {
903 brcms_err(wlc->hw->d11core, 903 brcms_dbg_ht(wlc->hw->d11core,
904 "%s: Pkt tx suppressed, illegal channel possibly %d\n", 904 "%s: Pkt tx suppressed, illegal channel possibly %d\n",
905 __func__, CHSPEC_CHANNEL( 905 __func__, CHSPEC_CHANNEL(
906 wlc->default_bss->chanspec)); 906 wlc->default_bss->chanspec));
diff --git a/drivers/net/wireless/cw1200/Kconfig b/drivers/net/wireless/cw1200/Kconfig
new file mode 100644
index 000000000000..0880742eab17
--- /dev/null
+++ b/drivers/net/wireless/cw1200/Kconfig
@@ -0,0 +1,30 @@
1config CW1200
2 tristate "CW1200 WLAN support"
3 depends on MAC80211 && CFG80211
4 help
5 This is a driver for the ST-E CW1100 & CW1200 WLAN chipsets.
6 This option just enables the driver core, see below for
7 specific bus support.
8
9if CW1200
10
11config CW1200_WLAN_SDIO
12 tristate "Support SDIO platforms"
13 depends on CW1200 && MMC
14 help
15 Enable support for the CW1200 connected via an SDIO bus.
16 By default this driver only supports the Sagrad SG901-1091/1098 EVK
17 and similar designs that utilize a hardware reset circuit. To
18 support different CW1200 SDIO designs you will need to override
19 the default platform data by calling cw1200_sdio_set_platform_data()
20 in your board setup file.
21
22config CW1200_WLAN_SPI
23 tristate "Support SPI platforms"
24 depends on CW1200 && SPI
25 help
26 Enables support for the CW1200 connected via a SPI bus. You will
27 need to add appropriate platform data glue in your board setup
28 file.
29
30endif
diff --git a/drivers/net/wireless/cw1200/Makefile b/drivers/net/wireless/cw1200/Makefile
new file mode 100644
index 000000000000..b086aac6547a
--- /dev/null
+++ b/drivers/net/wireless/cw1200/Makefile
@@ -0,0 +1,21 @@
1cw1200_core-y := \
2 fwio.o \
3 txrx.o \
4 main.o \
5 queue.o \
6 hwio.o \
7 bh.o \
8 wsm.o \
9 sta.o \
10 scan.o \
11 debug.o
12cw1200_core-$(CONFIG_PM) += pm.o
13
14# CFLAGS_sta.o += -DDEBUG
15
16cw1200_wlan_sdio-y := cw1200_sdio.o
17cw1200_wlan_spi-y := cw1200_spi.o
18
19obj-$(CONFIG_CW1200) += cw1200_core.o
20obj-$(CONFIG_CW1200_WLAN_SDIO) += cw1200_wlan_sdio.o
21obj-$(CONFIG_CW1200_WLAN_SPI) += cw1200_wlan_spi.o
diff --git a/drivers/net/wireless/cw1200/bh.c b/drivers/net/wireless/cw1200/bh.c
new file mode 100644
index 000000000000..c1ec2a4dd8c0
--- /dev/null
+++ b/drivers/net/wireless/cw1200/bh.c
@@ -0,0 +1,619 @@
1/*
2 * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * Based on:
8 * ST-Ericsson UMAC CW1200 driver, which is
9 * Copyright (c) 2010, ST-Ericsson
10 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/module.h>
18#include <net/mac80211.h>
19#include <linux/kthread.h>
20#include <linux/timer.h>
21
22#include "cw1200.h"
23#include "bh.h"
24#include "hwio.h"
25#include "wsm.h"
26#include "hwbus.h"
27#include "debug.h"
28#include "fwio.h"
29
30static int cw1200_bh(void *arg);
31
32#define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4)
33/* an SPI message cannot be bigger than (2"12-1)*2 bytes
34 * "*2" to cvt to bytes
35 */
36#define MAX_SZ_RD_WR_BUFFERS (DOWNLOAD_BLOCK_SIZE_WR*2)
37#define PIGGYBACK_CTRL_REG (2)
38#define EFFECTIVE_BUF_SIZE (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)
39
40/* Suspend state privates */
41enum cw1200_bh_pm_state {
42 CW1200_BH_RESUMED = 0,
43 CW1200_BH_SUSPEND,
44 CW1200_BH_SUSPENDED,
45 CW1200_BH_RESUME,
46};
47
48typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv,
49 u8 *data, size_t size);
50
51static void cw1200_bh_work(struct work_struct *work)
52{
53 struct cw1200_common *priv =
54 container_of(work, struct cw1200_common, bh_work);
55 cw1200_bh(priv);
56}
57
58int cw1200_register_bh(struct cw1200_common *priv)
59{
60 int err = 0;
61 /* Realtime workqueue */
62 priv->bh_workqueue = alloc_workqueue("cw1200_bh",
63 WQ_MEM_RECLAIM | WQ_HIGHPRI
64 | WQ_CPU_INTENSIVE, 1);
65
66 if (!priv->bh_workqueue)
67 return -ENOMEM;
68
69 INIT_WORK(&priv->bh_work, cw1200_bh_work);
70
71 pr_debug("[BH] register.\n");
72
73 atomic_set(&priv->bh_rx, 0);
74 atomic_set(&priv->bh_tx, 0);
75 atomic_set(&priv->bh_term, 0);
76 atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
77 priv->bh_error = 0;
78 priv->hw_bufs_used = 0;
79 priv->buf_id_tx = 0;
80 priv->buf_id_rx = 0;
81 init_waitqueue_head(&priv->bh_wq);
82 init_waitqueue_head(&priv->bh_evt_wq);
83
84 err = !queue_work(priv->bh_workqueue, &priv->bh_work);
85 WARN_ON(err);
86 return err;
87}
88
89void cw1200_unregister_bh(struct cw1200_common *priv)
90{
91 atomic_add(1, &priv->bh_term);
92 wake_up(&priv->bh_wq);
93
94 flush_workqueue(priv->bh_workqueue);
95
96 destroy_workqueue(priv->bh_workqueue);
97 priv->bh_workqueue = NULL;
98
99 pr_debug("[BH] unregistered.\n");
100}
101
/* Device interrupt entry point, called from the bus glue (SDIO/SPI)
 * with the hwbus lock already held.  Masks further device interrupts
 * and wakes the BH loop, which will re-enable them when done.
 */
void cw1200_irq_handler(struct cw1200_common *priv)
{
	pr_debug("[BH] irq.\n");

	/* Disable Interrupts! */
	/* NOTE:  hwbus_ops->lock already held */
	__cw1200_irq_enable(priv, 0);

	if (/* WARN_ON */(priv->bh_error))
		return;

	/* Only the 0 -> 1 transition needs to wake the BH; further IRQs
	 * before the BH consumes bh_rx are coalesced.
	 */
	if (atomic_add_return(1, &priv->bh_rx) == 1)
		wake_up(&priv->bh_wq);
}
EXPORT_SYMBOL_GPL(cw1200_irq_handler);
117
118void cw1200_bh_wakeup(struct cw1200_common *priv)
119{
120 pr_debug("[BH] wakeup.\n");
121 if (priv->bh_error) {
122 pr_err("[BH] wakeup failed (BH error)\n");
123 return;
124 }
125
126 if (atomic_add_return(1, &priv->bh_tx) == 1)
127 wake_up(&priv->bh_wq);
128}
129
130int cw1200_bh_suspend(struct cw1200_common *priv)
131{
132 pr_debug("[BH] suspend.\n");
133 if (priv->bh_error) {
134 wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
135 return -EINVAL;
136 }
137
138 atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
139 wake_up(&priv->bh_wq);
140 return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
141 (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)),
142 1 * HZ) ? 0 : -ETIMEDOUT;
143}
144
145int cw1200_bh_resume(struct cw1200_common *priv)
146{
147 pr_debug("[BH] resume.\n");
148 if (priv->bh_error) {
149 wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
150 return -EINVAL;
151 }
152
153 atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
154 wake_up(&priv->bh_wq);
155 return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
156 (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
157 1 * HZ) ? 0 : -ETIMEDOUT;
158}
159
160static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
161{
162 ++priv->hw_bufs_used;
163}
164
/* Return `count` hardware TX buffers to the pool.
 *
 * Return value contract (relied on by cw1200_bh_rx_helper):
 *  -1 : accounting underflow (bug) -- hw_bufs_used went negative;
 *   1 : the device was at/over its buffer limit before this release,
 *       so TX was throttled and should be retried now;
 *   0 : normal release, no special action needed.
 *
 * Also wakes bh_evt_wq when the last buffer drains, which is what
 * cw1200_bh's "device wakedown" path waits for.
 */
int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
{
	int ret = 0;
	/* Snapshot taken before the decrement: the "was throttled"
	 * test below must use the pre-release value.
	 */
	int hw_bufs_used = priv->hw_bufs_used;

	priv->hw_bufs_used -= count;
	if (WARN_ON(priv->hw_bufs_used < 0))
		ret = -1;
	else if (hw_bufs_used >= priv->wsm_caps.input_buffers)
		ret = 1;
	if (!priv->hw_bufs_used)
		wake_up(&priv->bh_evt_wq);
	return ret;
}
179
180static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
181 u16 *ctrl_reg)
182{
183 int ret;
184
185 ret = cw1200_reg_read_16(priv,
186 ST90TDS_CONTROL_REG_ID, ctrl_reg);
187 if (ret) {
188 ret = cw1200_reg_read_16(priv,
189 ST90TDS_CONTROL_REG_ID, ctrl_reg);
190 if (ret)
191 pr_err("[BH] Failed to read control register.\n");
192 }
193
194 return ret;
195}
196
/* Kick the chip out of low-power state.
 *
 * Programs the DPLL for the board's reference clock, raises WLAN_UP in
 * the control register, and then checks whether the device already
 * reports ready.
 *
 * Returns 1 if the device is awake (WLAN_RDY set), 0 if the wakeup was
 * initiated but the device has not signalled ready yet (caller retries
 * later), or a negative error code on a bus failure.
 */
static int cw1200_device_wakeup(struct cw1200_common *priv)
{
	u16 ctrl_reg;
	int ret;

	pr_debug("[BH] Device wakeup.\n");

	/* First, set the dpll register */
	ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
				  cw1200_dpll_from_clk(priv->hw_refclk));
	if (WARN_ON(ret))
		return ret;

	/* To force the device to be always-on, the host sets WLAN_UP to 1 */
	ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
				  ST90TDS_CONT_WUP_BIT);
	if (WARN_ON(ret))
		return ret;

	ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
	if (WARN_ON(ret))
		return ret;

	/* If the device returns WLAN_RDY as 1, the device is active and will
	 * remain active.
	 */
	if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
		pr_debug("[BH] Device awake.\n");
		return 1;
	}

	return 0;
}
230
231/* Must be called from BH thraed. */
232void cw1200_enable_powersave(struct cw1200_common *priv,
233 bool enable)
234{
235 pr_debug("[BH] Powerave is %s.\n",
236 enable ? "enabled" : "disabled");
237 priv->powersave_enabled = enable;
238}
239
240static int cw1200_bh_rx_helper(struct cw1200_common *priv,
241 uint16_t *ctrl_reg,
242 int *tx)
243{
244 size_t read_len = 0;
245 struct sk_buff *skb_rx = NULL;
246 struct wsm_hdr *wsm;
247 size_t wsm_len;
248 u16 wsm_id;
249 u8 wsm_seq;
250 int rx_resync = 1;
251
252 size_t alloc_len;
253 u8 *data;
254
255 read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
256 if (!read_len)
257 return 0; /* No more work */
258
259 if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
260 (read_len > EFFECTIVE_BUF_SIZE))) {
261 pr_debug("Invalid read len: %zu (%04x)",
262 read_len, *ctrl_reg);
263 goto err;
264 }
265
266 /* Add SIZE of PIGGYBACK reg (CONTROL Reg)
267 * to the NEXT Message length + 2 Bytes for SKB
268 */
269 read_len = read_len + 2;
270
271 alloc_len = priv->hwbus_ops->align_size(
272 priv->hwbus_priv, read_len);
273
274 /* Check if not exceeding CW1200 capabilities */
275 if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
276 pr_debug("Read aligned len: %zu\n",
277 alloc_len);
278 }
279
280 skb_rx = dev_alloc_skb(alloc_len);
281 if (WARN_ON(!skb_rx))
282 goto err;
283
284 skb_trim(skb_rx, 0);
285 skb_put(skb_rx, read_len);
286 data = skb_rx->data;
287 if (WARN_ON(!data))
288 goto err;
289
290 if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
291 pr_err("rx blew up, len %zu\n", alloc_len);
292 goto err;
293 }
294
295 /* Piggyback */
296 *ctrl_reg = __le16_to_cpu(
297 ((__le16 *)data)[alloc_len / 2 - 1]);
298
299 wsm = (struct wsm_hdr *)data;
300 wsm_len = __le16_to_cpu(wsm->len);
301 if (WARN_ON(wsm_len > read_len))
302 goto err;
303
304 if (priv->wsm_enable_wsm_dumps)
305 print_hex_dump_bytes("<-- ",
306 DUMP_PREFIX_NONE,
307 data, wsm_len);
308
309 wsm_id = __le16_to_cpu(wsm->id) & 0xFFF;
310 wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;
311
312 skb_trim(skb_rx, wsm_len);
313
314 if (wsm_id == 0x0800) {
315 wsm_handle_exception(priv,
316 &data[sizeof(*wsm)],
317 wsm_len - sizeof(*wsm));
318 goto err;
319 } else if (!rx_resync) {
320 if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
321 goto err;
322 }
323 priv->wsm_rx_seq = (wsm_seq + 1) & 7;
324 rx_resync = 0;
325
326 if (wsm_id & 0x0400) {
327 int rc = wsm_release_tx_buffer(priv, 1);
328 if (WARN_ON(rc < 0))
329 return rc;
330 else if (rc > 0)
331 *tx = 1;
332 }
333
334 /* cw1200_wsm_rx takes care on SKB livetime */
335 if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
336 goto err;
337
338 if (skb_rx) {
339 dev_kfree_skb(skb_rx);
340 skb_rx = NULL;
341 }
342
343 return 0;
344
345err:
346 if (skb_rx) {
347 dev_kfree_skb(skb_rx);
348 skb_rx = NULL;
349 }
350 return -1;
351}
352
/* Pull one frame from the WSM TX queues and write it to the device.
 *
 * Returns 1 when more frames may follow in the current burst, 0 when
 * there is nothing (more) to send or the device is still waking up
 * (in which case *pending_tx is set so the caller retries), and a
 * negative value on a fatal bus error.
 */
static int cw1200_bh_tx_helper(struct cw1200_common *priv,
			       int *pending_tx,
			       int *tx_burst)
{
	size_t tx_len;
	u8 *data;
	int ret;
	struct wsm_hdr *wsm;

	if (priv->device_can_sleep) {
		ret = cw1200_device_wakeup(priv);
		if (WARN_ON(ret < 0)) { /* Error in wakeup */
			*pending_tx = 1;
			return 0;
		} else if (ret) { /* Woke up */
			priv->device_can_sleep = false;
		} else { /* Did not awake */
			*pending_tx = 1;
			return 0;
		}
	}

	/* Reserve the hardware buffer up front; released again below if
	 * there turns out to be nothing to send or the write fails.
	 */
	wsm_alloc_tx_buffer(priv);
	ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
	if (ret <= 0) {
		wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(ret < 0))
			return ret; /* Error */
		return 0; /* No work */
	}

	wsm = (struct wsm_hdr *)data;
	BUG_ON(tx_len < sizeof(*wsm));
	BUG_ON(__le16_to_cpu(wsm->len) != tx_len);

	atomic_add(1, &priv->bh_tx);

	tx_len = priv->hwbus_ops->align_size(
		priv->hwbus_priv, tx_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
		pr_debug("Write aligned len: %zu\n", tx_len);

	/* Stamp the current TX sequence number into the WSM id field. */
	wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
	wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));

	if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
		pr_err("tx blew up, len %zu\n", tx_len);
		wsm_release_tx_buffer(priv, 1);
		return -1; /* Error */
	}

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("--> ",
				     DUMP_PREFIX_NONE,
				     data,
				     __le16_to_cpu(wsm->len));

	wsm_txed(priv, data);
	priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;

	if (*tx_burst > 1) {
		cw1200_debug_tx_burst(priv);
		return 1; /* Work remains */
	}

	return 0;
}
422
/* The bottom-half main loop, run from the cw1200_bh workqueue.
 *
 * Waits on bh_wq for rx/tx/term/suspend events (with a timeout used
 * both for power-down and missed-interrupt detection), then services
 * RX and TX via the helpers above.  Exits on termination request or on
 * any fatal error, in which case bh_error is raised.
 */
static int cw1200_bh(void *arg)
{
	struct cw1200_common *priv = arg;
	int rx, tx, term, suspend;
	u16 ctrl_reg = 0;
	int tx_allowed;
	int pending_tx = 0;
	int tx_burst;
	long status;
	u32 dummy;
	int ret;

	for (;;) {
		/* Choose the wait timeout: 1s when we may power the
		 * device down or must watch for lost interrupts,
		 * otherwise wait indefinitely.
		 */
		if (!priv->hw_bufs_used &&
		    priv->powersave_enabled &&
		    !priv->device_can_sleep &&
		    !atomic_read(&priv->recent_scan)) {
			status = 1 * HZ;
			pr_debug("[BH] Device wakedown. No data.\n");
			cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
			priv->device_can_sleep = true;
		} else if (priv->hw_bufs_used) {
			/* Interrupt loss detection */
			status = 1 * HZ;
		} else {
			status = MAX_SCHEDULE_TIMEOUT;
		}

		/* Dummy Read for SDIO retry mechanism*/
		if ((priv->hw_type != -1) &&
		    (atomic_read(&priv->bh_rx) == 0) &&
		    (atomic_read(&priv->bh_tx) == 0))
			cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
					&dummy, sizeof(dummy));

		pr_debug("[BH] waiting ...\n");
		/* The condition expression also consumes the event
		 * counters (atomic_xchg to 0) as a side effect.
		 */
		status = wait_event_interruptible_timeout(priv->bh_wq, ({
			rx = atomic_xchg(&priv->bh_rx, 0);
			tx = atomic_xchg(&priv->bh_tx, 0);
			term = atomic_xchg(&priv->bh_term, 0);
			suspend = pending_tx ?
				0 : atomic_read(&priv->bh_suspend);
			(rx || tx || term || suspend || priv->bh_error);
		}), status);

		pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n",
			 rx, tx, term, suspend, status);

		/* Did an error occur? */
		if ((status < 0 && status != -ERESTARTSYS) ||
		    term || priv->bh_error) {
			break;
		}
		if (!status) {  /* wait_event timed out */
			unsigned long timestamp = jiffies;
			long timeout;
			int pending = 0;
			int i;

			/* Check to see if we have any outstanding frames */
			if (priv->hw_bufs_used && (!rx || !tx)) {
				wiphy_warn(priv->hw->wiphy,
					   "Missed interrupt? (%d frames outstanding)\n",
					   priv->hw_bufs_used);
				/* NOTE(review): rx is set here but the path
				 * falls through to `goto done`, so the flag
				 * is discarded before the rx label -- the
				 * actual recovery relies on the device IRQ
				 * still firing.  Verify intent.
				 */
				rx = 1;

				/* Get a timestamp of "oldest" frame */
				for (i = 0; i < 4; ++i)
					pending += cw1200_queue_get_xmit_timestamp(
						&priv->tx_queue[i],
						&timestamp,
						priv->pending_frame_id);

				/* Check if frame transmission is timed out.
				 * Add an extra second with respect to possible
				 * interrupt loss.
				 */
				timeout = timestamp +
					WSM_CMD_LAST_CHANCE_TIMEOUT +
					1 * HZ -
					jiffies;

				/* And terminate BH thread if the frame is "stuck" */
				if (pending && timeout < 0) {
					wiphy_warn(priv->hw->wiphy,
						   "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
						   priv->hw_bufs_used, pending,
						   timestamp, jiffies);
					break;
				}
			} else if (!priv->device_can_sleep &&
				   !atomic_read(&priv->recent_scan)) {
				pr_debug("[BH] Device wakedown. Timeout.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}
			goto done;
		} else if (suspend) {
			/* Host suspend handshake with cw1200_bh_suspend()/
			 * cw1200_bh_resume() via bh_suspend and bh_evt_wq.
			 */
			pr_debug("[BH] Device suspend.\n");
			if (priv->powersave_enabled) {
				pr_debug("[BH] Device wakedown. Suspend.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}

			atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
			wake_up(&priv->bh_evt_wq);
			status = wait_event_interruptible(priv->bh_wq,
							  CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
			if (status < 0) {
				wiphy_err(priv->hw->wiphy,
					  "Failed to wait for resume: %ld.\n",
					  status);
				break;
			}
			pr_debug("[BH] Device resume.\n");
			atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
			wake_up(&priv->bh_evt_wq);
			/* Force an RX pass after resume to resync state. */
			atomic_add(1, &priv->bh_rx);
			goto done;
		}

	rx:
		tx += pending_tx;
		pending_tx = 0;

		if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
			break;

		/* Don't bother trying to rx unless we have data to read */
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
			ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
			if (ret < 0)
				break;
			/* Double up here if there's more data.. */
			if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
				ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
				if (ret < 0)
					break;
			}
		}

	tx:
		if (tx) {
			tx = 0;

			BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
			tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
			tx_allowed = tx_burst > 0;

			if (!tx_allowed) {
				/* Buffers full.  Ensure we process tx
				 * after we handle rx..
				 */
				/* NOTE(review): tx was zeroed just above, so
				 * this stores 0 and the deferred-TX intent is
				 * lost; recovery happens only when an RX'd
				 * TX-confirm sets tx again.  Looks like it
				 * should be `pending_tx = 1` -- confirm
				 * before changing.
				 */
				pending_tx = tx;
				goto done_rx;
			}
			ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
			if (ret < 0)
				break;
			if (ret > 0) /* More to transmit */
				tx = ret;

			/* Re-read ctrl reg */
			if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
				break;
		}

	done_rx:
		if (priv->bh_error)
			break;
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
			goto rx;
		if (tx)
			goto tx;

	done:
		/* Re-enable device interrupts */
		priv->hwbus_ops->lock(priv->hwbus_priv);
		__cw1200_irq_enable(priv, 1);
		priv->hwbus_ops->unlock(priv->hwbus_priv);
	}

	/* Explicitly disable device interrupts */
	priv->hwbus_ops->lock(priv->hwbus_priv);
	__cw1200_irq_enable(priv, 0);
	priv->hwbus_ops->unlock(priv->hwbus_priv);

	if (!term) {
		pr_err("[BH] Fatal error, exiting.\n");
		priv->bh_error = 1;
		/* TODO: schedule_work(recovery) */
	}
	return 0;
}
diff --git a/drivers/net/wireless/cw1200/bh.h b/drivers/net/wireless/cw1200/bh.h
new file mode 100644
index 000000000000..af6a4853728f
--- /dev/null
+++ b/drivers/net/wireless/cw1200/bh.h
@@ -0,0 +1,28 @@
1/*
2 * Device handling thread interface for mac80211 ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef CW1200_BH_H
13#define CW1200_BH_H
14
15/* extern */ struct cw1200_common;
16
17int cw1200_register_bh(struct cw1200_common *priv);
18void cw1200_unregister_bh(struct cw1200_common *priv);
19void cw1200_irq_handler(struct cw1200_common *priv);
20void cw1200_bh_wakeup(struct cw1200_common *priv);
21int cw1200_bh_suspend(struct cw1200_common *priv);
22int cw1200_bh_resume(struct cw1200_common *priv);
23/* Must be called from BH thread. */
24void cw1200_enable_powersave(struct cw1200_common *priv,
25 bool enable);
26int wsm_release_tx_buffer(struct cw1200_common *priv, int count);
27
28#endif /* CW1200_BH_H */
diff --git a/drivers/net/wireless/cw1200/cw1200.h b/drivers/net/wireless/cw1200/cw1200.h
new file mode 100644
index 000000000000..1ad7d3602520
--- /dev/null
+++ b/drivers/net/wireless/cw1200/cw1200.h
@@ -0,0 +1,323 @@
1/*
2 * Common private data for ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * Based on the mac80211 Prism54 code, which is
8 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
9 *
10 * Based on the islsm (softmac prism54) driver, which is:
11 * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18#ifndef CW1200_H
19#define CW1200_H
20
21#include <linux/wait.h>
22#include <linux/mutex.h>
23#include <linux/workqueue.h>
24#include <net/mac80211.h>
25
26#include "queue.h"
27#include "wsm.h"
28#include "scan.h"
29#include "txrx.h"
30#include "pm.h"
31
32/* Forward declarations */
33struct hwbus_ops;
34struct task_struct;
35struct cw1200_debug_priv;
36struct firmware;
37
38#define CW1200_MAX_CTRL_FRAME_LEN (0x1000)
39
40#define CW1200_MAX_STA_IN_AP_MODE (5)
41#define CW1200_LINK_ID_AFTER_DTIM (CW1200_MAX_STA_IN_AP_MODE + 1)
42#define CW1200_LINK_ID_UAPSD (CW1200_MAX_STA_IN_AP_MODE + 2)
43#define CW1200_LINK_ID_MAX (CW1200_MAX_STA_IN_AP_MODE + 3)
44#define CW1200_MAX_REQUEUE_ATTEMPTS (5)
45
46#define CW1200_MAX_TID (8)
47
48#define CW1200_BLOCK_ACK_CNT (30)
49#define CW1200_BLOCK_ACK_THLD (800)
50#define CW1200_BLOCK_ACK_HIST (3)
51#define CW1200_BLOCK_ACK_INTERVAL (1 * HZ / CW1200_BLOCK_ACK_HIST)
52
53#define CW1200_JOIN_TIMEOUT (1 * HZ)
54#define CW1200_AUTH_TIMEOUT (5 * HZ)
55
56struct cw1200_ht_info {
57 struct ieee80211_sta_ht_cap ht_cap;
58 enum nl80211_channel_type channel_type;
59 u16 operation_mode;
60};
61
62/* Please keep order */
63enum cw1200_join_status {
64 CW1200_JOIN_STATUS_PASSIVE = 0,
65 CW1200_JOIN_STATUS_MONITOR,
66 CW1200_JOIN_STATUS_JOINING,
67 CW1200_JOIN_STATUS_PRE_STA,
68 CW1200_JOIN_STATUS_STA,
69 CW1200_JOIN_STATUS_IBSS,
70 CW1200_JOIN_STATUS_AP,
71};
72
73enum cw1200_link_status {
74 CW1200_LINK_OFF,
75 CW1200_LINK_RESERVE,
76 CW1200_LINK_SOFT,
77 CW1200_LINK_HARD,
78 CW1200_LINK_RESET,
79 CW1200_LINK_RESET_REMAP,
80};
81
82extern int cw1200_power_mode;
83extern const char * const cw1200_fw_types[];
84
85struct cw1200_link_entry {
86 unsigned long timestamp;
87 enum cw1200_link_status status;
88 enum cw1200_link_status prev_status;
89 u8 mac[ETH_ALEN];
90 u8 buffered[CW1200_MAX_TID];
91 struct sk_buff_head rx_queue;
92};
93
94struct cw1200_common {
95 /* interfaces to the rest of the stack */
96 struct ieee80211_hw *hw;
97 struct ieee80211_vif *vif;
98 struct device *pdev;
99
100 /* Statistics */
101 struct ieee80211_low_level_stats stats;
102
103 /* Our macaddr */
104 u8 mac_addr[ETH_ALEN];
105
106 /* Hardware interface */
107 const struct hwbus_ops *hwbus_ops;
108 struct hwbus_priv *hwbus_priv;
109
110 /* Hardware information */
111 enum {
112 HIF_9000_SILICON_VERSATILE = 0,
113 HIF_8601_VERSATILE,
114 HIF_8601_SILICON,
115 } hw_type;
116 enum {
117 CW1200_HW_REV_CUT10 = 10,
118 CW1200_HW_REV_CUT11 = 11,
119 CW1200_HW_REV_CUT20 = 20,
120 CW1200_HW_REV_CUT22 = 22,
121 CW1X60_HW_REV = 40,
122 } hw_revision;
123 int hw_refclk;
124 bool hw_have_5ghz;
125 const struct firmware *sdd;
126 char *sdd_path;
127
128 struct cw1200_debug_priv *debug;
129
130 struct workqueue_struct *workqueue;
131 struct mutex conf_mutex;
132
133 struct cw1200_queue tx_queue[4];
134 struct cw1200_queue_stats tx_queue_stats;
135 int tx_burst_idx;
136
137 /* firmware/hardware info */
138 unsigned int tx_hdr_len;
139
140 /* Radio data */
141 int output_power;
142
143 /* BBP/MAC state */
144 struct ieee80211_rate *rates;
145 struct ieee80211_rate *mcs_rates;
146 struct ieee80211_channel *channel;
147 struct wsm_edca_params edca;
148 struct wsm_tx_queue_params tx_queue_params;
149 struct wsm_mib_association_mode association_mode;
150 struct wsm_set_bss_params bss_params;
151 struct cw1200_ht_info ht_info;
152 struct wsm_set_pm powersave_mode;
153 struct wsm_set_pm firmware_ps_mode;
154 int cqm_rssi_thold;
155 unsigned cqm_rssi_hyst;
156 bool cqm_use_rssi;
157 int cqm_beacon_loss_count;
158 int channel_switch_in_progress;
159 wait_queue_head_t channel_switch_done;
160 u8 long_frame_max_tx_count;
161 u8 short_frame_max_tx_count;
162 int mode;
163 bool enable_beacon;
164 int beacon_int;
165 bool listening;
166 struct wsm_rx_filter rx_filter;
167 struct wsm_mib_multicast_filter multicast_filter;
168 bool has_multicast_subscription;
169 bool disable_beacon_filter;
170 struct work_struct update_filtering_work;
171 struct work_struct set_beacon_wakeup_period_work;
172
173 u8 ba_rx_tid_mask;
174 u8 ba_tx_tid_mask;
175
176 struct cw1200_pm_state pm_state;
177
178 struct wsm_p2p_ps_modeinfo p2p_ps_modeinfo;
179 struct wsm_uapsd_info uapsd_info;
180 bool setbssparams_done;
181 bool bt_present;
182 u8 conf_listen_interval;
183 u32 listen_interval;
184 u32 erp_info;
185 u32 rts_threshold;
186
187 /* BH */
188 atomic_t bh_rx;
189 atomic_t bh_tx;
190 atomic_t bh_term;
191 atomic_t bh_suspend;
192
193 struct workqueue_struct *bh_workqueue;
194 struct work_struct bh_work;
195
196 int bh_error;
197 wait_queue_head_t bh_wq;
198 wait_queue_head_t bh_evt_wq;
199 u8 buf_id_tx;
200 u8 buf_id_rx;
201 u8 wsm_rx_seq;
202 u8 wsm_tx_seq;
203 int hw_bufs_used;
204 bool powersave_enabled;
205 bool device_can_sleep;
206
207 /* Scan status */
208 struct cw1200_scan scan;
209 /* Keep cw1200 awake (WUP = 1) 1 second after each scan to avoid
210 * FW issue with sleeping/waking up.
211 */
212 atomic_t recent_scan;
213 struct delayed_work clear_recent_scan_work;
214
215 /* WSM */
216 struct wsm_startup_ind wsm_caps;
217 struct mutex wsm_cmd_mux;
218 struct wsm_buf wsm_cmd_buf;
219 struct wsm_cmd wsm_cmd;
220 wait_queue_head_t wsm_cmd_wq;
221 wait_queue_head_t wsm_startup_done;
222 int firmware_ready;
223 atomic_t tx_lock;
224
225 /* WSM debug */
226 int wsm_enable_wsm_dumps;
227
228 /* WSM Join */
229 enum cw1200_join_status join_status;
230 u32 pending_frame_id;
231 bool join_pending;
232 struct delayed_work join_timeout;
233 struct work_struct unjoin_work;
234 struct work_struct join_complete_work;
235 int join_complete_status;
236 int join_dtim_period;
237 bool delayed_unjoin;
238
239 /* TX/RX and security */
240 s8 wep_default_key_id;
241 struct work_struct wep_key_work;
242 u32 key_map;
243 struct wsm_add_key keys[WSM_KEY_MAX_INDEX + 1];
244
245 /* AP powersave */
246 u32 link_id_map;
247 struct cw1200_link_entry link_id_db[CW1200_MAX_STA_IN_AP_MODE];
248 struct work_struct link_id_work;
249 struct delayed_work link_id_gc_work;
250 u32 sta_asleep_mask;
251 u32 pspoll_mask;
252 bool aid0_bit_set;
253 spinlock_t ps_state_lock; /* Protect power save state */
254 bool buffered_multicasts;
255 bool tx_multicast;
256 struct work_struct set_tim_work;
257 struct work_struct set_cts_work;
258 struct work_struct multicast_start_work;
259 struct work_struct multicast_stop_work;
260 struct timer_list mcast_timeout;
261
262 /* WSM events and CQM implementation */
263 spinlock_t event_queue_lock; /* Protect event queue */
264 struct list_head event_queue;
265 struct work_struct event_handler;
266
267 struct delayed_work bss_loss_work;
268 spinlock_t bss_loss_lock; /* Protect BSS loss state */
269 int bss_loss_state;
270 u32 bss_loss_confirm_id;
271 int delayed_link_loss;
272 struct work_struct bss_params_work;
273
274 /* TX rate policy cache */
275 struct tx_policy_cache tx_policy_cache;
276 struct work_struct tx_policy_upload_work;
277
278 /* legacy PS mode switch in suspend */
279 int ps_mode_switch_in_progress;
280 wait_queue_head_t ps_mode_switch_done;
281
282 /* Workaround for WFD testcase 6.1.10*/
283 struct work_struct linkid_reset_work;
284 u8 action_frame_sa[ETH_ALEN];
285 u8 action_linkid;
286};
287
288struct cw1200_sta_priv {
289 int link_id;
290};
291
292/* interfaces for the drivers */
293int cw1200_core_probe(const struct hwbus_ops *hwbus_ops,
294 struct hwbus_priv *hwbus,
295 struct device *pdev,
296 struct cw1200_common **pself,
297 int ref_clk, const u8 *macaddr,
298 const char *sdd_path, bool have_5ghz);
299void cw1200_core_release(struct cw1200_common *self);
300
301#define FWLOAD_BLOCK_SIZE (1024)
302
303static inline int cw1200_is_ht(const struct cw1200_ht_info *ht_info)
304{
305 return ht_info->channel_type != NL80211_CHAN_NO_HT;
306}
307
308static inline int cw1200_ht_greenfield(const struct cw1200_ht_info *ht_info)
309{
310 return cw1200_is_ht(ht_info) &&
311 (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
312 !(ht_info->operation_mode &
313 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
314}
315
316static inline int cw1200_ht_ampdu_density(const struct cw1200_ht_info *ht_info)
317{
318 if (!cw1200_is_ht(ht_info))
319 return 0;
320 return ht_info->ht_cap.ampdu_density;
321}
322
323#endif /* CW1200_H */
diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c
new file mode 100644
index 000000000000..ebdcdf44f155
--- /dev/null
+++ b/drivers/net/wireless/cw1200/cw1200_sdio.c
@@ -0,0 +1,425 @@
1/*
2 * Mac80211 SDIO driver for ST-Ericsson CW1200 device
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/gpio.h>
14#include <linux/delay.h>
15#include <linux/mmc/host.h>
16#include <linux/mmc/sdio_func.h>
17#include <linux/mmc/card.h>
18#include <linux/mmc/sdio.h>
19#include <net/mac80211.h>
20
21#include "cw1200.h"
22#include "hwbus.h"
23#include <linux/platform_data/net-cw1200.h>
24#include "hwio.h"
25
26MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>");
27MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SDIO driver");
28MODULE_LICENSE("GPL");
29
30#define SDIO_BLOCK_SIZE (512)
31
32/* Default platform data for Sagrad modules */
33static struct cw1200_platform_data_sdio sagrad_109x_evk_platform_data = {
34 .ref_clk = 38400,
35 .have_5ghz = false,
36 .sdd_file = "sdd_sagrad_1091_1098.bin",
37};
38
39/* Allow platform data to be overridden */
40static struct cw1200_platform_data_sdio *global_plat_data = &sagrad_109x_evk_platform_data;
41
/* Board-setup hook: replace the default (Sagrad EVK) platform data.
 * Must be called before the SDIO function is probed (__init time);
 * the pointer is stored as-is, so pdata must stay valid.
 */
void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata)
{
	global_plat_data = pdata;
}
46
47struct hwbus_priv {
48 struct sdio_func *func;
49 struct cw1200_common *core;
50 const struct cw1200_platform_data_sdio *pdata;
51};
52
53#ifndef SDIO_VENDOR_ID_STE
54#define SDIO_VENDOR_ID_STE 0x0020
55#endif
56
57#ifndef SDIO_DEVICE_ID_STE_CW1200
58#define SDIO_DEVICE_ID_STE_CW1200 0x2280
59#endif
60
61static const struct sdio_device_id cw1200_sdio_ids[] = {
62 { SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) },
63 { /* end: all zeroes */ },
64};
65
66/* hwbus_ops implemetation */
67
/* hwbus_ops: bulk read from the device via the SDIO function.
 * Caller must hold the host (see cw1200_sdio_lock).
 */
static int cw1200_sdio_memcpy_fromio(struct hwbus_priv *self,
				     unsigned int addr,
				     void *dst, int count)
{
	return sdio_memcpy_fromio(self->func, dst, addr, count);
}
74
/* hwbus_ops: bulk write to the device via the SDIO function.
 * The const is cast away only because sdio_memcpy_toio() takes a
 * non-const buffer; the data is not modified.
 */
static int cw1200_sdio_memcpy_toio(struct hwbus_priv *self,
				   unsigned int addr,
				   const void *src, int count)
{
	return sdio_memcpy_toio(self->func, addr, (void *)src, count);
}
81
/* hwbus_ops: serialize bus access by claiming the SDIO host. */
static void cw1200_sdio_lock(struct hwbus_priv *self)
{
	sdio_claim_host(self->func);
}
86
/* hwbus_ops: release the SDIO host claimed by cw1200_sdio_lock(). */
static void cw1200_sdio_unlock(struct hwbus_priv *self)
{
	sdio_release_host(self->func);
}
91
/* In-band SDIO interrupt handler (used when no GPIO IRQ is configured).
 * Forwards to the core only once probe has set self->core.
 */
static void cw1200_sdio_irq_handler(struct sdio_func *func)
{
	struct hwbus_priv *self = sdio_get_drvdata(func);

	/* note:  sdio_host already claimed here. */
	if (self->core)
		cw1200_irq_handler(self->core);
}
100
/* Hard-IRQ half of the GPIO out-of-band interrupt: defer all work to
 * the threaded handler (SDIO access may sleep).
 */
static irqreturn_t cw1200_gpio_hardirq(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}
105
106static irqreturn_t cw1200_gpio_irq(int irq, void *dev_id)
107{
108 struct hwbus_priv *self = dev_id;
109
110 if (self->core) {
111 sdio_claim_host(self->func);
112 cw1200_irq_handler(self->core);
113 sdio_release_host(self->func);
114 return IRQ_HANDLED;
115 } else {
116 return IRQ_NONE;
117 }
118}
119
120static int cw1200_request_irq(struct hwbus_priv *self)
121{
122 int ret;
123 u8 cccr;
124
125 cccr = sdio_f0_readb(self->func, SDIO_CCCR_IENx, &ret);
126 if (WARN_ON(ret))
127 goto err;
128
129 /* Master interrupt enable ... */
130 cccr |= BIT(0);
131
132 /* ... for our function */
133 cccr |= BIT(self->func->num);
134
135 sdio_f0_writeb(self->func, cccr, SDIO_CCCR_IENx, &ret);
136 if (WARN_ON(ret))
137 goto err;
138
139 ret = enable_irq_wake(self->pdata->irq);
140 if (WARN_ON(ret))
141 goto err;
142
143 /* Request the IRQ */
144 ret = request_threaded_irq(self->pdata->irq, cw1200_gpio_hardirq,
145 cw1200_gpio_irq,
146 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
147 "cw1200_wlan_irq", self);
148 if (WARN_ON(ret))
149 goto err;
150
151 return 0;
152
153err:
154 return ret;
155}
156
157static int cw1200_sdio_irq_subscribe(struct hwbus_priv *self)
158{
159 int ret = 0;
160
161 pr_debug("SW IRQ subscribe\n");
162 sdio_claim_host(self->func);
163 if (self->pdata->irq)
164 ret = cw1200_request_irq(self);
165 else
166 ret = sdio_claim_irq(self->func, cw1200_sdio_irq_handler);
167
168 sdio_release_host(self->func);
169 return ret;
170}
171
/* Tear down whichever interrupt source cw1200_sdio_irq_subscribe()
 * installed: GPIO IRQ (wake disable + free) or in-band SDIO IRQ.
 */
static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self)
{
	int ret = 0;

	pr_debug("SW IRQ unsubscribe\n");

	if (self->pdata->irq) {
		disable_irq_wake(self->pdata->irq);
		free_irq(self->pdata->irq, self);
	} else {
		/* The in-band path needs the host claimed for release. */
		sdio_claim_host(self->func);
		ret = sdio_release_irq(self->func);
		sdio_release_host(self->func);
	}
	return ret;
}
188
/* Power the module down: assert reset (active low), then drop board
 * power and the 32 kHz clock via the optional platform callbacks.
 * Always returns 0.
 */
static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata)
{
	if (pdata->reset) {
		gpio_set_value(pdata->reset, 0);
		msleep(30); /* Min is 2 * CLK32K cycles */
		gpio_free(pdata->reset);
	}

	if (pdata->power_ctrl)
		pdata->power_ctrl(pdata, false);
	if (pdata->clk_ctrl)
		pdata->clk_ctrl(pdata, false);

	return 0;
}
204
/* Power the module up: hold reset/powerup low, enable rails and the
 * 32 kHz clock, then raise POWERUP and RSTn with settle delays.
 * Returns 0 on success, -1 if a platform power/clock callback fails.
 *
 * NOTE(review): gpio_request() return values are ignored here; if a
 * GPIO is busy the subsequent gpio_* calls act on an unowned line.
 * Consider checking and bailing out -- confirm against board code.
 */
static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata)
{
	/* Ensure I/Os are pulled low */
	if (pdata->reset) {
		gpio_request(pdata->reset, "cw1200_wlan_reset");
		gpio_direction_output(pdata->reset, 0);
	}
	if (pdata->powerup) {
		gpio_request(pdata->powerup, "cw1200_wlan_powerup");
		gpio_direction_output(pdata->powerup, 0);
	}
	if (pdata->reset || pdata->powerup)
		msleep(10); /* Settle time? */

	/* Enable 3v3 and 1v8 to hardware */
	if (pdata->power_ctrl) {
		if (pdata->power_ctrl(pdata, true)) {
			pr_err("power_ctrl() failed!\n");
			return -1;
		}
	}

	/* Enable CLK32K */
	if (pdata->clk_ctrl) {
		if (pdata->clk_ctrl(pdata, true)) {
			pr_err("clk_ctrl() failed!\n");
			return -1;
		}
		msleep(10); /* Delay until clock is stable for 2 cycles */
	}

	/* Enable POWERUP signal */
	if (pdata->powerup) {
		gpio_set_value(pdata->powerup, 1);
		msleep(250); /* or more..? */
	}
	/* Enable RSTn signal */
	if (pdata->reset) {
		gpio_set_value(pdata->reset, 1);
		msleep(50); /* Or more..? */
	}
	return 0;
}
248
249static size_t cw1200_sdio_align_size(struct hwbus_priv *self, size_t size)
250{
251 if (self->pdata->no_nptb)
252 size = round_up(size, SDIO_BLOCK_SIZE);
253 else
254 size = sdio_align_size(self->func, size);
255
256 return size;
257}
258
259static int cw1200_sdio_pm(struct hwbus_priv *self, bool suspend)
260{
261 int ret = 0;
262
263 if (self->pdata->irq)
264 ret = irq_set_irq_wake(self->pdata->irq, suspend);
265 return ret;
266}
267
/* Bus abstraction handed to the cw1200 core so common code can drive
 * the chip without knowing it sits on SDIO (see hwbus.h for contract).
 */
static struct hwbus_ops cw1200_sdio_hwbus_ops = {
	.hwbus_memcpy_fromio	= cw1200_sdio_memcpy_fromio,
	.hwbus_memcpy_toio	= cw1200_sdio_memcpy_toio,
	.lock			= cw1200_sdio_lock,
	.unlock			= cw1200_sdio_unlock,
	.align_size		= cw1200_sdio_align_size,
	.power_mgmt		= cw1200_sdio_pm,
};
276
277/* Probe Function to be called by SDIO stack when device is discovered */
278static int cw1200_sdio_probe(struct sdio_func *func,
279 const struct sdio_device_id *id)
280{
281 struct hwbus_priv *self;
282 int status;
283
284 pr_info("cw1200_wlan_sdio: Probe called\n");
285
286 /* We are only able to handle the wlan function */
287 if (func->num != 0x01)
288 return -ENODEV;
289
290 self = kzalloc(sizeof(*self), GFP_KERNEL);
291 if (!self) {
292 pr_err("Can't allocate SDIO hwbus_priv.\n");
293 return -ENOMEM;
294 }
295
296 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
297
298 self->pdata = global_plat_data; /* FIXME */
299 self->func = func;
300 sdio_set_drvdata(func, self);
301 sdio_claim_host(func);
302 sdio_enable_func(func);
303 sdio_release_host(func);
304
305 status = cw1200_sdio_irq_subscribe(self);
306
307 status = cw1200_core_probe(&cw1200_sdio_hwbus_ops,
308 self, &func->dev, &self->core,
309 self->pdata->ref_clk,
310 self->pdata->macaddr,
311 self->pdata->sdd_file,
312 self->pdata->have_5ghz);
313 if (status) {
314 cw1200_sdio_irq_unsubscribe(self);
315 sdio_claim_host(func);
316 sdio_disable_func(func);
317 sdio_release_host(func);
318 sdio_set_drvdata(func, NULL);
319 kfree(self);
320 }
321
322 return status;
323}
324
325/* Disconnect Function to be called by SDIO stack when
326 * device is disconnected
327 */
328static void cw1200_sdio_disconnect(struct sdio_func *func)
329{
330 struct hwbus_priv *self = sdio_get_drvdata(func);
331
332 if (self) {
333 cw1200_sdio_irq_unsubscribe(self);
334 if (self->core) {
335 cw1200_core_release(self->core);
336 self->core = NULL;
337 }
338 sdio_claim_host(func);
339 sdio_disable_func(func);
340 sdio_release_host(func);
341 sdio_set_drvdata(func, NULL);
342 kfree(self);
343 }
344}
345
#ifdef CONFIG_PM
/* System suspend hook: veto suspend while the core is busy, otherwise
 * ask the host controller to keep the card powered (firmware state is
 * not reloaded on resume).
 */
static int cw1200_sdio_suspend(struct device *dev)
{
	int ret;
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct hwbus_priv *self = sdio_get_drvdata(func);

	if (!cw1200_can_suspend(self->core))
		return -EAGAIN;

	/* Notify SDIO that CW1200 will remain powered during suspend */
	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (ret)
		pr_err("Error setting SDIO pm flags: %i\n", ret);

	return ret;
}

/* Nothing to restore: the card stayed powered across suspend. */
static int cw1200_sdio_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops cw1200_pm_ops = {
	.suspend = cw1200_sdio_suspend,
	.resume = cw1200_sdio_resume,
};
#endif
374
/* SDIO driver descriptor; id_table matches the CW1200 WLAN function. */
static struct sdio_driver sdio_driver = {
	.name		= "cw1200_wlan_sdio",
	.id_table	= cw1200_sdio_ids,
	.probe		= cw1200_sdio_probe,
	.remove		= cw1200_sdio_disconnect,
#ifdef CONFIG_PM
	.drv = {
		.pm = &cw1200_pm_ops,
	}
#endif
};
386
387/* Init Module function -> Called by insmod */
388static int __init cw1200_sdio_init(void)
389{
390 const struct cw1200_platform_data_sdio *pdata;
391 int ret;
392
393 /* FIXME -- this won't support multiple devices */
394 pdata = global_plat_data;
395
396 if (cw1200_sdio_on(pdata)) {
397 ret = -1;
398 goto err;
399 }
400
401 ret = sdio_register_driver(&sdio_driver);
402 if (ret)
403 goto err;
404
405 return 0;
406
407err:
408 cw1200_sdio_off(pdata);
409 return ret;
410}
411
412/* Called at Driver Unloading */
413static void __exit cw1200_sdio_exit(void)
414{
415 const struct cw1200_platform_data_sdio *pdata;
416
417 /* FIXME -- this won't support multiple devices */
418 pdata = global_plat_data;
419 sdio_unregister_driver(&sdio_driver);
420 cw1200_sdio_off(pdata);
421}
422
423
/* Standard module entry/exit registration. */
module_init(cw1200_sdio_init);
module_exit(cw1200_sdio_exit);
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
new file mode 100644
index 000000000000..d06376014bcd
--- /dev/null
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -0,0 +1,471 @@
1/*
2 * Mac80211 SPI driver for ST-Ericsson CW1200 device
3 *
4 * Copyright (c) 2011, Sagrad Inc.
5 * Author: Solomon Peachy <speachy@sagrad.com>
6 *
7 * Based on cw1200_sdio.c
8 * Copyright (c) 2010, ST-Ericsson
9 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/module.h>
17#include <linux/gpio.h>
18#include <linux/delay.h>
19#include <linux/spinlock.h>
20#include <linux/interrupt.h>
21#include <net/mac80211.h>
22
23#include <linux/spi/spi.h>
24#include <linux/device.h>
25
26#include "cw1200.h"
27#include "hwbus.h"
28#include <linux/platform_data/net-cw1200.h>
29#include "hwio.h"
30
31MODULE_AUTHOR("Solomon Peachy <speachy@sagrad.com>");
32MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SPI driver");
33MODULE_LICENSE("GPL");
34MODULE_ALIAS("spi:cw1200_wlan_spi");
35
36/* #define SPI_DEBUG */
37
/* Per-device SPI bus state shared by all hwbus callbacks. */
struct hwbus_priv {
	struct spi_device *func;	/* underlying SPI device */
	struct cw1200_common *core;	/* driver core, set by probe */
	const struct cw1200_platform_data_spi *pdata;
	spinlock_t lock; /* Serialize all bus operations */
	int claimed;	/* non-zero while a caller holds the bus */
};

/* Map an SDIO-style register address into the 4-bit SPI address field */
#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
#define SET_WRITE 0x7FFF /* usage: and operation */
#define SET_READ 0x8000 /* usage: or operation */
49
50/* Notes on byte ordering:
51 LE: B0 B1 B2 B3
52 BE: B3 B2 B1 B0
53
54 Hardware expects 32-bit data to be written as 16-bit BE words:
55
56 B1 B0 B3 B2
57*/
58
/* Read 'count' bytes from device register 'addr' into 'dst' over SPI.
 * A 16-bit command header (address, READ flag, 16-bit word count) is
 * sent first, then the payload is clocked in.
 * Returns the spi_sync() result (0 on success, negative errno).
 * NOTE(review): 'count' is encoded as count>>1 16-bit words — presumably
 * callers always pass an even length; confirm against align_size().
 */
static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self,
				    unsigned int addr,
				    void *dst, int count)
{
	int ret, i;
	u16 regaddr;
	struct spi_message m;

	struct spi_transfer t_addr = {
		.tx_buf = &regaddr,
		.len = sizeof(regaddr),
	};
	struct spi_transfer t_msg = {
		.rx_buf = dst,
		.len = count,
	};

	/* Build the command header: addr in the top nibble, READ flag,
	 * payload length in 16-bit words */
	regaddr = (SDIO_TO_SPI_ADDR(addr))<<12;
	regaddr |= SET_READ;
	regaddr |= (count>>1);

#ifdef SPI_DEBUG
	pr_info("READ : %04d from 0x%02x (%04x)\n", count, addr, regaddr);
#endif

	/* Header is LE16 */
	regaddr = cpu_to_le16(regaddr);

	/* We have to byteswap if the SPI bus is limited to 8b operation
	   or we are running on a Big Endian system
	 */
#if defined(__LITTLE_ENDIAN)
	if (self->func->bits_per_word == 8)
#endif
		regaddr = swab16(regaddr);

	spi_message_init(&m);
	spi_message_add_tail(&t_addr, &m);
	spi_message_add_tail(&t_msg, &m);
	ret = spi_sync(self->func, &m);

#ifdef SPI_DEBUG
	pr_info("READ : ");
	for (i = 0; i < t_addr.len; i++)
		printk("%02x ", ((u8 *)t_addr.tx_buf)[i]);
	printk(" : ");
	for (i = 0; i < t_msg.len; i++)
		printk("%02x ", ((u8 *)t_msg.rx_buf)[i]);
	printk("\n");
#endif

	/* We have to byteswap if the SPI bus is limited to 8b operation
	   or we are running on a Big Endian system
	 */
#if defined(__LITTLE_ENDIAN)
	if (self->func->bits_per_word == 8)
#endif
	{
		/* Swap the received payload back to CPU 16-bit order */
		uint16_t *buf = (uint16_t *)dst;
		for (i = 0; i < ((count + 1) >> 1); i++)
			buf[i] = swab16(buf[i]);
	}

	return ret;
}
124
/* Write 'count' bytes from 'src' to device register 'addr' over SPI:
 * 16-bit command header (address, WRITE, 16-bit word count), then the
 * payload.  Returns the spi_sync() result.
 *
 * NOTE(review): despite the const qualifier, 'src' is byte-swapped IN
 * PLACE before the transfer and swapped back afterwards (on 8-bit buses
 * or big-endian hosts).  Callers must therefore not share this buffer
 * concurrently — confirm all callers tolerate the transient mutation.
 */
static int cw1200_spi_memcpy_toio(struct hwbus_priv *self,
				  unsigned int addr,
				  const void *src, int count)
{
	int rval, i;
	u16 regaddr;
	struct spi_transfer t_addr = {
		.tx_buf = &regaddr,
		.len = sizeof(regaddr),
	};
	struct spi_transfer t_msg = {
		.tx_buf = src,
		.len = count,
	};
	struct spi_message m;

	/* Command header: addr nibble, WRITE flag, length in 16-bit words */
	regaddr = (SDIO_TO_SPI_ADDR(addr))<<12;
	regaddr &= SET_WRITE;
	regaddr |= (count>>1);

#ifdef SPI_DEBUG
	pr_info("WRITE: %04d to 0x%02x (%04x)\n", count, addr, regaddr);
#endif

	/* Header is LE16 */
	regaddr = cpu_to_le16(regaddr);

	/* We have to byteswap if the SPI bus is limited to 8b operation
	   or we are running on a Big Endian system
	 */
#if defined(__LITTLE_ENDIAN)
	if (self->func->bits_per_word == 8)
#endif
	{
		/* Casts away const: temporary in-place swab, undone below */
		uint16_t *buf = (uint16_t *)src;
		regaddr = swab16(regaddr);
		for (i = 0; i < ((count + 1) >> 1); i++)
			buf[i] = swab16(buf[i]);
	}

#ifdef SPI_DEBUG
	pr_info("WRITE: ");
	for (i = 0; i < t_addr.len; i++)
		printk("%02x ", ((u8 *)t_addr.tx_buf)[i]);
	printk(" : ");
	for (i = 0; i < t_msg.len; i++)
		printk("%02x ", ((u8 *)t_msg.tx_buf)[i]);
	printk("\n");
#endif

	spi_message_init(&m);
	spi_message_add_tail(&t_addr, &m);
	spi_message_add_tail(&t_msg, &m);
	rval = spi_sync(self->func, &m);

#ifdef SPI_DEBUG
	pr_info("WROTE: %d\n", m.actual_length);
#endif

#if defined(__LITTLE_ENDIAN)
	/* We have to byteswap if the SPI bus is limited to 8b operation */
	if (self->func->bits_per_word == 8)
#endif
	{
		/* Restore the caller's buffer to its original byte order */
		uint16_t *buf = (uint16_t *)src;
		for (i = 0; i < ((count + 1) >> 1); i++)
			buf[i] = swab16(buf[i]);
	}
	return rval;
}
195
/* Acquire exclusive use of the SPI bus (hwbus .lock callback).
 * Hand-rolled sleeping lock: the spinlock only protects 'claimed'; if
 * the bus is already claimed the task marks itself UNINTERRUPTIBLE,
 * drops the spinlock and schedules until the holder clears 'claimed'.
 * May sleep; must not be called from atomic context (might_sleep()).
 * NOTE(review): there is no wait-queue wakeup pairing — the unlocker
 * relies on the waiter being rescheduled; confirm this is intentional.
 */
static void cw1200_spi_lock(struct hwbus_priv *self)
{
	unsigned long flags;

	might_sleep();

	spin_lock_irqsave(&self->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!self->claimed)
			break;
		spin_unlock_irqrestore(&self->lock, flags);
		schedule();
		spin_lock_irqsave(&self->lock, flags);
	}
	/* We hold the spinlock with claimed==0: take ownership */
	set_current_state(TASK_RUNNING);
	self->claimed = 1;
	spin_unlock_irqrestore(&self->lock, flags);

	return;
}
217
218static void cw1200_spi_unlock(struct hwbus_priv *self)
219{
220 unsigned long flags;
221
222 spin_lock_irqsave(&self->lock, flags);
223 self->claimed = 0;
224 spin_unlock_irqrestore(&self->lock, flags);
225 return;
226}
227
228static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
229{
230 struct hwbus_priv *self = dev_id;
231
232 if (self->core) {
233 cw1200_irq_handler(self->core);
234 return IRQ_HANDLED;
235 } else {
236 return IRQ_NONE;
237 }
238}
239
240static int cw1200_spi_irq_subscribe(struct hwbus_priv *self)
241{
242 int ret;
243
244 pr_debug("SW IRQ subscribe\n");
245
246 ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler,
247 IRQF_TRIGGER_HIGH,
248 "cw1200_wlan_irq", self);
249 if (WARN_ON(ret < 0))
250 goto exit;
251
252 ret = enable_irq_wake(self->func->irq);
253 if (WARN_ON(ret))
254 goto free_irq;
255
256 return 0;
257
258free_irq:
259 free_irq(self->func->irq, self);
260exit:
261 return ret;
262}
263
264static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
265{
266 int ret = 0;
267
268 pr_debug("SW IRQ unsubscribe\n");
269 disable_irq_wake(self->func->irq);
270 free_irq(self->func->irq, self);
271
272 return ret;
273}
274
/* Power the chip down: assert reset, then drop power rails and the
 * 32 kHz clock via platform callbacks.  Mirror of cw1200_spi_on();
 * always returns 0.
 */
static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
{
	if (pdata->reset) {
		/* Drive RSTn low before releasing the GPIO */
		gpio_set_value(pdata->reset, 0);
		msleep(30); /* Min is 2 * CLK32K cycles */
		gpio_free(pdata->reset);
	}

	if (pdata->power_ctrl)
		pdata->power_ctrl(pdata, false);
	if (pdata->clk_ctrl)
		pdata->clk_ctrl(pdata, false);

	return 0;
}
290
/* Power-up sequence (identical to the SDIO variant): pull reset/powerup
 * low, enable supplies and CLK32K, then raise POWERUP and RSTn.
 * Returns 0 on success, -1 if a platform power/clock callback fails.
 * NOTE(review): gpio_request() results are unchecked — see the SDIO
 * counterpart; confirm whether errors should propagate.
 */
static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata)
{
	/* Ensure I/Os are pulled low */
	if (pdata->reset) {
		gpio_request(pdata->reset, "cw1200_wlan_reset");
		gpio_direction_output(pdata->reset, 0);
	}
	if (pdata->powerup) {
		gpio_request(pdata->powerup, "cw1200_wlan_powerup");
		gpio_direction_output(pdata->powerup, 0);
	}
	if (pdata->reset || pdata->powerup)
		msleep(10); /* Settle time? */

	/* Enable 3v3 and 1v8 to hardware */
	if (pdata->power_ctrl) {
		if (pdata->power_ctrl(pdata, true)) {
			pr_err("power_ctrl() failed!\n");
			return -1;
		}
	}

	/* Enable CLK32K */
	if (pdata->clk_ctrl) {
		if (pdata->clk_ctrl(pdata, true)) {
			pr_err("clk_ctrl() failed!\n");
			return -1;
		}
		msleep(10); /* Delay until clock is stable for 2 cycles */
	}

	/* Enable POWERUP signal */
	if (pdata->powerup) {
		gpio_set_value(pdata->powerup, 1);
		msleep(250); /* or more..? */
	}
	/* Enable RSTn signal */
	if (pdata->reset) {
		gpio_set_value(pdata->reset, 1);
		msleep(50); /* Or more..? */
	}
	return 0;
}
334
335static size_t cw1200_spi_align_size(struct hwbus_priv *self, size_t size)
336{
337 return size & 1 ? size + 1 : size;
338}
339
340static int cw1200_spi_pm(struct hwbus_priv *self, bool suspend)
341{
342 return irq_set_irq_wake(self->func->irq, suspend);
343}
344
/* Bus abstraction handed to the cw1200 core; SPI flavour of the same
 * hwbus_ops contract implemented by the SDIO driver (see hwbus.h).
 */
static struct hwbus_ops cw1200_spi_hwbus_ops = {
	.hwbus_memcpy_fromio	= cw1200_spi_memcpy_fromio,
	.hwbus_memcpy_toio	= cw1200_spi_memcpy_toio,
	.lock			= cw1200_spi_lock,
	.unlock			= cw1200_spi_unlock,
	.align_size		= cw1200_spi_align_size,
	.power_mgmt		= cw1200_spi_pm,
};
353
354/* Probe Function to be called by SPI stack when device is discovered */
355static int cw1200_spi_probe(struct spi_device *func)
356{
357 const struct cw1200_platform_data_spi *plat_data =
358 func->dev.platform_data;
359 struct hwbus_priv *self;
360 int status;
361
362 /* Sanity check speed */
363 if (func->max_speed_hz > 52000000)
364 func->max_speed_hz = 52000000;
365 if (func->max_speed_hz < 1000000)
366 func->max_speed_hz = 1000000;
367
368 /* Fix up transfer size */
369 if (plat_data->spi_bits_per_word)
370 func->bits_per_word = plat_data->spi_bits_per_word;
371 if (!func->bits_per_word)
372 func->bits_per_word = 16;
373
374 /* And finally.. */
375 func->mode = SPI_MODE_0;
376
377 pr_info("cw1200_wlan_spi: Probe called (CS %d M %d BPW %d CLK %d)\n",
378 func->chip_select, func->mode, func->bits_per_word,
379 func->max_speed_hz);
380
381 if (cw1200_spi_on(plat_data)) {
382 pr_err("spi_on() failed!\n");
383 return -1;
384 }
385
386 if (spi_setup(func)) {
387 pr_err("spi_setup() failed!\n");
388 return -1;
389 }
390
391 self = kzalloc(sizeof(*self), GFP_KERNEL);
392 if (!self) {
393 pr_err("Can't allocate SPI hwbus_priv.");
394 return -ENOMEM;
395 }
396
397 self->pdata = plat_data;
398 self->func = func;
399 spin_lock_init(&self->lock);
400
401 spi_set_drvdata(func, self);
402
403 status = cw1200_spi_irq_subscribe(self);
404
405 status = cw1200_core_probe(&cw1200_spi_hwbus_ops,
406 self, &func->dev, &self->core,
407 self->pdata->ref_clk,
408 self->pdata->macaddr,
409 self->pdata->sdd_file,
410 self->pdata->have_5ghz);
411
412 if (status) {
413 cw1200_spi_irq_unsubscribe(self);
414 cw1200_spi_off(plat_data);
415 kfree(self);
416 }
417
418 return status;
419}
420
421/* Disconnect Function to be called by SPI stack when device is disconnected */
422static int cw1200_spi_disconnect(struct spi_device *func)
423{
424 struct hwbus_priv *self = spi_get_drvdata(func);
425
426 if (self) {
427 cw1200_spi_irq_unsubscribe(self);
428 if (self->core) {
429 cw1200_core_release(self->core);
430 self->core = NULL;
431 }
432 kfree(self);
433 }
434 cw1200_spi_off(func->dev.platform_data);
435
436 return 0;
437}
438
#ifdef CONFIG_PM
/* Legacy (pm_message_t) suspend hook wired via driver.suspend below.
 * Vetoes suspend while the core is busy.
 */
static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
{
	struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));

	if (!cw1200_can_suspend(self->core))
		return -EAGAIN;

	/* XXX notify host that we have to keep CW1200 powered on? */
	return 0;
}

/* Nothing to restore on resume. */
static int cw1200_spi_resume(struct device *dev)
{
	return 0;
}
#endif
456
/* SPI driver descriptor.
 * NOTE(review): uses the legacy driver-model .suspend/.resume callbacks
 * rather than dev_pm_ops (unlike the SDIO sibling) — confirm intended.
 */
static struct spi_driver spi_driver = {
	.probe = cw1200_spi_probe,
	.remove = cw1200_spi_disconnect,
	.driver = {
		.name		= "cw1200_wlan_spi",
		.bus            = &spi_bus_type,
		.owner          = THIS_MODULE,
#ifdef CONFIG_PM
		.suspend        = cw1200_spi_suspend,
		.resume         = cw1200_spi_resume,
#endif
	},
};
470
/* Generates the module init/exit boilerplate for this SPI driver. */
module_spi_driver(spi_driver);
diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/cw1200/debug.c
new file mode 100644
index 000000000000..e323b4d54338
--- /dev/null
+++ b/drivers/net/wireless/cw1200/debug.c
@@ -0,0 +1,428 @@
1/*
2 * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers
3 * DebugFS code
4 *
5 * Copyright (c) 2010, ST-Ericsson
6 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/debugfs.h>
15#include <linux/seq_file.h>
16#include "cw1200.h"
17#include "debug.h"
18#include "fwio.h"
19
/* join_status -> human-readable name, indexed by priv->join_status
 * (order must match the CW1200_JOIN_STATUS_* enum) */
static const char * const cw1200_debug_join_status[] = {
	"passive",
	"monitor",
	"station (joining)",
	"station (not authenticated yet)",
	"station",
	"adhoc",
	"access point",
};

/* WSM_JOIN_PREAMBLE_... -> human-readable name */
static const char * const cw1200_debug_preamble[] = {
	"long",
	"short",
	"long on 1 and 2 Mbps",
};

/* Per-station link-id state names, indexed by link_id_db[].status */
static const char * const cw1200_debug_link_id[] = {
	"OFF",
	"REQ",
	"SOFT",
	"HARD",
};
45
46static const char *cw1200_debug_mode(int mode)
47{
48 switch (mode) {
49 case NL80211_IFTYPE_UNSPECIFIED:
50 return "unspecified";
51 case NL80211_IFTYPE_MONITOR:
52 return "monitor";
53 case NL80211_IFTYPE_STATION:
54 return "station";
55 case NL80211_IFTYPE_ADHOC:
56 return "adhoc";
57 case NL80211_IFTYPE_MESH_POINT:
58 return "mesh point";
59 case NL80211_IFTYPE_AP:
60 return "access point";
61 case NL80211_IFTYPE_P2P_CLIENT:
62 return "p2p client";
63 case NL80211_IFTYPE_P2P_GO:
64 return "p2p go";
65 default:
66 return "unsupported";
67 }
68}
69
/* Dump one TX queue's counters and per-link-id queued-frame map into a
 * debugfs seq_file.
 */
static void cw1200_queue_status_show(struct seq_file *seq,
				     struct cw1200_queue *q)
{
	int i;
	seq_printf(seq, "Queue       %d:\n", q->queue_id);
	seq_printf(seq, "  capacity: %zu\n", q->capacity);
	seq_printf(seq, "  queued:   %zu\n", q->num_queued);
	seq_printf(seq, "  pending:  %zu\n", q->num_pending);
	seq_printf(seq, "  sent:     %zu\n", q->num_sent);
	seq_printf(seq, "  locked:   %s\n", q->tx_locked_cnt ? "yes" : "no");
	seq_printf(seq, "  overfull: %s\n", q->overfull ? "yes" : "no");
	seq_puts(seq,   "  link map: 0-> ");
	/* One cell per link id: number of frames cached for that peer */
	for (i = 0; i < q->stats->map_capacity; ++i)
		seq_printf(seq, "%.2d ", q->link_map_cache[i]);
	seq_printf(seq, "<-%zu\n", q->stats->map_capacity);
}
86
87static void cw1200_debug_print_map(struct seq_file *seq,
88 struct cw1200_common *priv,
89 const char *label,
90 u32 map)
91{
92 int i;
93 seq_printf(seq, "%s0-> ", label);
94 for (i = 0; i < priv->tx_queue_stats.map_capacity; ++i)
95 seq_printf(seq, "%s ", (map & BIT(i)) ? "**" : "..");
96 seq_printf(seq, "<-%zu\n", priv->tx_queue_stats.map_capacity - 1);
97}
98
/* seq_file 'show' for debugfs "status": a full dump of driver, link,
 * queue, bottom-half and traffic-counter state.  Read-only; the only
 * locks taken are the tx_policy cache and wsm_cmd spinlocks.
 */
static int cw1200_status_show(struct seq_file *seq, void *v)
{
	int i;
	struct list_head *item;
	struct cw1200_common *priv = seq->private;
	struct cw1200_debug_priv *d = priv->debug;

	/* Hardware / firmware identification */
	seq_puts(seq,   "CW1200 Wireless LAN driver status\n");
	seq_printf(seq, "Hardware:   %d.%d\n",
		   priv->wsm_caps.hw_id,
		   priv->wsm_caps.hw_subid);
	seq_printf(seq, "Firmware:   %s %d.%d\n",
		   cw1200_fw_types[priv->wsm_caps.fw_type],
		   priv->wsm_caps.fw_ver,
		   priv->wsm_caps.fw_build);
	seq_printf(seq, "FW API:     %d\n",
		   priv->wsm_caps.fw_api);
	seq_printf(seq, "FW caps:    0x%.4X\n",
		   priv->wsm_caps.fw_cap);
	seq_printf(seq, "FW label:  '%s'\n",
		   priv->wsm_caps.fw_label);
	/* Interface mode and association state */
	seq_printf(seq, "Mode:       %s%s\n",
		   cw1200_debug_mode(priv->mode),
		   priv->listening ? " (listening)" : "");
	seq_printf(seq, "Join state: %s\n",
		   cw1200_debug_join_status[priv->join_status]);
	if (priv->channel)
		seq_printf(seq, "Channel:    %d%s\n",
			   priv->channel->hw_value,
			   priv->channel_switch_in_progress ?
			   " (switching)" : "");
	/* Active RX filters */
	if (priv->rx_filter.promiscuous)
		seq_puts(seq,   "Filter:     promisc\n");
	else if (priv->rx_filter.fcs)
		seq_puts(seq,   "Filter:     fcs\n");
	if (priv->rx_filter.bssid)
		seq_puts(seq,   "Filter:     bssid\n");
	if (!priv->disable_beacon_filter)
		seq_puts(seq,   "Filter:     beacons\n");

	if (priv->enable_beacon ||
	    priv->mode == NL80211_IFTYPE_AP ||
	    priv->mode == NL80211_IFTYPE_ADHOC ||
	    priv->mode == NL80211_IFTYPE_MESH_POINT ||
	    priv->mode == NL80211_IFTYPE_P2P_GO)
		seq_printf(seq, "Beaconing:  %s\n",
			   priv->enable_beacon ?
			   "enabled" : "disabled");

	/* EDCA parameters for the four access categories */
	for (i = 0; i < 4; ++i)
		seq_printf(seq, "EDCA(%d):    %d, %d, %d, %d, %d\n", i,
			   priv->edca.params[i].cwmin,
			   priv->edca.params[i].cwmax,
			   priv->edca.params[i].aifns,
			   priv->edca.params[i].txop_limit,
			   priv->edca.params[i].max_rx_lifetime);

	/* STA-mode association details */
	if (priv->join_status == CW1200_JOIN_STATUS_STA) {
		static const char *pm_mode = "unknown";
		switch (priv->powersave_mode.mode) {
		case WSM_PSM_ACTIVE:
			pm_mode = "off";
			break;
		case WSM_PSM_PS:
			pm_mode = "on";
			break;
		case WSM_PSM_FAST_PS:
			pm_mode = "dynamic";
			break;
		}
		seq_printf(seq, "Preamble:   %s\n",
			   cw1200_debug_preamble[priv->association_mode.preamble]);
		seq_printf(seq, "AMPDU spcn: %d\n",
			   priv->association_mode.mpdu_start_spacing);
		seq_printf(seq, "Basic rate: 0x%.8X\n",
			   le32_to_cpu(priv->association_mode.basic_rate_set));
		seq_printf(seq, "Bss lost:   %d beacons\n",
			   priv->bss_params.beacon_lost_count);
		seq_printf(seq, "AID:        %d\n",
			   priv->bss_params.aid);
		seq_printf(seq, "Rates:      0x%.8X\n",
			   priv->bss_params.operational_rate_set);
		seq_printf(seq, "Powersave:  %s\n", pm_mode);
	}
	/* HT capabilities */
	seq_printf(seq, "HT:         %s\n",
		   cw1200_is_ht(&priv->ht_info) ? "on" : "off");
	if (cw1200_is_ht(&priv->ht_info)) {
		seq_printf(seq, "Greenfield: %s\n",
			   cw1200_ht_greenfield(&priv->ht_info) ? "yes" : "no");
		seq_printf(seq, "AMPDU dens: %d\n",
			   cw1200_ht_ampdu_density(&priv->ht_info));
	}
	seq_printf(seq, "RSSI thold: %d\n",
		   priv->cqm_rssi_thold);
	seq_printf(seq, "RSSI hyst:  %d\n",
		   priv->cqm_rssi_hyst);
	seq_printf(seq, "Long retr:  %d\n",
		   priv->long_frame_max_tx_count);
	seq_printf(seq, "Short retr: %d\n",
		   priv->short_frame_max_tx_count);
	/* Count rate-control policies currently in use */
	spin_lock_bh(&priv->tx_policy_cache.lock);
	i = 0;
	list_for_each(item, &priv->tx_policy_cache.used)
		++i;
	spin_unlock_bh(&priv->tx_policy_cache.lock);
	seq_printf(seq, "RC in use:  %d\n", i);

	seq_puts(seq, "\n");
	for (i = 0; i < 4; ++i) {
		cw1200_queue_status_show(seq, &priv->tx_queue[i]);
		seq_puts(seq, "\n");
	}

	/* Per-link-id bitmaps */
	cw1200_debug_print_map(seq, priv, "Link map:   ",
			       priv->link_id_map);
	cw1200_debug_print_map(seq, priv, "Asleep map: ",
			       priv->sta_asleep_mask);
	cw1200_debug_print_map(seq, priv, "PSPOLL map: ",
			       priv->pspoll_mask);

	seq_puts(seq, "\n");

	for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
		if (priv->link_id_db[i].status) {
			seq_printf(seq, "Link %d:     %s, %pM\n",
				   i + 1,
				   cw1200_debug_link_id[priv->link_id_db[i].status],
				   priv->link_id_db[i].mac);
		}
	}

	seq_puts(seq, "\n");

	/* Bottom-half (bus thread) state */
	seq_printf(seq, "BH status:  %s\n",
		   atomic_read(&priv->bh_term) ? "terminated" : "alive");
	seq_printf(seq, "Pending RX: %d\n",
		   atomic_read(&priv->bh_rx));
	seq_printf(seq, "Pending TX: %d\n",
		   atomic_read(&priv->bh_tx));
	if (priv->bh_error)
		seq_printf(seq, "BH errcode: %d\n",
			   priv->bh_error);
	seq_printf(seq, "TX bufs:    %d x %d bytes\n",
		   priv->wsm_caps.input_buffers,
		   priv->wsm_caps.input_buffer_size);
	seq_printf(seq, "Used bufs:  %d\n",
		   priv->hw_bufs_used);
	seq_printf(seq, "Powermgmt:  %s\n",
		   priv->powersave_enabled ? "on" : "off");
	seq_printf(seq, "Device:     %s\n",
		   priv->device_can_sleep ? "asleep" : "awake");

	/* In-flight WSM command, under its own lock */
	spin_lock(&priv->wsm_cmd.lock);
	seq_printf(seq, "WSM status: %s\n",
		   priv->wsm_cmd.done ? "idle" : "active");
	seq_printf(seq, "WSM cmd:    0x%.4X (%td bytes)\n",
		   priv->wsm_cmd.cmd, priv->wsm_cmd.len);
	seq_printf(seq, "WSM retval: %d\n",
		   priv->wsm_cmd.ret);
	spin_unlock(&priv->wsm_cmd.lock);

	seq_printf(seq, "Datapath:   %s\n",
		   atomic_read(&priv->tx_lock) ? "locked" : "unlocked");
	if (atomic_read(&priv->tx_lock))
		seq_printf(seq, "TXlock cnt: %d\n",
			   atomic_read(&priv->tx_lock));

	/* Cumulative traffic counters from cw1200_debug_priv */
	seq_printf(seq, "TXed:       %d\n",
		   d->tx);
	seq_printf(seq, "AGG TXed:   %d\n",
		   d->tx_agg);
	seq_printf(seq, "MULTI TXed: %d (%d)\n",
		   d->tx_multi, d->tx_multi_frames);
	seq_printf(seq, "RXed:       %d\n",
		   d->rx);
	seq_printf(seq, "AGG RXed:   %d\n",
		   d->rx_agg);
	seq_printf(seq, "TX miss:    %d\n",
		   d->tx_cache_miss);
	seq_printf(seq, "TX align:   %d\n",
		   d->tx_align);
	seq_printf(seq, "TX burst:   %d\n",
		   d->tx_burst);
	seq_printf(seq, "TX TTL:     %d\n",
		   d->tx_ttl);
	seq_printf(seq, "Scan:       %s\n",
		   atomic_read(&priv->scan.in_progress) ? "active" : "idle");

	return 0;
}
289
290static int cw1200_status_open(struct inode *inode, struct file *file)
291{
292 return single_open(file, &cw1200_status_show,
293 inode->i_private);
294}
295
/* File operations for the read-only "status" debugfs entry. */
static const struct file_operations fops_status = {
	.open = cw1200_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
303
/* seq_file 'show' for debugfs "counters": fetch the firmware MIB
 * counters table over WSM and print each field.  Returns the WSM error
 * if the query fails.
 */
static int cw1200_counters_show(struct seq_file *seq, void *v)
{
	int ret;
	struct cw1200_common *priv = seq->private;
	struct wsm_mib_counters_table counters;

	ret = wsm_get_counters_table(priv, &counters);
	if (ret)
		return ret;

/* Print one LE32 counter field; 'tab' only aligns the output column. */
#define PUT_COUNTER(tab, name) \
	seq_printf(seq, "%s:" tab "%d\n", #name, \
		__le32_to_cpu(counters.name))

	PUT_COUNTER("\t\t", plcp_errors);
	PUT_COUNTER("\t\t", fcs_errors);
	PUT_COUNTER("\t\t", tx_packets);
	PUT_COUNTER("\t\t", rx_packets);
	PUT_COUNTER("\t\t", rx_packet_errors);
	PUT_COUNTER("\t", rx_decryption_failures);
	PUT_COUNTER("\t\t", rx_mic_failures);
	PUT_COUNTER("\t", rx_no_key_failures);
	PUT_COUNTER("\t", tx_multicast_frames);
	PUT_COUNTER("\t", tx_frames_success);
	PUT_COUNTER("\t", tx_frame_failures);
	PUT_COUNTER("\t", tx_frames_retried);
	PUT_COUNTER("\t", tx_frames_multi_retried);
	PUT_COUNTER("\t", rx_frame_duplicates);
	PUT_COUNTER("\t\t", rts_success);
	PUT_COUNTER("\t\t", rts_failures);
	PUT_COUNTER("\t\t", ack_failures);
	PUT_COUNTER("\t", rx_multicast_frames);
	PUT_COUNTER("\t", rx_frames_success);
	PUT_COUNTER("\t", rx_cmac_icv_errors);
	PUT_COUNTER("\t\t", rx_cmac_replays);
	PUT_COUNTER("\t", rx_mgmt_ccmp_replays);

#undef PUT_COUNTER

	return 0;
}
345
346static int cw1200_counters_open(struct inode *inode, struct file *file)
347{
348 return single_open(file, &cw1200_counters_show,
349 inode->i_private);
350}
351
/* File operations for the read-only "counters" debugfs entry. */
static const struct file_operations fops_counters = {
	.open = cw1200_counters_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
359
360static ssize_t cw1200_wsm_dumps(struct file *file,
361 const char __user *user_buf, size_t count, loff_t *ppos)
362{
363 struct cw1200_common *priv = file->private_data;
364 char buf[1];
365
366 if (!count)
367 return -EINVAL;
368 if (copy_from_user(buf, user_buf, 1))
369 return -EFAULT;
370
371 if (buf[0] == '1')
372 priv->wsm_enable_wsm_dumps = 1;
373 else
374 priv->wsm_enable_wsm_dumps = 0;
375
376 return count;
377}
378
/* File operations for the write-only "wsm_dumps" toggle. */
static const struct file_operations fops_wsm_dumps = {
	.open = simple_open,
	.write = cw1200_wsm_dumps,
	.llseek = default_llseek,
};
384
/* Create the per-device debugfs directory ("cw1200" under the wiphy
 * dir) and its entries.  Returns 0 on success, -ENOMEM on any failure;
 * on failure priv->debug is reset to NULL and everything is torn down.
 */
int cw1200_debug_init(struct cw1200_common *priv)
{
	int ret = -ENOMEM;
	struct cw1200_debug_priv *d = kzalloc(sizeof(struct cw1200_debug_priv),
			GFP_KERNEL);
	/* Published before the NULL check; cleared again on the err path */
	priv->debug = d;
	if (!d)
		return ret;

	d->debugfs_phy = debugfs_create_dir("cw1200",
					    priv->hw->wiphy->debugfsdir);
	if (!d->debugfs_phy)
		goto err;

	if (!debugfs_create_file("status", S_IRUSR, d->debugfs_phy,
				 priv, &fops_status))
		goto err;

	if (!debugfs_create_file("counters", S_IRUSR, d->debugfs_phy,
				 priv, &fops_counters))
		goto err;

	if (!debugfs_create_file("wsm_dumps", S_IWUSR, d->debugfs_phy,
				 priv, &fops_wsm_dumps))
		goto err;

	return 0;

err:
	priv->debug = NULL;
	/* Safe even if debugfs_phy is NULL */
	debugfs_remove_recursive(d->debugfs_phy);
	kfree(d);
	return ret;
}
419
420void cw1200_debug_release(struct cw1200_common *priv)
421{
422 struct cw1200_debug_priv *d = priv->debug;
423 if (d) {
424 debugfs_remove_recursive(d->debugfs_phy);
425 priv->debug = NULL;
426 kfree(d);
427 }
428}
diff --git a/drivers/net/wireless/cw1200/debug.h b/drivers/net/wireless/cw1200/debug.h
new file mode 100644
index 000000000000..b525aba53bfc
--- /dev/null
+++ b/drivers/net/wireless/cw1200/debug.h
@@ -0,0 +1,93 @@
1/*
2 * DebugFS code for ST-Ericsson CW1200 mac80211 driver
3 *
4 * Copyright (c) 2011, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef CW1200_DEBUG_H_INCLUDED
13#define CW1200_DEBUG_H_INCLUDED
14
/* Per-device debug state: the debugfs directory handle plus cumulative
 * traffic counters bumped by the inline helpers below and shown in the
 * "status" debugfs entry.
 */
struct cw1200_debug_priv {
	struct dentry *debugfs_phy;	/* root of this device's debugfs dir */
	int tx;			/* frames transmitted */
	int tx_agg;		/* aggregated TX */
	int rx;			/* frames received */
	int rx_agg;		/* aggregated RX */
	int tx_multi;		/* multi-frame TX bursts */
	int tx_multi_frames;	/* frames carried in those bursts */
	int tx_cache_miss;	/* tx policy cache misses */
	int tx_align;		/* TX buffers that needed realignment */
	int tx_ttl;		/* frames dropped on TTL expiry */
	int tx_burst;		/* TX bursts issued */
	int ba_cnt;		/* block-ack TX: last reported count */
	int ba_acc;		/* block-ack TX: last reported accumulator */
	int ba_cnt_rx;		/* block-ack RX: last reported count */
	int ba_acc_rx;		/* block-ack RX: last reported accumulator */
};
32
33int cw1200_debug_init(struct cw1200_common *priv);
34void cw1200_debug_release(struct cw1200_common *priv);
35
/* Count one transmitted frame. */
static inline void cw1200_debug_txed(struct cw1200_common *priv)
{
	++priv->debug->tx;
}

/* Count one aggregated TX. */
static inline void cw1200_debug_txed_agg(struct cw1200_common *priv)
{
	++priv->debug->tx_agg;
}

/* Count one multi-frame TX burst carrying 'count' frames. */
static inline void cw1200_debug_txed_multi(struct cw1200_common *priv,
					   int count)
{
	++priv->debug->tx_multi;
	priv->debug->tx_multi_frames += count;
}

/* Count one received frame. */
static inline void cw1200_debug_rxed(struct cw1200_common *priv)
{
	++priv->debug->rx;
}

/* Count one aggregated RX. */
static inline void cw1200_debug_rxed_agg(struct cw1200_common *priv)
{
	++priv->debug->rx_agg;
}

/* Count one TX policy cache miss. */
static inline void cw1200_debug_tx_cache_miss(struct cw1200_common *priv)
{
	++priv->debug->tx_cache_miss;
}

/* Count one TX buffer realignment. */
static inline void cw1200_debug_tx_align(struct cw1200_common *priv)
{
	++priv->debug->tx_align;
}

/* Count one frame dropped on TTL expiry. */
static inline void cw1200_debug_tx_ttl(struct cw1200_common *priv)
{
	++priv->debug->tx_ttl;
}

/* Count one TX burst. */
static inline void cw1200_debug_tx_burst(struct cw1200_common *priv)
{
	++priv->debug->tx_burst;
}

/* Record (overwrite, not accumulate) the latest block-ack statistics. */
static inline void cw1200_debug_ba(struct cw1200_common *priv,
				   int ba_cnt, int ba_acc,
				   int ba_cnt_rx, int ba_acc_rx)
{
	priv->debug->ba_cnt = ba_cnt;
	priv->debug->ba_acc = ba_acc;
	priv->debug->ba_cnt_rx = ba_cnt_rx;
	priv->debug->ba_acc_rx = ba_acc_rx;
}
92
93#endif /* CW1200_DEBUG_H_INCLUDED */
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
new file mode 100644
index 000000000000..acdff0f7f952
--- /dev/null
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -0,0 +1,520 @@
1/*
2 * Firmware I/O code for mac80211 ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * Based on:
8 * ST-Ericsson UMAC CW1200 driver which is
9 * Copyright (c) 2010, ST-Ericsson
10 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/init.h>
18#include <linux/vmalloc.h>
19#include <linux/sched.h>
20#include <linux/firmware.h>
21
22#include "cw1200.h"
23#include "fwio.h"
24#include "hwio.h"
25#include "hwbus.h"
26#include "bh.h"
27
28static int cw1200_get_hw_type(u32 config_reg_val, int *major_revision)
29{
30 int hw_type = -1;
31 u32 silicon_type = (config_reg_val >> 24) & 0x7;
32 u32 silicon_vers = (config_reg_val >> 31) & 0x1;
33
34 switch (silicon_type) {
35 case 0x00:
36 *major_revision = 1;
37 hw_type = HIF_9000_SILICON_VERSATILE;
38 break;
39 case 0x01:
40 case 0x02: /* CW1x00 */
41 case 0x04: /* CW1x60 */
42 *major_revision = silicon_type;
43 if (silicon_vers)
44 hw_type = HIF_8601_VERSATILE;
45 else
46 hw_type = HIF_8601_SILICON;
47 break;
48 default:
49 break;
50 }
51
52 return hw_type;
53}
54
/*
 * Download the WSM firmware image into a CW1200 device through the
 * bootloader's shared-memory FIFO, then wait for the bootloader to
 * report completion.
 *
 * Returns 0 on success or a negative errno: bus errors from the
 * register helpers, -EINVAL for an unknown hw_revision, -ENOMEM,
 * -ETIMEDOUT when the bootloader or FIFO stops responding, -EIO when
 * the bootloader reports a download error.
 */
static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
{
	int ret, block, num_blocks;
	unsigned i;
	u32 val32;
	u32 put = 0, get = 0;
	u8 *buf = NULL;
	const char *fw_path;
	const struct firmware *firmware = NULL;

	/* Helper macros local to this function: each one jumps to the
	 * common error path if the underlying bus access fails.
	 */
#define APB_WRITE(reg, val) \
	do { \
		ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \
		if (ret < 0) \
			goto error; \
	} while (0)
#define APB_READ(reg, val) \
	do { \
		ret = cw1200_apb_read_32(priv, CW1200_APB(reg), &(val)); \
		if (ret < 0) \
			goto error; \
	} while (0)
#define REG_WRITE(reg, val) \
	do { \
		ret = cw1200_reg_write_32(priv, (reg), (val)); \
		if (ret < 0) \
			goto error; \
	} while (0)
#define REG_READ(reg, val) \
	do { \
		ret = cw1200_reg_read_32(priv, (reg), &(val)); \
		if (ret < 0) \
			goto error; \
	} while (0)

	/* Pick the firmware (and default SDD) file for this silicon cut. */
	switch (priv->hw_revision) {
	case CW1200_HW_REV_CUT10:
		fw_path = FIRMWARE_CUT10;
		if (!priv->sdd_path)
			priv->sdd_path = SDD_FILE_10;
		break;
	case CW1200_HW_REV_CUT11:
		fw_path = FIRMWARE_CUT11;
		if (!priv->sdd_path)
			priv->sdd_path = SDD_FILE_11;
		break;
	case CW1200_HW_REV_CUT20:
		fw_path = FIRMWARE_CUT20;
		if (!priv->sdd_path)
			priv->sdd_path = SDD_FILE_20;
		break;
	case CW1200_HW_REV_CUT22:
		fw_path = FIRMWARE_CUT22;
		if (!priv->sdd_path)
			priv->sdd_path = SDD_FILE_22;
		break;
	case CW1X60_HW_REV:
		fw_path = FIRMWARE_CW1X60;
		if (!priv->sdd_path)
			priv->sdd_path = SDD_FILE_CW1X60;
		break;
	default:
		pr_err("Invalid silicon revision %d.\n", priv->hw_revision);
		return -EINVAL;
	}

	/* Initialize common registers */
	APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, DOWNLOAD_ARE_YOU_HERE);
	APB_WRITE(DOWNLOAD_PUT_REG, 0);
	APB_WRITE(DOWNLOAD_GET_REG, 0);
	APB_WRITE(DOWNLOAD_STATUS_REG, DOWNLOAD_PENDING);
	APB_WRITE(DOWNLOAD_FLAGS_REG, 0);

	/* Write the NOP Instruction */
	REG_WRITE(ST90TDS_SRAM_BASE_ADDR_REG_ID, 0xFFF20000);
	REG_WRITE(ST90TDS_AHB_DPORT_REG_ID, 0xEAFFFFFE);

	/* Release CPU from RESET */
	REG_READ(ST90TDS_CONFIG_REG_ID, val32);
	val32 &= ~ST90TDS_CONFIG_CPU_RESET_BIT;
	REG_WRITE(ST90TDS_CONFIG_REG_ID, val32);

	/* Enable Clock */
	val32 &= ~ST90TDS_CONFIG_CPU_CLK_DIS_BIT;
	REG_WRITE(ST90TDS_CONFIG_REG_ID, val32);

	/* Load a firmware file */
	ret = request_firmware(&firmware, fw_path, priv->pdev);
	if (ret) {
		pr_err("Can't load firmware file %s.\n", fw_path);
		goto error;
	}

	/* GFP_DMA: presumably required by the underlying bus driver for
	 * the transfer buffer -- TODO confirm against the hwbus glue.
	 */
	buf = kmalloc(DOWNLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA);
	if (!buf) {
		pr_err("Can't allocate firmware load buffer.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Check if the bootloader is ready; poll with a growing backoff
	 * (both the step and the mdelay() grow with i).
	 */
	for (i = 0; i < 100; i += 1 + i / 2) {
		APB_READ(DOWNLOAD_IMAGE_SIZE_REG, val32);
		if (val32 == DOWNLOAD_I_AM_HERE)
			break;
		mdelay(i);
	} /* End of for loop */

	if (val32 != DOWNLOAD_I_AM_HERE) {
		pr_err("Bootloader is not ready.\n");
		ret = -ETIMEDOUT;
		goto error;
	}

	/* Calculate number of download blocks (rounding up) */
	num_blocks = (firmware->size - 1) / DOWNLOAD_BLOCK_SIZE + 1;

	/* Updating the length in Download Ctrl Area */
	val32 = firmware->size; /* implicit size_t -> u32 narrowing */
	APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, val32);

	/* Firmware downloading loop */
	for (block = 0; block < num_blocks; block++) {
		size_t tx_size;
		size_t block_size;

		/* check the download status */
		APB_READ(DOWNLOAD_STATUS_REG, val32);
		if (val32 != DOWNLOAD_PENDING) {
			pr_err("Bootloader reported error %d.\n", val32);
			ret = -EIO;
			goto error;
		}

		/* loop until put - get <= 24K (one free block in the FIFO) */
		for (i = 0; i < 100; i++) {
			APB_READ(DOWNLOAD_GET_REG, get);
			if ((put - get) <=
			    (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE))
				break;
			mdelay(i);
		}

		if ((put - get) > (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) {
			pr_err("Timeout waiting for FIFO.\n");
			ret = -ETIMEDOUT;
			goto error;
		}

		/* calculate the block size */
		tx_size = block_size = min((size_t)(firmware->size - put),
					   (size_t)DOWNLOAD_BLOCK_SIZE);

		memcpy(buf, &firmware->data[put], block_size);
		if (block_size < DOWNLOAD_BLOCK_SIZE) {
			/* Zero-pad the final, partial block to a full one. */
			memset(&buf[block_size], 0,
			       DOWNLOAD_BLOCK_SIZE - block_size);
			tx_size = DOWNLOAD_BLOCK_SIZE;
		}

		/* send the block to sram (the FIFO wraps at its size) */
		ret = cw1200_apb_write(priv,
				       CW1200_APB(DOWNLOAD_FIFO_OFFSET +
						  (put & (DOWNLOAD_FIFO_SIZE - 1))),
				       buf, tx_size);
		if (ret < 0) {
			pr_err("Can't write firmware block @ %d!\n",
			       put & (DOWNLOAD_FIFO_SIZE - 1));
			goto error;
		}

		/* update the put register */
		put += block_size;
		APB_WRITE(DOWNLOAD_PUT_REG, put);
	} /* End of firmware download loop */

	/* Wait for the download completion */
	for (i = 0; i < 300; i += 1 + i / 2) {
		APB_READ(DOWNLOAD_STATUS_REG, val32);
		if (val32 != DOWNLOAD_PENDING)
			break;
		mdelay(i);
	}
	if (val32 != DOWNLOAD_SUCCESS) {
		pr_err("Wait for download completion failed: 0x%.8X\n", val32);
		ret = -ETIMEDOUT;
		goto error;
	} else {
		pr_info("Firmware download completed.\n");
		ret = 0;
	}

error:
	kfree(buf);
	if (firmware)
		release_firmware(firmware);
	return ret;

#undef APB_WRITE
#undef APB_READ
#undef REG_WRITE
#undef REG_READ
}
259
260
261static int config_reg_read(struct cw1200_common *priv, u32 *val)
262{
263 switch (priv->hw_type) {
264 case HIF_9000_SILICON_VERSATILE: {
265 u16 val16;
266 int ret = cw1200_reg_read_16(priv,
267 ST90TDS_CONFIG_REG_ID,
268 &val16);
269 if (ret < 0)
270 return ret;
271 *val = val16;
272 return 0;
273 }
274 case HIF_8601_VERSATILE:
275 case HIF_8601_SILICON:
276 default:
277 cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, val);
278 break;
279 }
280 return 0;
281}
282
283static int config_reg_write(struct cw1200_common *priv, u32 val)
284{
285 switch (priv->hw_type) {
286 case HIF_9000_SILICON_VERSATILE:
287 return cw1200_reg_write_16(priv,
288 ST90TDS_CONFIG_REG_ID,
289 (u16)val);
290 case HIF_8601_VERSATILE:
291 case HIF_8601_SILICON:
292 default:
293 return cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val);
294 break;
295 }
296 return 0;
297}
298
/*
 * Bring up a CW1200-family device: probe the silicon type and cut via
 * the CONFIG register and CUT ID words, program and verify the DPLL,
 * wake the device, download the matching firmware, enable interrupt
 * signalling and switch the device from QUEUE into MESSAGE mode.
 *
 * Returns 0 on success or a negative errno.
 */
int cw1200_load_firmware(struct cw1200_common *priv)
{
	int ret;
	int i;
	u32 val32;
	u16 val16;
	int major_revision = -1;

	/* Read CONFIG Register */
	ret = cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
	if (ret < 0) {
		pr_err("Can't read config register.\n");
		goto out;
	}

	/* All-zeros / all-ones presumably means a dead bus -- bail out. */
	if (val32 == 0 || val32 == 0xffffffff) {
		pr_err("Bad config register value (0x%08x)\n", val32);
		ret = -EIO;
		goto out;
	}

	priv->hw_type = cw1200_get_hw_type(val32, &major_revision);
	if (priv->hw_type < 0) {
		pr_err("Can't deduce hardware type.\n");
		ret = -ENOTSUPP;
		goto out;
	}

	/* Set DPLL Reg value, and read back to confirm writes work */
	ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
				  cw1200_dpll_from_clk(priv->hw_refclk));
	if (ret < 0) {
		pr_err("Can't write DPLL register.\n");
		goto out;
	}

	msleep(20);

	ret = cw1200_reg_read_32(priv,
				 ST90TDS_TSET_GEN_R_W_REG_ID, &val32);
	if (ret < 0) {
		pr_err("Can't read DPLL register.\n");
		goto out;
	}

	if (val32 != cw1200_dpll_from_clk(priv->hw_refclk)) {
		pr_err("Unable to initialise DPLL register. Wrote 0x%.8X, Read 0x%.8X.\n",
		       cw1200_dpll_from_clk(priv->hw_refclk), val32);
		ret = -EIO;
		goto out;
	}

	/* Set wakeup bit in device */
	ret = cw1200_reg_read_16(priv, ST90TDS_CONTROL_REG_ID, &val16);
	if (ret < 0) {
		pr_err("set_wakeup: can't read control register.\n");
		goto out;
	}

	ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
				  val16 | ST90TDS_CONT_WUP_BIT);
	if (ret < 0) {
		pr_err("set_wakeup: can't write control register.\n");
		goto out;
	}

	/* Wait for wakeup, polling with a growing backoff */
	for (i = 0; i < 300; i += (1 + i / 2)) {
		ret = cw1200_reg_read_16(priv,
					 ST90TDS_CONTROL_REG_ID, &val16);
		if (ret < 0) {
			pr_err("wait_for_wakeup: can't read control register.\n");
			goto out;
		}

		if (val16 & ST90TDS_CONT_RDY_BIT)
			break;

		msleep(i);
	}

	if ((val16 & ST90TDS_CONT_RDY_BIT) == 0) {
		pr_err("wait_for_wakeup: device is not responding.\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	switch (major_revision) {
	case 1:
		/* CW1200 Hardware detection logic : Check for CUT1.1 */
		ret = cw1200_ahb_read_32(priv, CW1200_CUT_ID_ADDR, &val32);
		if (ret) {
			pr_err("HW detection: can't read CUT ID.\n");
			goto out;
		}

		switch (val32) {
		case CW1200_CUT_11_ID_STR:
			pr_info("CW1x00 Cut 1.1 silicon detected.\n");
			priv->hw_revision = CW1200_HW_REV_CUT11;
			break;
		default:
			pr_info("CW1x00 Cut 1.0 silicon detected.\n");
			priv->hw_revision = CW1200_HW_REV_CUT10;
			break;
		}

		/* According to ST-E, CUT<2.0 has busted BA TID0-3.
		   Just disable it entirely...
		*/
		priv->ba_rx_tid_mask = 0;
		priv->ba_tx_tid_mask = 0;
		break;
	case 2: {
		u32 ar1, ar2, ar3;
		ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR, &ar1);
		if (ret) {
			pr_err("(1) HW detection: can't read CUT ID\n");
			goto out;
		}
		ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 4, &ar2);
		if (ret) {
			pr_err("(2) HW detection: can't read CUT ID.\n");
			goto out;
		}

		ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 8, &ar3);
		if (ret) {
			pr_err("(3) HW detection: can't read CUT ID.\n");
			goto out;
		}

		/* Compare against the three words of the cut-2.2 ID. */
		if (ar1 == CW1200_CUT_22_ID_STR1 &&
		    ar2 == CW1200_CUT_22_ID_STR2 &&
		    ar3 == CW1200_CUT_22_ID_STR3) {
			pr_info("CW1x00 Cut 2.2 silicon detected.\n");
			priv->hw_revision = CW1200_HW_REV_CUT22;
		} else {
			pr_info("CW1x00 Cut 2.0 silicon detected.\n");
			priv->hw_revision = CW1200_HW_REV_CUT20;
		}
		break;
	}
	case 4:
		pr_info("CW1x60 silicon detected.\n");
		priv->hw_revision = CW1X60_HW_REV;
		break;
	default:
		pr_err("Unsupported silicon major revision %d.\n",
		       major_revision);
		ret = -ENOTSUPP;
		goto out;
	}

	/* Checking for access mode: the device must still be in QUEUE mode */
	ret = config_reg_read(priv, &val32);
	if (ret < 0) {
		pr_err("Can't read config register.\n");
		goto out;
	}

	if (!(val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT)) {
		pr_err("Device is already in QUEUE mode!\n");
		ret = -EINVAL;
		goto out;
	}

	switch (priv->hw_type) {
	case HIF_8601_SILICON:
		if (priv->hw_revision == CW1X60_HW_REV) {
			pr_err("Can't handle CW1160/1260 firmware load yet.\n");
			ret = -ENOTSUPP;
			goto out;
		}
		ret = cw1200_load_firmware_cw1200(priv);
		break;
	default:
		pr_err("Can't perform firmware load for hw type %d.\n",
		       priv->hw_type);
		ret = -ENOTSUPP;
		goto out;
	}
	if (ret < 0) {
		pr_err("Firmware load error.\n");
		goto out;
	}

	/* Enable interrupt signalling */
	priv->hwbus_ops->lock(priv->hwbus_priv);
	ret = __cw1200_irq_enable(priv, 1);
	priv->hwbus_ops->unlock(priv->hwbus_priv);
	if (ret < 0)
		goto unsubscribe;

	/* Configure device for MESSAGE MODE (clear the access-mode bit) */
	ret = config_reg_read(priv, &val32);
	if (ret < 0) {
		pr_err("Can't read config register.\n");
		goto unsubscribe;
	}
	ret = config_reg_write(priv, val32 & ~ST90TDS_CONFIG_ACCESS_MODE_BIT);
	if (ret < 0) {
		pr_err("Can't write config register.\n");
		goto unsubscribe;
	}

	/* Unless we read the CONFIG Register we are
	 * not able to get an interrupt
	 */
	mdelay(10);
	/* best-effort read; the result and status are intentionally unused */
	config_reg_read(priv, &val32);

out:
	return ret;

unsubscribe:
	/* Disable interrupt signalling */
	priv->hwbus_ops->lock(priv->hwbus_priv);
	ret = __cw1200_irq_enable(priv, 0);
	priv->hwbus_ops->unlock(priv->hwbus_priv);
	return ret;
}
diff --git a/drivers/net/wireless/cw1200/fwio.h b/drivers/net/wireless/cw1200/fwio.h
new file mode 100644
index 000000000000..ea3099362cdf
--- /dev/null
+++ b/drivers/net/wireless/cw1200/fwio.h
@@ -0,0 +1,39 @@
1/*
2 * Firmware API for mac80211 ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * Based on:
8 * ST-Ericsson UMAC CW1200 driver which is
9 * Copyright (c) 2010, ST-Ericsson
10 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#ifndef FWIO_H_INCLUDED
18#define FWIO_H_INCLUDED
19
20#define BOOTLOADER_CW1X60 "boot_cw1x60.bin"
21#define FIRMWARE_CW1X60 "wsm_cw1x60.bin"
22#define FIRMWARE_CUT22 "wsm_22.bin"
23#define FIRMWARE_CUT20 "wsm_20.bin"
24#define FIRMWARE_CUT11 "wsm_11.bin"
25#define FIRMWARE_CUT10 "wsm_10.bin"
26#define SDD_FILE_CW1X60 "sdd_cw1x60.bin"
27#define SDD_FILE_22 "sdd_22.bin"
28#define SDD_FILE_20 "sdd_20.bin"
29#define SDD_FILE_11 "sdd_11.bin"
30#define SDD_FILE_10 "sdd_10.bin"
31
32int cw1200_load_firmware(struct cw1200_common *priv);
33
34/* SDD definitions */
35#define SDD_PTA_CFG_ELT_ID 0xEB
36#define SDD_REFERENCE_FREQUENCY_ELT_ID 0xc5
37u32 cw1200_dpll_from_clk(u16 clk);
38
39#endif
diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h
new file mode 100644
index 000000000000..8b2fc831c3de
--- /dev/null
+++ b/drivers/net/wireless/cw1200/hwbus.h
@@ -0,0 +1,33 @@
1/*
2 * Common hwbus abstraction layer interface for cw1200 wireless driver
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef CW1200_HWBUS_H
13#define CW1200_HWBUS_H
14
15struct hwbus_priv;
16
17void cw1200_irq_handler(struct cw1200_common *priv);
18
19/* This MUST be wrapped with hwbus_ops->lock/unlock! */
20int __cw1200_irq_enable(struct cw1200_common *priv, int enable);
21
/* Bus callbacks the core driver funnels all device access through
 * (implemented by the bus-specific glue; see hwio.c for the callers).
 */
struct hwbus_ops {
	/* Copy @count bytes from device address @addr into @dst. */
	int (*hwbus_memcpy_fromio)(struct hwbus_priv *self, unsigned int addr,
				   void *dst, int count);
	/* Copy @count bytes from @src to device address @addr. */
	int (*hwbus_memcpy_toio)(struct hwbus_priv *self, unsigned int addr,
				 const void *src, int count);
	/* Serialize bus access; wraps every transfer in hwio.c. */
	void (*lock)(struct hwbus_priv *self);
	void (*unlock)(struct hwbus_priv *self);
	/* NOTE(review): presumably rounds @size up to the bus transfer
	 * granularity -- confirm against the bus glue implementations. */
	size_t (*align_size)(struct hwbus_priv *self, size_t size);
	/* Notify the bus layer of a suspend (@suspend true) / resume. */
	int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
};
32
33#endif /* CW1200_HWBUS_H */
diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c
new file mode 100644
index 000000000000..ff230b7aeedd
--- /dev/null
+++ b/drivers/net/wireless/cw1200/hwio.c
@@ -0,0 +1,312 @@
1/*
2 * Low-level device IO routines for ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * Based on:
8 * ST-Ericsson UMAC CW1200 driver, which is
9 * Copyright (c) 2010, ST-Ericsson
10 * Author: Ajitpal Singh <ajitpal.singh@lockless.no>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/types.h>
18
19#include "cw1200.h"
20#include "hwio.h"
21#include "hwbus.h"
22
23 /* Sdio addr is 4*spi_addr */
24#define SPI_REG_ADDR_TO_SDIO(spi_reg_addr) ((spi_reg_addr) << 2)
25#define SDIO_ADDR17BIT(buf_id, mpf, rfu, reg_id_ofs) \
26 ((((buf_id) & 0x1F) << 7) \
27 | (((mpf) & 1) << 6) \
28 | (((rfu) & 1) << 5) \
29 | (((reg_id_ofs) & 0x1F) << 0))
30#define MAX_RETRY 3
31
32
33static int __cw1200_reg_read(struct cw1200_common *priv, u16 addr,
34 void *buf, size_t buf_len, int buf_id)
35{
36 u16 addr_sdio;
37 u32 sdio_reg_addr_17bit;
38
39 /* Check if buffer is aligned to 4 byte boundary */
40 if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) {
41 pr_err("buffer is not aligned.\n");
42 return -EINVAL;
43 }
44
45 /* Convert to SDIO Register Address */
46 addr_sdio = SPI_REG_ADDR_TO_SDIO(addr);
47 sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio);
48
49 return priv->hwbus_ops->hwbus_memcpy_fromio(priv->hwbus_priv,
50 sdio_reg_addr_17bit,
51 buf, buf_len);
52}
53
54static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr,
55 const void *buf, size_t buf_len, int buf_id)
56{
57 u16 addr_sdio;
58 u32 sdio_reg_addr_17bit;
59
60 /* Convert to SDIO Register Address */
61 addr_sdio = SPI_REG_ADDR_TO_SDIO(addr);
62 sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio);
63
64 return priv->hwbus_ops->hwbus_memcpy_toio(priv->hwbus_priv,
65 sdio_reg_addr_17bit,
66 buf, buf_len);
67}
68
69static inline int __cw1200_reg_read_32(struct cw1200_common *priv,
70 u16 addr, u32 *val)
71{
72 __le32 tmp;
73 int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
74 *val = le32_to_cpu(tmp);
75 return i;
76}
77
78static inline int __cw1200_reg_write_32(struct cw1200_common *priv,
79 u16 addr, u32 val)
80{
81 __le32 tmp = cpu_to_le32(val);
82 return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
83}
84
85static inline int __cw1200_reg_read_16(struct cw1200_common *priv,
86 u16 addr, u16 *val)
87{
88 __le16 tmp;
89 int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
90 *val = le16_to_cpu(tmp);
91 return i;
92}
93
94static inline int __cw1200_reg_write_16(struct cw1200_common *priv,
95 u16 addr, u16 val)
96{
97 __le16 tmp = cpu_to_le16(val);
98 return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
99}
100
101int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf,
102 size_t buf_len)
103{
104 int ret;
105 priv->hwbus_ops->lock(priv->hwbus_priv);
106 ret = __cw1200_reg_read(priv, addr, buf, buf_len, 0);
107 priv->hwbus_ops->unlock(priv->hwbus_priv);
108 return ret;
109}
110
111int cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf,
112 size_t buf_len)
113{
114 int ret;
115 priv->hwbus_ops->lock(priv->hwbus_priv);
116 ret = __cw1200_reg_write(priv, addr, buf, buf_len, 0);
117 priv->hwbus_ops->unlock(priv->hwbus_priv);
118 return ret;
119}
120
121int cw1200_data_read(struct cw1200_common *priv, void *buf, size_t buf_len)
122{
123 int ret, retry = 1;
124 int buf_id_rx = priv->buf_id_rx;
125
126 priv->hwbus_ops->lock(priv->hwbus_priv);
127
128 while (retry <= MAX_RETRY) {
129 ret = __cw1200_reg_read(priv,
130 ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
131 buf_len, buf_id_rx + 1);
132 if (!ret) {
133 buf_id_rx = (buf_id_rx + 1) & 3;
134 priv->buf_id_rx = buf_id_rx;
135 break;
136 } else {
137 retry++;
138 mdelay(1);
139 pr_err("error :[%d]\n", ret);
140 }
141 }
142
143 priv->hwbus_ops->unlock(priv->hwbus_priv);
144 return ret;
145}
146
147int cw1200_data_write(struct cw1200_common *priv, const void *buf,
148 size_t buf_len)
149{
150 int ret, retry = 1;
151 int buf_id_tx = priv->buf_id_tx;
152
153 priv->hwbus_ops->lock(priv->hwbus_priv);
154
155 while (retry <= MAX_RETRY) {
156 ret = __cw1200_reg_write(priv,
157 ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
158 buf_len, buf_id_tx);
159 if (!ret) {
160 buf_id_tx = (buf_id_tx + 1) & 31;
161 priv->buf_id_tx = buf_id_tx;
162 break;
163 } else {
164 retry++;
165 mdelay(1);
166 pr_err("error :[%d]\n", ret);
167 }
168 }
169
170 priv->hwbus_ops->unlock(priv->hwbus_priv);
171 return ret;
172}
173
174int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf,
175 size_t buf_len, u32 prefetch, u16 port_addr)
176{
177 u32 val32 = 0;
178 int i, ret;
179
180 if ((buf_len / 2) >= 0x1000) {
181 pr_err("Can't read more than 0xfff words.\n");
182 return -EINVAL;
183 }
184
185 priv->hwbus_ops->lock(priv->hwbus_priv);
186 /* Write address */
187 ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
188 if (ret < 0) {
189 pr_err("Can't write address register.\n");
190 goto out;
191 }
192
193 /* Read CONFIG Register Value - We will read 32 bits */
194 ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
195 if (ret < 0) {
196 pr_err("Can't read config register.\n");
197 goto out;
198 }
199
200 /* Set PREFETCH bit */
201 ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID,
202 val32 | prefetch);
203 if (ret < 0) {
204 pr_err("Can't write prefetch bit.\n");
205 goto out;
206 }
207
208 /* Check for PRE-FETCH bit to be cleared */
209 for (i = 0; i < 20; i++) {
210 ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
211 if (ret < 0) {
212 pr_err("Can't check prefetch bit.\n");
213 goto out;
214 }
215 if (!(val32 & prefetch))
216 break;
217
218 mdelay(i);
219 }
220
221 if (val32 & prefetch) {
222 pr_err("Prefetch bit is not cleared.\n");
223 goto out;
224 }
225
226 /* Read data port */
227 ret = __cw1200_reg_read(priv, port_addr, buf, buf_len, 0);
228 if (ret < 0) {
229 pr_err("Can't read data port.\n");
230 goto out;
231 }
232
233out:
234 priv->hwbus_ops->unlock(priv->hwbus_priv);
235 return ret;
236}
237
238int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf,
239 size_t buf_len)
240{
241 int ret;
242
243 if ((buf_len / 2) >= 0x1000) {
244 pr_err("Can't write more than 0xfff words.\n");
245 return -EINVAL;
246 }
247
248 priv->hwbus_ops->lock(priv->hwbus_priv);
249
250 /* Write address */
251 ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
252 if (ret < 0) {
253 pr_err("Can't write address register.\n");
254 goto out;
255 }
256
257 /* Write data port */
258 ret = __cw1200_reg_write(priv, ST90TDS_SRAM_DPORT_REG_ID,
259 buf, buf_len, 0);
260 if (ret < 0) {
261 pr_err("Can't write data port.\n");
262 goto out;
263 }
264
265out:
266 priv->hwbus_ops->unlock(priv->hwbus_priv);
267 return ret;
268}
269
270int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
271{
272 u32 val32;
273 u16 val16;
274 int ret;
275
276 if (HIF_8601_SILICON == priv->hw_type) {
277 ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
278 if (ret < 0) {
279 pr_err("Can't read config register.\n");
280 return ret;
281 }
282
283 if (enable)
284 val32 |= ST90TDS_CONF_IRQ_RDY_ENABLE;
285 else
286 val32 &= ~ST90TDS_CONF_IRQ_RDY_ENABLE;
287
288 ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32);
289 if (ret < 0) {
290 pr_err("Can't write config register.\n");
291 return ret;
292 }
293 } else {
294 ret = __cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16);
295 if (ret < 0) {
296 pr_err("Can't read control register.\n");
297 return ret;
298 }
299
300 if (enable)
301 val16 |= ST90TDS_CONT_IRQ_RDY_ENABLE;
302 else
303 val16 &= ~ST90TDS_CONT_IRQ_RDY_ENABLE;
304
305 ret = __cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID, val16);
306 if (ret < 0) {
307 pr_err("Can't write control register.\n");
308 return ret;
309 }
310 }
311 return 0;
312}
diff --git a/drivers/net/wireless/cw1200/hwio.h b/drivers/net/wireless/cw1200/hwio.h
new file mode 100644
index 000000000000..ddf52669dc5b
--- /dev/null
+++ b/drivers/net/wireless/cw1200/hwio.h
@@ -0,0 +1,247 @@
1/*
2 * Low-level API for mac80211 ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * Based on:
8 * ST-Ericsson UMAC CW1200 driver which is
9 * Copyright (c) 2010, ST-Ericsson
10 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#ifndef CW1200_HWIO_H_INCLUDED
18#define CW1200_HWIO_H_INCLUDED
19
20/* extern */ struct cw1200_common;
21
22#define CW1200_CUT_11_ID_STR (0x302E3830)
23#define CW1200_CUT_22_ID_STR1 (0x302e3132)
24#define CW1200_CUT_22_ID_STR2 (0x32302e30)
25#define CW1200_CUT_22_ID_STR3 (0x3335)
26#define CW1200_CUT_ID_ADDR (0xFFF17F90)
27#define CW1200_CUT2_ID_ADDR (0xFFF1FF90)
28
29/* Download control area */
30/* boot loader start address in SRAM */
31#define DOWNLOAD_BOOT_LOADER_OFFSET (0x00000000)
32/* 32K, 0x4000 to 0xDFFF */
33#define DOWNLOAD_FIFO_OFFSET (0x00004000)
34/* 32K */
35#define DOWNLOAD_FIFO_SIZE (0x00008000)
36/* 128 bytes, 0xFF80 to 0xFFFF */
37#define DOWNLOAD_CTRL_OFFSET (0x0000FF80)
38#define DOWNLOAD_CTRL_DATA_DWORDS (32-6)
39
/* Layout of the download control area at DOWNLOAD_CTRL_OFFSET, shared
 * between host and device during firmware download (see fwio.c).
 */
struct download_cntl_t {
	/* size of whole firmware file (including checksum), host init */
	u32 image_size;
	/* downloading flags */
	u32 flags;
	/* No. of bytes put into the download, init & updated by host */
	u32 put;
	/* last traced program counter, last ARM reg_pc */
	u32 trace_pc;
	/* No. of bytes read from the download, host init, device updates */
	u32 get;
	/* r0, boot loader status, host init to pending, device updates */
	u32 status;
	/* Extra debug info, r1 to r14 if status=r0=DOWNLOAD_EXCEPTION */
	u32 debug_data[DOWNLOAD_CTRL_DATA_DWORDS];
};
56
57#define DOWNLOAD_IMAGE_SIZE_REG \
58 (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, image_size))
59#define DOWNLOAD_FLAGS_REG \
60 (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, flags))
61#define DOWNLOAD_PUT_REG \
62 (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, put))
63#define DOWNLOAD_TRACE_PC_REG \
64 (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, trace_pc))
65#define DOWNLOAD_GET_REG \
66 (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, get))
67#define DOWNLOAD_STATUS_REG \
68 (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, status))
69#define DOWNLOAD_DEBUG_DATA_REG \
70 (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, debug_data))
71#define DOWNLOAD_DEBUG_DATA_LEN (108)
72
73#define DOWNLOAD_BLOCK_SIZE (1024)
74
75/* For boot loader detection */
76#define DOWNLOAD_ARE_YOU_HERE (0x87654321)
77#define DOWNLOAD_I_AM_HERE (0x12345678)
78
79/* Download error code */
80#define DOWNLOAD_PENDING (0xFFFFFFFF)
81#define DOWNLOAD_SUCCESS (0)
82#define DOWNLOAD_EXCEPTION (1)
83#define DOWNLOAD_ERR_MEM_1 (2)
84#define DOWNLOAD_ERR_MEM_2 (3)
85#define DOWNLOAD_ERR_SOFTWARE (4)
86#define DOWNLOAD_ERR_FILE_SIZE (5)
87#define DOWNLOAD_ERR_CHECKSUM (6)
88#define DOWNLOAD_ERR_OVERFLOW (7)
89#define DOWNLOAD_ERR_IMAGE (8)
90#define DOWNLOAD_ERR_HOST (9)
91#define DOWNLOAD_ERR_ABORT (10)
92
93
94#define SYS_BASE_ADDR_SILICON (0)
95#define PAC_BASE_ADDRESS_SILICON (SYS_BASE_ADDR_SILICON + 0x09000000)
96#define PAC_SHARED_MEMORY_SILICON (PAC_BASE_ADDRESS_SILICON)
97
98#define CW1200_APB(addr) (PAC_SHARED_MEMORY_SILICON + (addr))
99
100/* Device register definitions */
101
102/* WBF - SPI Register Addresses */
103#define ST90TDS_ADDR_ID_BASE (0x0000)
104/* 16/32 bits */
105#define ST90TDS_CONFIG_REG_ID (0x0000)
106/* 16/32 bits */
107#define ST90TDS_CONTROL_REG_ID (0x0001)
108/* 16 bits, Q mode W/R */
109#define ST90TDS_IN_OUT_QUEUE_REG_ID (0x0002)
110/* 32 bits, AHB bus R/W */
111#define ST90TDS_AHB_DPORT_REG_ID (0x0003)
112/* 16/32 bits */
113#define ST90TDS_SRAM_BASE_ADDR_REG_ID (0x0004)
114/* 32 bits, APB bus R/W */
115#define ST90TDS_SRAM_DPORT_REG_ID (0x0005)
116/* 32 bits, t_settle/general */
117#define ST90TDS_TSET_GEN_R_W_REG_ID (0x0006)
118/* 16 bits, Q mode read, no length */
119#define ST90TDS_FRAME_OUT_REG_ID (0x0007)
120#define ST90TDS_ADDR_ID_MAX (ST90TDS_FRAME_OUT_REG_ID)
121
122/* WBF - Control register bit set */
123/* next o/p length, bit 11 to 0 */
124#define ST90TDS_CONT_NEXT_LEN_MASK (0x0FFF)
125#define ST90TDS_CONT_WUP_BIT (BIT(12))
126#define ST90TDS_CONT_RDY_BIT (BIT(13))
127#define ST90TDS_CONT_IRQ_ENABLE (BIT(14))
128#define ST90TDS_CONT_RDY_ENABLE (BIT(15))
129#define ST90TDS_CONT_IRQ_RDY_ENABLE (BIT(14)|BIT(15))
130
131/* SPI Config register bit set */
132#define ST90TDS_CONFIG_FRAME_BIT (BIT(2))
133#define ST90TDS_CONFIG_WORD_MODE_BITS (BIT(3)|BIT(4))
134#define ST90TDS_CONFIG_WORD_MODE_1 (BIT(3))
135#define ST90TDS_CONFIG_WORD_MODE_2 (BIT(4))
136#define ST90TDS_CONFIG_ERROR_0_BIT (BIT(5))
137#define ST90TDS_CONFIG_ERROR_1_BIT (BIT(6))
138#define ST90TDS_CONFIG_ERROR_2_BIT (BIT(7))
139/* TBD: Sure??? */
140#define ST90TDS_CONFIG_CSN_FRAME_BIT (BIT(7))
141#define ST90TDS_CONFIG_ERROR_3_BIT (BIT(8))
142#define ST90TDS_CONFIG_ERROR_4_BIT (BIT(9))
143/* QueueM */
144#define ST90TDS_CONFIG_ACCESS_MODE_BIT (BIT(10))
145/* AHB bus */
146#define ST90TDS_CONFIG_AHB_PRFETCH_BIT (BIT(11))
147#define ST90TDS_CONFIG_CPU_CLK_DIS_BIT (BIT(12))
148/* APB bus */
149#define ST90TDS_CONFIG_PRFETCH_BIT (BIT(13))
150/* cpu reset */
151#define ST90TDS_CONFIG_CPU_RESET_BIT (BIT(14))
152#define ST90TDS_CONFIG_CLEAR_INT_BIT (BIT(15))
153
154/* For CW1200 the IRQ Enable and Ready Bits are in CONFIG register */
155#define ST90TDS_CONF_IRQ_ENABLE (BIT(16))
156#define ST90TDS_CONF_RDY_ENABLE (BIT(17))
157#define ST90TDS_CONF_IRQ_RDY_ENABLE (BIT(16)|BIT(17))
158
159int cw1200_data_read(struct cw1200_common *priv,
160 void *buf, size_t buf_len);
161int cw1200_data_write(struct cw1200_common *priv,
162 const void *buf, size_t buf_len);
163
164int cw1200_reg_read(struct cw1200_common *priv, u16 addr,
165 void *buf, size_t buf_len);
166int cw1200_reg_write(struct cw1200_common *priv, u16 addr,
167 const void *buf, size_t buf_len);
168
169static inline int cw1200_reg_read_16(struct cw1200_common *priv,
170 u16 addr, u16 *val)
171{
172 __le32 tmp;
173 int i;
174 i = cw1200_reg_read(priv, addr, &tmp, sizeof(tmp));
175 *val = le32_to_cpu(tmp) & 0xfffff;
176 return i;
177}
178
179static inline int cw1200_reg_write_16(struct cw1200_common *priv,
180 u16 addr, u16 val)
181{
182 __le32 tmp = cpu_to_le32((u32)val);
183 return cw1200_reg_write(priv, addr, &tmp, sizeof(tmp));
184}
185
186static inline int cw1200_reg_read_32(struct cw1200_common *priv,
187 u16 addr, u32 *val)
188{
189 __le32 tmp;
190 int i = cw1200_reg_read(priv, addr, &tmp, sizeof(tmp));
191 *val = le32_to_cpu(tmp);
192 return i;
193}
194
195static inline int cw1200_reg_write_32(struct cw1200_common *priv,
196 u16 addr, u32 val)
197{
198 __le32 tmp = cpu_to_le32(val);
199 return cw1200_reg_write(priv, addr, &tmp, sizeof(val));
200}
201
202int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf,
203 size_t buf_len, u32 prefetch, u16 port_addr);
204int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf,
205 size_t buf_len);
206
/* Indirect read of internal memory through the APB (SRAM) data port,
 * using the APB prefetch bit for the handshake.
 */
static inline int cw1200_apb_read(struct cw1200_common *priv, u32 addr,
				  void *buf, size_t buf_len)
{
	return cw1200_indirect_read(priv, addr, buf, buf_len,
				    ST90TDS_CONFIG_PRFETCH_BIT,
				    ST90TDS_SRAM_DPORT_REG_ID);
}
214
/* Indirect read of internal memory through the AHB data port, using
 * the AHB prefetch bit for the handshake.
 */
static inline int cw1200_ahb_read(struct cw1200_common *priv, u32 addr,
				  void *buf, size_t buf_len)
{
	return cw1200_indirect_read(priv, addr, buf, buf_len,
				    ST90TDS_CONFIG_AHB_PRFETCH_BIT,
				    ST90TDS_AHB_DPORT_REG_ID);
}
222
223static inline int cw1200_apb_read_32(struct cw1200_common *priv,
224 u32 addr, u32 *val)
225{
226 __le32 tmp;
227 int i = cw1200_apb_read(priv, addr, &tmp, sizeof(tmp));
228 *val = le32_to_cpu(tmp);
229 return i;
230}
231
232static inline int cw1200_apb_write_32(struct cw1200_common *priv,
233 u32 addr, u32 val)
234{
235 __le32 tmp = cpu_to_le32(val);
236 return cw1200_apb_write(priv, addr, &tmp, sizeof(val));
237}
238static inline int cw1200_ahb_read_32(struct cw1200_common *priv,
239 u32 addr, u32 *val)
240{
241 __le32 tmp;
242 int i = cw1200_ahb_read(priv, addr, &tmp, sizeof(tmp));
243 *val = le32_to_cpu(tmp);
244 return i;
245}
246
247#endif /* CW1200_HWIO_H_INCLUDED */
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
new file mode 100644
index 000000000000..3724e739cbf4
--- /dev/null
+++ b/drivers/net/wireless/cw1200/main.c
@@ -0,0 +1,605 @@
1/*
2 * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * Based on:
8 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
9 * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
10 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
11 *
12 * Based on:
13 * - the islsm (softmac prism54) driver, which is:
14 * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
15 * - stlc45xx driver
16 * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License version 2 as
20 * published by the Free Software Foundation.
21 */
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/firmware.h>
26#include <linux/etherdevice.h>
27#include <linux/vmalloc.h>
28#include <linux/random.h>
29#include <linux/sched.h>
30#include <net/mac80211.h>
31
32#include "cw1200.h"
33#include "txrx.h"
34#include "hwbus.h"
35#include "fwio.h"
36#include "hwio.h"
37#include "bh.h"
38#include "sta.h"
39#include "scan.h"
40#include "debug.h"
41#include "pm.h"
42
43MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>");
44MODULE_DESCRIPTION("Softmac ST-Ericsson CW1200 common code");
45MODULE_LICENSE("GPL");
46MODULE_ALIAS("cw1200_core");
47
48/* Accept MAC address of the form macaddr=0x00,0x80,0xE1,0x30,0x40,0x50 */
49static u8 cw1200_mac_template[ETH_ALEN] = {0x02, 0x80, 0xe1, 0x00, 0x00, 0x00};
50module_param_array_named(macaddr, cw1200_mac_template, byte, NULL, S_IRUGO);
51MODULE_PARM_DESC(macaddr, "Override platform_data MAC address");
52
53static char *cw1200_sdd_path;
54module_param(cw1200_sdd_path, charp, 0644);
55MODULE_PARM_DESC(cw1200_sdd_path, "Override platform_data SDD file");
56static int cw1200_refclk;
57module_param(cw1200_refclk, int, 0644);
58MODULE_PARM_DESC(cw1200_refclk, "Override platform_data reference clock");
59
60int cw1200_power_mode = wsm_power_mode_quiescent;
61module_param(cw1200_power_mode, int, 0644);
62MODULE_PARM_DESC(cw1200_power_mode, "WSM power mode. 0 == active, 1 == doze, 2 == quiescent (default)");
63
/* Initializer for one struct ieee80211_rate entry.
 * @_rate is in units of 100 kbps (mac80211 convention); @_rateid is the
 * hardware/firmware rate index; @_flags are IEEE80211_TX_RC_* bits.
 */
#define RATETAB_ENT(_rate, _rateid, _flags) \
	{ \
		.bitrate = (_rate), \
		.hw_value = (_rateid), \
		.flags = (_flags), \
	}

/* Legacy rate table: 4 CCK entries (1-11 Mbps) followed by 8 OFDM
 * entries (6-54 Mbps).  Hardware indices 4 and 5 are skipped.
 */
static struct ieee80211_rate cw1200_rates[] = {
	RATETAB_ENT(10, 0, 0),
	RATETAB_ENT(20, 1, 0),
	RATETAB_ENT(55, 2, 0),
	RATETAB_ENT(110, 3, 0),
	RATETAB_ENT(60, 6, 0),
	RATETAB_ENT(90, 7, 0),
	RATETAB_ENT(120, 8, 0),
	RATETAB_ENT(180, 9, 0),
	RATETAB_ENT(240, 10, 0),
	RATETAB_ENT(360, 11, 0),
	RATETAB_ENT(480, 12, 0),
	RATETAB_ENT(540, 13, 0),
};

/* 802.11n MCS rates, hardware indices 14-21. */
static struct ieee80211_rate cw1200_mcs_rates[] = {
	RATETAB_ENT(65, 14, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(130, 15, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(195, 16, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(260, 17, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(390, 18, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(520, 19, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(585, 20, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(650, 21, IEEE80211_TX_RC_MCS),
};

/* Band views into the tables above: 5 GHz ("a") skips the 4 CCK
 * entries; 2.4 GHz ("g") uses the whole legacy table.
 */
#define cw1200_a_rates (cw1200_rates + 4)
#define cw1200_a_rates_size (ARRAY_SIZE(cw1200_rates) - 4)
#define cw1200_g_rates (cw1200_rates + 0)
#define cw1200_g_rates_size (ARRAY_SIZE(cw1200_rates))
#define cw1200_n_rates (cw1200_mcs_rates)
#define cw1200_n_rates_size (ARRAY_SIZE(cw1200_mcs_rates))
103
104
/* Initializer for a 2.4 GHz channel; center frequency given explicitly. */
#define CHAN2G(_channel, _freq, _flags) { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_channel), \
	.flags = (_flags), \
	.max_antenna_gain = 0, \
	.max_power = 30, \
}

/* Initializer for a 5 GHz channel; frequency derived as 5000 + 5*chan. */
#define CHAN5G(_channel, _flags) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = 5000 + (5 * (_channel)), \
	.hw_value = (_channel), \
	.flags = (_flags), \
	.max_antenna_gain = 0, \
	.max_power = 30, \
}

/* All 14 2.4 GHz channels; regulatory restrictions applied elsewhere. */
static struct ieee80211_channel cw1200_2ghz_chantable[] = {
	CHAN2G(1, 2412, 0),
	CHAN2G(2, 2417, 0),
	CHAN2G(3, 2422, 0),
	CHAN2G(4, 2427, 0),
	CHAN2G(5, 2432, 0),
	CHAN2G(6, 2437, 0),
	CHAN2G(7, 2442, 0),
	CHAN2G(8, 2447, 0),
	CHAN2G(9, 2452, 0),
	CHAN2G(10, 2457, 0),
	CHAN2G(11, 2462, 0),
	CHAN2G(12, 2467, 0),
	CHAN2G(13, 2472, 0),
	CHAN2G(14, 2484, 0),
};

/* 5 GHz channels, including the Japanese 4.9 GHz range (184-216). */
static struct ieee80211_channel cw1200_5ghz_chantable[] = {
	CHAN5G(34, 0), CHAN5G(36, 0),
	CHAN5G(38, 0), CHAN5G(40, 0),
	CHAN5G(42, 0), CHAN5G(44, 0),
	CHAN5G(46, 0), CHAN5G(48, 0),
	CHAN5G(52, 0), CHAN5G(56, 0),
	CHAN5G(60, 0), CHAN5G(64, 0),
	CHAN5G(100, 0), CHAN5G(104, 0),
	CHAN5G(108, 0), CHAN5G(112, 0),
	CHAN5G(116, 0), CHAN5G(120, 0),
	CHAN5G(124, 0), CHAN5G(128, 0),
	CHAN5G(132, 0), CHAN5G(136, 0),
	CHAN5G(140, 0), CHAN5G(149, 0),
	CHAN5G(153, 0), CHAN5G(157, 0),
	CHAN5G(161, 0), CHAN5G(165, 0),
	CHAN5G(184, 0), CHAN5G(188, 0),
	CHAN5G(192, 0), CHAN5G(196, 0),
	CHAN5G(200, 0), CHAN5G(204, 0),
	CHAN5G(208, 0), CHAN5G(212, 0),
	CHAN5G(216, 0),
};
161
/* 2.4 GHz band descriptor handed to mac80211.  HT caps advertise
 * greenfield, 1-stream RX STBC and max A-MSDU; single spatial stream
 * (rx_mask[0] only; rx_highest 0x41 == 65 — presumably 65 Mbps MCS7,
 * matching cw1200_mcs_rates).
 */
static struct ieee80211_supported_band cw1200_band_2ghz = {
	.channels = cw1200_2ghz_chantable,
	.n_channels = ARRAY_SIZE(cw1200_2ghz_chantable),
	.bitrates = cw1200_g_rates,
	.n_bitrates = cw1200_g_rates_size,
	.ht_cap = {
		.cap = IEEE80211_HT_CAP_GRN_FLD |
			(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) |
			IEEE80211_HT_CAP_MAX_AMSDU,
		.ht_supported = 1,
		.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
		.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
		.mcs = {
			.rx_mask[0] = 0xFF,
			.rx_highest = __cpu_to_le16(0x41),
			.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
		},
	},
};

/* 5 GHz band descriptor; identical HT caps, OFDM-only bitrate view. */
static struct ieee80211_supported_band cw1200_band_5ghz = {
	.channels = cw1200_5ghz_chantable,
	.n_channels = ARRAY_SIZE(cw1200_5ghz_chantable),
	.bitrates = cw1200_a_rates,
	.n_bitrates = cw1200_a_rates_size,
	.ht_cap = {
		.cap = IEEE80211_HT_CAP_GRN_FLD |
			(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) |
			IEEE80211_HT_CAP_MAX_AMSDU,
		.ht_supported = 1,
		.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
		.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
		.mcs = {
			.rx_mask[0] = 0xFF,
			.rx_highest = __cpu_to_le16(0x41),
			.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
		},
	},
};
201
/* Per-AC TX queue lifetime (jiffies), indexed by queue id and passed to
 * cw1200_queue_init(); higher-priority traffic expires sooner.
 */
static const unsigned long cw1200_ttl[] = {
	1 * HZ,	/* VO */
	2 * HZ,	/* VI */
	5 * HZ, /* BE */
	10 * HZ	/* BK */
};
208
/* mac80211 driver callbacks.  Suspend/resume is only wired up when the
 * kernel is built with PM support.
 */
static const struct ieee80211_ops cw1200_ops = {
	.start			= cw1200_start,
	.stop			= cw1200_stop,
	.add_interface		= cw1200_add_interface,
	.remove_interface	= cw1200_remove_interface,
	.change_interface	= cw1200_change_interface,
	.tx			= cw1200_tx,
	.hw_scan		= cw1200_hw_scan,
	.set_tim		= cw1200_set_tim,
	.sta_notify		= cw1200_sta_notify,
	.sta_add		= cw1200_sta_add,
	.sta_remove		= cw1200_sta_remove,
	.set_key		= cw1200_set_key,
	.set_rts_threshold	= cw1200_set_rts_threshold,
	.config			= cw1200_config,
	.bss_info_changed	= cw1200_bss_info_changed,
	.prepare_multicast	= cw1200_prepare_multicast,
	.configure_filter	= cw1200_configure_filter,
	.conf_tx		= cw1200_conf_tx,
	.get_stats		= cw1200_get_stats,
	.ampdu_action		= cw1200_ampdu_action,
	.flush			= cw1200_flush,
#ifdef CONFIG_PM
	.suspend		= cw1200_wow_suspend,
	.resume			= cw1200_wow_resume,
#endif
	/* Intentionally not offloaded: */
	/*.channel_switch	= cw1200_channel_switch, */
	/*.remain_on_channel	= cw1200_remain_on_channel, */
	/*.cancel_remain_on_channel = cw1200_cancel_remain_on_channel, */
};
240
/* Block-ACK TID bitmasks, overridable via module parameters.
 * -1 (the default) means "enable for all TIDs" — see cw1200_init_common().
 */
static int cw1200_ba_rx_tids = -1;
static int cw1200_ba_tx_tids = -1;
module_param(cw1200_ba_rx_tids, int, 0644);
module_param(cw1200_ba_tx_tids, int, 0644);
MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs");
MODULE_PARM_DESC(cw1200_ba_tx_tids, "Block ACK TX TIDs");

#ifdef CONFIG_PM
static const struct wiphy_wowlan_support cw1200_wowlan_support = {
	/* Support only for limited wowlan functionalities */
	.flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT,
};
#endif
254
255
256static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
257 const bool have_5ghz)
258{
259 int i, band;
260 struct ieee80211_hw *hw;
261 struct cw1200_common *priv;
262
263 hw = ieee80211_alloc_hw(sizeof(struct cw1200_common), &cw1200_ops);
264 if (!hw)
265 return NULL;
266
267 priv = hw->priv;
268 priv->hw = hw;
269 priv->hw_type = -1;
270 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
271 priv->rates = cw1200_rates; /* TODO: fetch from FW */
272 priv->mcs_rates = cw1200_n_rates;
273 if (cw1200_ba_rx_tids != -1)
274 priv->ba_rx_tid_mask = cw1200_ba_rx_tids;
275 else
276 priv->ba_rx_tid_mask = 0xFF; /* Enable RX BLKACK for all TIDs */
277 if (cw1200_ba_tx_tids != -1)
278 priv->ba_tx_tid_mask = cw1200_ba_tx_tids;
279 else
280 priv->ba_tx_tid_mask = 0xff; /* Enable TX BLKACK for all TIDs */
281
282 hw->flags = IEEE80211_HW_SIGNAL_DBM |
283 IEEE80211_HW_SUPPORTS_PS |
284 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
285 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
286 IEEE80211_HW_SUPPORTS_UAPSD |
287 IEEE80211_HW_CONNECTION_MONITOR |
288 IEEE80211_HW_AMPDU_AGGREGATION |
289 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
290 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC;
291
292 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
293 BIT(NL80211_IFTYPE_ADHOC) |
294 BIT(NL80211_IFTYPE_AP) |
295 BIT(NL80211_IFTYPE_MESH_POINT) |
296 BIT(NL80211_IFTYPE_P2P_CLIENT) |
297 BIT(NL80211_IFTYPE_P2P_GO);
298
299#ifdef CONFIG_PM
300 hw->wiphy->wowlan = &cw1200_wowlan_support;
301#endif
302
303 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
304
305 hw->channel_change_time = 1000; /* TODO: find actual value */
306 hw->queues = 4;
307
308 priv->rts_threshold = -1;
309
310 hw->max_rates = 8;
311 hw->max_rate_tries = 15;
312 hw->extra_tx_headroom = WSM_TX_EXTRA_HEADROOM +
313 8; /* TKIP IV */
314
315 hw->sta_data_size = sizeof(struct cw1200_sta_priv);
316
317 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz;
318 if (have_5ghz)
319 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz;
320
321 /* Channel params have to be cleared before registering wiphy again */
322 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
323 struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
324 if (!sband)
325 continue;
326 for (i = 0; i < sband->n_channels; i++) {
327 sband->channels[i].flags = 0;
328 sband->channels[i].max_antenna_gain = 0;
329 sband->channels[i].max_power = 30;
330 }
331 }
332
333 hw->wiphy->max_scan_ssids = 2;
334 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
335
336 if (macaddr)
337 SET_IEEE80211_PERM_ADDR(hw, (u8 *)macaddr);
338 else
339 SET_IEEE80211_PERM_ADDR(hw, cw1200_mac_template);
340
341 /* Fix up mac address if necessary */
342 if (hw->wiphy->perm_addr[3] == 0 &&
343 hw->wiphy->perm_addr[4] == 0 &&
344 hw->wiphy->perm_addr[5] == 0) {
345 get_random_bytes(&hw->wiphy->perm_addr[3], 3);
346 }
347
348 mutex_init(&priv->wsm_cmd_mux);
349 mutex_init(&priv->conf_mutex);
350 priv->workqueue = create_singlethread_workqueue("cw1200_wq");
351 sema_init(&priv->scan.lock, 1);
352 INIT_WORK(&priv->scan.work, cw1200_scan_work);
353 INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
354 INIT_DELAYED_WORK(&priv->scan.timeout, cw1200_scan_timeout);
355 INIT_DELAYED_WORK(&priv->clear_recent_scan_work,
356 cw1200_clear_recent_scan_work);
357 INIT_DELAYED_WORK(&priv->join_timeout, cw1200_join_timeout);
358 INIT_WORK(&priv->unjoin_work, cw1200_unjoin_work);
359 INIT_WORK(&priv->join_complete_work, cw1200_join_complete_work);
360 INIT_WORK(&priv->wep_key_work, cw1200_wep_key_work);
361 INIT_WORK(&priv->tx_policy_upload_work, tx_policy_upload_work);
362 spin_lock_init(&priv->event_queue_lock);
363 INIT_LIST_HEAD(&priv->event_queue);
364 INIT_WORK(&priv->event_handler, cw1200_event_handler);
365 INIT_DELAYED_WORK(&priv->bss_loss_work, cw1200_bss_loss_work);
366 INIT_WORK(&priv->bss_params_work, cw1200_bss_params_work);
367 spin_lock_init(&priv->bss_loss_lock);
368 spin_lock_init(&priv->ps_state_lock);
369 INIT_WORK(&priv->set_cts_work, cw1200_set_cts_work);
370 INIT_WORK(&priv->set_tim_work, cw1200_set_tim_work);
371 INIT_WORK(&priv->multicast_start_work, cw1200_multicast_start_work);
372 INIT_WORK(&priv->multicast_stop_work, cw1200_multicast_stop_work);
373 INIT_WORK(&priv->link_id_work, cw1200_link_id_work);
374 INIT_DELAYED_WORK(&priv->link_id_gc_work, cw1200_link_id_gc_work);
375 INIT_WORK(&priv->linkid_reset_work, cw1200_link_id_reset);
376 INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work);
377 INIT_WORK(&priv->set_beacon_wakeup_period_work,
378 cw1200_set_beacon_wakeup_period_work);
379 init_timer(&priv->mcast_timeout);
380 priv->mcast_timeout.data = (unsigned long)priv;
381 priv->mcast_timeout.function = cw1200_mcast_timeout;
382
383 if (cw1200_queue_stats_init(&priv->tx_queue_stats,
384 CW1200_LINK_ID_MAX,
385 cw1200_skb_dtor,
386 priv)) {
387 ieee80211_free_hw(hw);
388 return NULL;
389 }
390
391 for (i = 0; i < 4; ++i) {
392 if (cw1200_queue_init(&priv->tx_queue[i],
393 &priv->tx_queue_stats, i, 16,
394 cw1200_ttl[i])) {
395 for (; i > 0; i--)
396 cw1200_queue_deinit(&priv->tx_queue[i - 1]);
397 cw1200_queue_stats_deinit(&priv->tx_queue_stats);
398 ieee80211_free_hw(hw);
399 return NULL;
400 }
401 }
402
403 init_waitqueue_head(&priv->channel_switch_done);
404 init_waitqueue_head(&priv->wsm_cmd_wq);
405 init_waitqueue_head(&priv->wsm_startup_done);
406 init_waitqueue_head(&priv->ps_mode_switch_done);
407 wsm_buf_init(&priv->wsm_cmd_buf);
408 spin_lock_init(&priv->wsm_cmd.lock);
409 priv->wsm_cmd.done = 1;
410 tx_policy_init(priv);
411
412 return hw;
413}
414
/* Register @dev with mac80211 (plus the PM helper when CONFIG_PM) and
 * bring up debugfs.  Returns 0 or a negative errno; on failure the PM
 * state initialized here is torn down again.
 */
static int cw1200_register_common(struct ieee80211_hw *dev)
{
	struct cw1200_common *priv = dev->priv;
	int err;

#ifdef CONFIG_PM
	/* Sets up the "stay awake" timer used by the WoW suspend path */
	err = cw1200_pm_init(&priv->pm_state, priv);
	if (err) {
		pr_err("Cannot init PM. (%d).\n",
		       err);
		return err;
	}
#endif

	err = ieee80211_register_hw(dev);
	if (err) {
		pr_err("Cannot register device (%d).\n",
		       err);
#ifdef CONFIG_PM
		cw1200_pm_deinit(&priv->pm_state);
#endif
		return err;
	}

	cw1200_debug_init(priv);

	pr_info("Registered as '%s'\n", wiphy_name(dev->wiphy));
	return 0;
}
444
/* Release the ieee80211_hw (and the embedded cw1200_common with it). */
static void cw1200_free_common(struct ieee80211_hw *dev)
{
	ieee80211_free_hw(dev);
}
449
/* Undo cw1200_register_common() and cw1200_init_common(): unregister
 * from mac80211 first (stops new callbacks), then tear down timers, BH,
 * debugfs, locks, workqueue, firmware data and TX queues.  The caller
 * still frees @dev afterwards (cw1200_free_common()).
 */
static void cw1200_unregister_common(struct ieee80211_hw *dev)
{
	struct cw1200_common *priv = dev->priv;
	int i;

	ieee80211_unregister_hw(dev);

	del_timer_sync(&priv->mcast_timeout);
	cw1200_unregister_bh(priv);

	cw1200_debug_release(priv);

	mutex_destroy(&priv->conf_mutex);

	wsm_buf_deinit(&priv->wsm_cmd_buf);

	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	/* SDD firmware blob loaded during fwio setup, if any */
	if (priv->sdd) {
		release_firmware(priv->sdd);
		priv->sdd = NULL;
	}

	for (i = 0; i < 4; ++i)
		cw1200_queue_deinit(&priv->tx_queue[i]);

	cw1200_queue_stats_deinit(&priv->tx_queue_stats);
#ifdef CONFIG_PM
	cw1200_pm_deinit(&priv->pm_state);
#endif
}
482
483/* Clock is in KHz */
484u32 cw1200_dpll_from_clk(u16 clk_khz)
485{
486 switch (clk_khz) {
487 case 0x32C8: /* 13000 KHz */
488 return 0x1D89D241;
489 case 0x3E80: /* 16000 KHz */
490 return 0x000001E1;
491 case 0x41A0: /* 16800 KHz */
492 return 0x124931C1;
493 case 0x4B00: /* 19200 KHz */
494 return 0x00000191;
495 case 0x5DC0: /* 24000 KHz */
496 return 0x00000141;
497 case 0x6590: /* 26000 KHz */
498 return 0x0EC4F121;
499 case 0x8340: /* 33600 KHz */
500 return 0x092490E1;
501 case 0x9600: /* 38400 KHz */
502 return 0x100010C1;
503 case 0x9C40: /* 40000 KHz */
504 return 0x000000C1;
505 case 0xBB80: /* 48000 KHz */
506 return 0x000000A1;
507 case 0xCB20: /* 52000 KHz */
508 return 0x07627091;
509 default:
510 pr_err("Unknown Refclk freq (0x%04x), using 2600KHz\n",
511 clk_khz);
512 return 0x0EC4F121;
513 }
514}
515
/* Common probe entry called by the bus glue (SDIO/SPI).  Allocates the
 * hw, wires in the bus ops, starts the BH thread, loads firmware and
 * registers with mac80211.  On success *core points at the new
 * cw1200_common; on failure *core is NULL and a negative errno is
 * returned.  Module parameters (cw1200_refclk, cw1200_sdd_path)
 * override the platform-supplied @ref_clk and @sdd_path.
 */
int cw1200_core_probe(const struct hwbus_ops *hwbus_ops,
		      struct hwbus_priv *hwbus,
		      struct device *pdev,
		      struct cw1200_common **core,
		      int ref_clk, const u8 *macaddr,
		      const char *sdd_path, bool have_5ghz)
{
	int err = -EINVAL;
	struct ieee80211_hw *dev;
	struct cw1200_common *priv;
	struct wsm_operational_mode mode = {
		.power_mode = cw1200_power_mode,
		.disable_more_flag_usage = true,
	};

	dev = cw1200_init_common(macaddr, have_5ghz);
	if (!dev)
		goto err;

	priv = dev->priv;
	priv->hw_refclk = ref_clk;
	if (cw1200_refclk)
		priv->hw_refclk = cw1200_refclk;

	priv->sdd_path = (char *)sdd_path;
	if (cw1200_sdd_path)
		priv->sdd_path = cw1200_sdd_path;

	priv->hwbus_ops = hwbus_ops;
	priv->hwbus_priv = hwbus;
	priv->pdev = pdev;
	SET_IEEE80211_DEV(priv->hw, pdev);

	/* Pass struct cw1200_common back up */
	*core = priv;

	err = cw1200_register_bh(priv);
	if (err)
		goto err1;

	err = cw1200_load_firmware(priv);
	if (err)
		goto err2;

	/* Firmware signals readiness via the WSM startup indication */
	if (wait_event_interruptible_timeout(priv->wsm_startup_done,
					     priv->firmware_ready,
					     3*HZ) <= 0) {
		/* TODO: Need to find how to reset device
		   in QUEUE mode properly.
		*/
		pr_err("Timeout waiting on device startup\n");
		err = -ETIMEDOUT;
		goto err2;
	}

	/* Set low-power mode. */
	wsm_set_operational_mode(priv, &mode);

	/* Enable multi-TX confirmation */
	wsm_use_multi_tx_conf(priv, true);

	err = cw1200_register_common(dev);
	if (err)
		goto err2;

	return err;

err2:
	cw1200_unregister_bh(priv);
err1:
	cw1200_free_common(dev);
err:
	*core = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(cw1200_core_probe);
592
593void cw1200_core_release(struct cw1200_common *self)
594{
595 /* Disable device interrupts */
596 self->hwbus_ops->lock(self->hwbus_priv);
597 __cw1200_irq_enable(self, 0);
598 self->hwbus_ops->unlock(self->hwbus_priv);
599
600 /* And then clean up */
601 cw1200_unregister_common(self->hw);
602 cw1200_free_common(self->hw);
603 return;
604}
605EXPORT_SYMBOL_GPL(cw1200_core_release);
diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/cw1200/pm.c
new file mode 100644
index 000000000000..b37abb9f0453
--- /dev/null
+++ b/drivers/net/wireless/cw1200/pm.c
@@ -0,0 +1,367 @@
1/*
2 * Mac80211 power management API for ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2011, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/if_ether.h>
14#include "cw1200.h"
15#include "pm.h"
16#include "sta.h"
17#include "bh.h"
18#include "hwbus.h"
19
20#define CW1200_BEACON_SKIPPING_MULTIPLIER 3
21
/* WSM UDP port filter message: fixed header plus a full-size filter
 * array so the on-wire layout stays contiguous (hence __packed).
 */
struct cw1200_udp_port_filter {
	struct wsm_udp_port_filter_hdr hdr;
	/* Up to 4 filters are allowed. */
	struct wsm_udp_port_filter filters[WSM_MAX_FILTER_ELEMENTS];
} __packed;

/* WSM ethertype filter message; same layout rules as above. */
struct cw1200_ether_type_filter {
	struct wsm_ether_type_filter_hdr hdr;
	/* Up to 4 filters are allowed. */
	struct wsm_ether_type_filter filters[WSM_MAX_FILTER_ELEMENTS];
} __packed;
33
/* UDP filter applied while suspended: filters out DHCP bootps/bootpc
 * destination ports (NOTE(review): exact in/out direction semantics of
 * WSM_FILTER_ACTION_FILTER_OUT come from the WSM firmware interface —
 * confirm against the WSM docs).
 */
static struct cw1200_udp_port_filter cw1200_udp_port_filter_on = {
	.hdr.num = 2,
	.filters = {
		[0] = {
			.action = WSM_FILTER_ACTION_FILTER_OUT,
			.type = WSM_FILTER_PORT_TYPE_DST,
			.port = __cpu_to_le16(67), /* DHCP Bootps */
		},
		[1] = {
			.action = WSM_FILTER_ACTION_FILTER_OUT,
			.type = WSM_FILTER_PORT_TYPE_DST,
			.port = __cpu_to_le16(68), /* DHCP Bootpc */
		},
	}
};

/* Zero-entry header used to clear the UDP filter on resume. */
static struct wsm_udp_port_filter_hdr cw1200_udp_port_filter_off = {
	.num = 0,
};
53
/* Not defined by older if_ether.h headers */
#ifndef ETH_P_WAPI
#define ETH_P_WAPI 0x88B4
#endif

/* Ethertype filter applied while suspended: passes IPv4, EAPOL, WAPI
 * and ARP frames (WSM_FILTER_ACTION_FILTER_IN on those four types).
 */
static struct cw1200_ether_type_filter cw1200_ether_type_filter_on = {
	.hdr.num = 4,
	.filters = {
		[0] = {
			.action = WSM_FILTER_ACTION_FILTER_IN,
			.type = __cpu_to_le16(ETH_P_IP),
		},
		[1] = {
			.action = WSM_FILTER_ACTION_FILTER_IN,
			.type = __cpu_to_le16(ETH_P_PAE),
		},
		[2] = {
			.action = WSM_FILTER_ACTION_FILTER_IN,
			.type = __cpu_to_le16(ETH_P_WAPI),
		},
		[3] = {
			.action = WSM_FILTER_ACTION_FILTER_IN,
			.type = __cpu_to_le16(ETH_P_ARP),
		},
	},
};

/* Zero-entry header used to clear the ethertype filter on resume. */
static struct wsm_ether_type_filter_hdr cw1200_ether_type_filter_off = {
	.num = 0,
};
83
84/* private */
85struct cw1200_suspend_state {
86 unsigned long bss_loss_tmo;
87 unsigned long join_tmo;
88 unsigned long direct_probe;
89 unsigned long link_id_gc;
90 bool beacon_skipping;
91 u8 prev_ps_mode;
92};
93
94static void cw1200_pm_stay_awake_tmo(unsigned long arg)
95{
96 /* XXX what's the point of this ? */
97}
98
/* Initialize per-device PM state: the lock and the stay-awake timer
 * used to hold off suspend.  Always returns 0.
 */
int cw1200_pm_init(struct cw1200_pm_state *pm,
		   struct cw1200_common *priv)
{
	spin_lock_init(&pm->lock);

	init_timer(&pm->stay_awake);
	pm->stay_awake.data = (unsigned long)pm;
	pm->stay_awake.function = cw1200_pm_stay_awake_tmo;

	return 0;
}
110
/* Tear down PM state; waits for a running stay-awake timer callback. */
void cw1200_pm_deinit(struct cw1200_pm_state *pm)
{
	del_timer_sync(&pm->stay_awake);
}
115
/* Keep the device out of suspend for at least @tmo jiffies from now:
 * the pending stay_awake timer makes cw1200_wow_suspend() return
 * -EAGAIN.  Only extends the deadline, never shortens it.
 */
void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
			  unsigned long tmo)
{
	long cur_tmo;
	spin_lock_bh(&pm->lock);
	cur_tmo = pm->stay_awake.expires - jiffies;
	if (!timer_pending(&pm->stay_awake) || cur_tmo < (long)tmo)
		mod_timer(&pm->stay_awake, jiffies + tmo);
	spin_unlock_bh(&pm->lock);
}
126
/* Cancel a pending delayed work for suspend and report how much delay
 * was left: >= 0 remaining jiffies if it was pending, -1 if not.  The
 * result is handed back to cw1200_resume_work() on resume.
 */
static long cw1200_suspend_work(struct delayed_work *work)
{
	int ret = cancel_delayed_work(work);
	long tmo;
	if (ret > 0) {
		/* Timer is pending */
		tmo = work->timer.expires - jiffies;
		if (tmo < 0)
			tmo = 0;
	} else {
		tmo = -1;
	}
	return tmo;
}
141
/* Re-queue a delayed work that cw1200_suspend_work() cancelled.
 * A negative @tmo means it was not pending — report success (1)
 * without queueing anything.
 */
static int cw1200_resume_work(struct cw1200_common *priv,
			      struct delayed_work *work,
			      unsigned long tmo)
{
	if ((long)tmo < 0)
		return 1;

	return queue_delayed_work(priv->workqueue, work, tmo);
}
151
/* Return 0 (veto suspend) if the device has signalled an RX event that
 * the BH thread has not consumed yet, 1 otherwise.
 */
int cw1200_can_suspend(struct cw1200_common *priv)
{
	if (atomic_read(&priv->bh_rx)) {
		wiphy_dbg(priv->hw->wiphy, "Suspend interrupted.\n");
		return 0;
	}
	return 1;
}
EXPORT_SYMBOL_GPL(cw1200_can_suspend);
161
162int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
163{
164 struct cw1200_common *priv = hw->priv;
165 struct cw1200_pm_state *pm_state = &priv->pm_state;
166 struct cw1200_suspend_state *state;
167 int ret;
168
169 spin_lock_bh(&pm_state->lock);
170 ret = timer_pending(&pm_state->stay_awake);
171 spin_unlock_bh(&pm_state->lock);
172 if (ret)
173 return -EAGAIN;
174
175 /* Do not suspend when datapath is not idle */
176 if (priv->tx_queue_stats.num_queued)
177 return -EBUSY;
178
179 /* Make sure there is no configuration requests in progress. */
180 if (!mutex_trylock(&priv->conf_mutex))
181 return -EBUSY;
182
183 /* Ensure pending operations are done.
184 * Note also that wow_suspend must return in ~2.5sec, before
185 * watchdog is triggered.
186 */
187 if (priv->channel_switch_in_progress)
188 goto revert1;
189
190 /* Do not suspend when join is pending */
191 if (priv->join_pending)
192 goto revert1;
193
194 /* Do not suspend when scanning */
195 if (down_trylock(&priv->scan.lock))
196 goto revert1;
197
198 /* Lock TX. */
199 wsm_lock_tx_async(priv);
200
201 /* Wait to avoid possible race with bh code.
202 * But do not wait too long...
203 */
204 if (wait_event_timeout(priv->bh_evt_wq,
205 !priv->hw_bufs_used, HZ / 10) <= 0)
206 goto revert2;
207
208 /* Set UDP filter */
209 wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_on.hdr);
210
211 /* Set ethernet frame type filter */
212 wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_on.hdr);
213
214 /* Allocate state */
215 state = kzalloc(sizeof(struct cw1200_suspend_state), GFP_KERNEL);
216 if (!state)
217 goto revert3;
218
219 /* Change to legacy PS while going to suspend */
220 if (!priv->vif->p2p &&
221 priv->join_status == CW1200_JOIN_STATUS_STA &&
222 priv->powersave_mode.mode != WSM_PSM_PS) {
223 state->prev_ps_mode = priv->powersave_mode.mode;
224 priv->powersave_mode.mode = WSM_PSM_PS;
225 cw1200_set_pm(priv, &priv->powersave_mode);
226 if (wait_event_interruptible_timeout(priv->ps_mode_switch_done,
227 !priv->ps_mode_switch_in_progress, 1*HZ) <= 0) {
228 goto revert3;
229 }
230 }
231
232 /* Store delayed work states. */
233 state->bss_loss_tmo =
234 cw1200_suspend_work(&priv->bss_loss_work);
235 state->join_tmo =
236 cw1200_suspend_work(&priv->join_timeout);
237 state->direct_probe =
238 cw1200_suspend_work(&priv->scan.probe_work);
239 state->link_id_gc =
240 cw1200_suspend_work(&priv->link_id_gc_work);
241
242 cancel_delayed_work_sync(&priv->clear_recent_scan_work);
243 atomic_set(&priv->recent_scan, 0);
244
245 /* Enable beacon skipping */
246 if (priv->join_status == CW1200_JOIN_STATUS_STA &&
247 priv->join_dtim_period &&
248 !priv->has_multicast_subscription) {
249 state->beacon_skipping = true;
250 wsm_set_beacon_wakeup_period(priv,
251 priv->join_dtim_period,
252 CW1200_BEACON_SKIPPING_MULTIPLIER * priv->join_dtim_period);
253 }
254
255 /* Stop serving thread */
256 if (cw1200_bh_suspend(priv))
257 goto revert4;
258
259 ret = timer_pending(&priv->mcast_timeout);
260 if (ret)
261 goto revert5;
262
263 /* Store suspend state */
264 pm_state->suspend_state = state;
265
266 /* Enable IRQ wake */
267 ret = priv->hwbus_ops->power_mgmt(priv->hwbus_priv, true);
268 if (ret) {
269 wiphy_err(priv->hw->wiphy,
270 "PM request failed: %d. WoW is disabled.\n", ret);
271 cw1200_wow_resume(hw);
272 return -EBUSY;
273 }
274
275 /* Force resume if event is coming from the device. */
276 if (atomic_read(&priv->bh_rx)) {
277 cw1200_wow_resume(hw);
278 return -EAGAIN;
279 }
280
281 return 0;
282
283revert5:
284 WARN_ON(cw1200_bh_resume(priv));
285revert4:
286 cw1200_resume_work(priv, &priv->bss_loss_work,
287 state->bss_loss_tmo);
288 cw1200_resume_work(priv, &priv->join_timeout,
289 state->join_tmo);
290 cw1200_resume_work(priv, &priv->scan.probe_work,
291 state->direct_probe);
292 cw1200_resume_work(priv, &priv->link_id_gc_work,
293 state->link_id_gc);
294 kfree(state);
295revert3:
296 wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);
297 wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off);
298revert2:
299 wsm_unlock_tx(priv);
300 up(&priv->scan.lock);
301revert1:
302 mutex_unlock(&priv->conf_mutex);
303 return -EBUSY;
304}
305
/* mac80211 WoW resume handler: undoes everything cw1200_wow_suspend()
 * set up, in the order required by the BH/scan interaction below, and
 * frees the saved suspend state.  Assumes a successful prior suspend
 * (pm_state->suspend_state is non-NULL).  Always returns 0.
 */
int cw1200_wow_resume(struct ieee80211_hw *hw)
{
	struct cw1200_common *priv = hw->priv;
	struct cw1200_pm_state *pm_state = &priv->pm_state;
	struct cw1200_suspend_state *state;

	state = pm_state->suspend_state;
	pm_state->suspend_state = NULL;

	/* Disable IRQ wake */
	priv->hwbus_ops->power_mgmt(priv->hwbus_priv, false);

	/* Scan.lock must be released before BH is resumed other way
	 * in case when BSS_LOST command arrived the processing of the
	 * command will be delayed.
	 */
	up(&priv->scan.lock);

	/* Resume BH thread */
	WARN_ON(cw1200_bh_resume(priv));

	/* Restores previous PS mode */
	if (!priv->vif->p2p && priv->join_status == CW1200_JOIN_STATUS_STA) {
		priv->powersave_mode.mode = state->prev_ps_mode;
		cw1200_set_pm(priv, &priv->powersave_mode);
	}

	if (state->beacon_skipping) {
		/* Fall back to per-DTIM wakeup, or every beacon when the
		 * DTIM interval would exceed MAX_BEACON_SKIP_TIME_MS.
		 */
		wsm_set_beacon_wakeup_period(priv, priv->beacon_int *
					     priv->join_dtim_period >
					     MAX_BEACON_SKIP_TIME_MS ? 1 :
					     priv->join_dtim_period, 0);
		state->beacon_skipping = false;
	}

	/* Resume delayed work */
	cw1200_resume_work(priv, &priv->bss_loss_work,
			   state->bss_loss_tmo);
	cw1200_resume_work(priv, &priv->join_timeout,
			   state->join_tmo);
	cw1200_resume_work(priv, &priv->scan.probe_work,
			   state->direct_probe);
	cw1200_resume_work(priv, &priv->link_id_gc_work,
			   state->link_id_gc);

	/* Remove UDP port filter */
	wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);

	/* Remove ethernet frame type filter */
	wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off);

	/* Unlock datapath */
	wsm_unlock_tx(priv);

	/* Unlock configuration mutex */
	mutex_unlock(&priv->conf_mutex);

	/* Free memory */
	kfree(state);

	return 0;
}
diff --git a/drivers/net/wireless/cw1200/pm.h b/drivers/net/wireless/cw1200/pm.h
new file mode 100644
index 000000000000..3ed90ff22bb8
--- /dev/null
+++ b/drivers/net/wireless/cw1200/pm.h
@@ -0,0 +1,43 @@
1/*
2 * Mac80211 power management interface for ST-Ericsson CW1200 mac80211 drivers
3 *
4 * Copyright (c) 2011, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef PM_H_INCLUDED
13#define PM_H_INCLUDED
14
15/* ******************************************************************** */
16/* mac80211 API */
17
18/* extern */ struct cw1200_common;
19/* private */ struct cw1200_suspend_state;
20
struct cw1200_pm_state {
	struct cw1200_suspend_state *suspend_state;	/* saved work/PS state, non-NULL while suspended */
	struct timer_list stay_awake;	/* while pending, suspend returns -EAGAIN */
	struct platform_device *pm_dev;
	spinlock_t lock; /* Protect access */
};
27
28#ifdef CONFIG_PM
29int cw1200_pm_init(struct cw1200_pm_state *pm,
30 struct cw1200_common *priv);
31void cw1200_pm_deinit(struct cw1200_pm_state *pm);
32int cw1200_wow_suspend(struct ieee80211_hw *hw,
33 struct cfg80211_wowlan *wowlan);
34int cw1200_wow_resume(struct ieee80211_hw *hw);
35int cw1200_can_suspend(struct cw1200_common *priv);
36void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
37 unsigned long tmo);
38#else
39static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
40 unsigned long tmo) {
41}
42#endif
43#endif
diff --git a/drivers/net/wireless/cw1200/queue.c b/drivers/net/wireless/cw1200/queue.c
new file mode 100644
index 000000000000..9c3925f58d79
--- /dev/null
+++ b/drivers/net/wireless/cw1200/queue.c
@@ -0,0 +1,583 @@
1/*
2 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <net/mac80211.h>
13#include <linux/sched.h>
14#include "queue.h"
15#include "cw1200.h"
16#include "debug.h"
17
/* One slot of the preallocated TX-queue pool. */
/* private */ struct cw1200_queue_item
{
	struct list_head head;		/* links into queue, pending or free_pool list */
	struct sk_buff *skb;		/* frame payload; NULL while the slot is free */
	u32 packet_id;			/* generation-stamped id handed to firmware */
	unsigned long queue_timestamp;	/* jiffies at enqueue time (TTL base for GC) */
	unsigned long xmit_timestamp;	/* jiffies when handed to firmware */
	struct cw1200_txpriv txpriv;	/* driver-private TX metadata */
	u8 generation;			/* bumped on reuse so stale packet ids miss */
};
28
29static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
30{
31 struct cw1200_queue_stats *stats = queue->stats;
32 if (queue->tx_locked_cnt++ == 0) {
33 pr_debug("[TX] Queue %d is locked.\n",
34 queue->queue_id);
35 ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
36 }
37}
38
39static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
40{
41 struct cw1200_queue_stats *stats = queue->stats;
42 BUG_ON(!queue->tx_locked_cnt);
43 if (--queue->tx_locked_cnt == 0) {
44 pr_debug("[TX] Queue %d is unlocked.\n",
45 queue->queue_id);
46 ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
47 }
48}
49
50static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
51 u8 *queue_id, u8 *item_generation,
52 u8 *item_id)
53{
54 *item_id = (packet_id >> 0) & 0xFF;
55 *item_generation = (packet_id >> 8) & 0xFF;
56 *queue_id = (packet_id >> 16) & 0xFF;
57 *queue_generation = (packet_id >> 24) & 0xFF;
58}
59
60static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
61 u8 item_generation, u8 item_id)
62{
63 return ((u32)item_id << 0) |
64 ((u32)item_generation << 8) |
65 ((u32)queue_id << 16) |
66 ((u32)queue_generation << 24);
67}
68
69static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
70 struct list_head *gc_list)
71{
72 struct cw1200_queue_item *item, *tmp;
73
74 list_for_each_entry_safe(item, tmp, gc_list, head) {
75 list_del(&item->head);
76 stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
77 kfree(item);
78 }
79}
80
81static void cw1200_queue_register_post_gc(struct list_head *gc_list,
82 struct cw1200_queue_item *item)
83{
84 struct cw1200_queue_item *gc_item;
85 gc_item = kmalloc(sizeof(struct cw1200_queue_item),
86 GFP_ATOMIC);
87 BUG_ON(!gc_item);
88 memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
89 list_add_tail(&gc_item->head, gc_list);
90}
91
/* Expire queued frames whose TTL has elapsed.  Caller holds
 * queue->lock.  Expired items are cloned onto @head for destruction by
 * cw1200_queue_post_gc() after the lock is dropped, and their slots
 * return to the free pool.  If the queue was overfull: once occupancy
 * drops to half capacity it is unlocked (when @unlock is set);
 * otherwise the GC timer is re-armed for the next expiry.
 */
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(item, tmp, &queue->queue, head) {
		/* The list is enqueue-ordered, so the first live
		 * (non-expired) item ends the sweep.
		 */
		if (jiffies - item->queue_timestamp < queue->ttl)
			break;
		--queue->num_queued;
		--queue->link_map_cache[item->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			/* Still overfull: fire again when the oldest
			 * surviving frame expires, and keep the host
			 * awake until then.
			 */
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}
132
133static void cw1200_queue_gc(unsigned long arg)
134{
135 LIST_HEAD(list);
136 struct cw1200_queue *queue =
137 (struct cw1200_queue *)arg;
138
139 spin_lock_bh(&queue->lock);
140 __cw1200_queue_gc(queue, &list, true);
141 spin_unlock_bh(&queue->lock);
142 cw1200_queue_post_gc(queue->stats, &list);
143}
144
145int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
146 size_t map_capacity,
147 cw1200_queue_skb_dtor_t skb_dtor,
148 struct cw1200_common *priv)
149{
150 memset(stats, 0, sizeof(*stats));
151 stats->map_capacity = map_capacity;
152 stats->skb_dtor = skb_dtor;
153 stats->priv = priv;
154 spin_lock_init(&stats->lock);
155 init_waitqueue_head(&stats->wait_link_id_empty);
156
157 stats->link_map_cache = kzalloc(sizeof(int) * map_capacity,
158 GFP_KERNEL);
159 if (!stats->link_map_cache)
160 return -ENOMEM;
161
162 return 0;
163}
164
165int cw1200_queue_init(struct cw1200_queue *queue,
166 struct cw1200_queue_stats *stats,
167 u8 queue_id,
168 size_t capacity,
169 unsigned long ttl)
170{
171 size_t i;
172
173 memset(queue, 0, sizeof(*queue));
174 queue->stats = stats;
175 queue->capacity = capacity;
176 queue->queue_id = queue_id;
177 queue->ttl = ttl;
178 INIT_LIST_HEAD(&queue->queue);
179 INIT_LIST_HEAD(&queue->pending);
180 INIT_LIST_HEAD(&queue->free_pool);
181 spin_lock_init(&queue->lock);
182 init_timer(&queue->gc);
183 queue->gc.data = (unsigned long)queue;
184 queue->gc.function = cw1200_queue_gc;
185
186 queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity,
187 GFP_KERNEL);
188 if (!queue->pool)
189 return -ENOMEM;
190
191 queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity,
192 GFP_KERNEL);
193 if (!queue->link_map_cache) {
194 kfree(queue->pool);
195 queue->pool = NULL;
196 return -ENOMEM;
197 }
198
199 for (i = 0; i < capacity; ++i)
200 list_add_tail(&queue->pool[i].head, &queue->free_pool);
201
202 return 0;
203}
204
/* Drop every queued and pending frame and bump the queue generation so
 * all outstanding packet ids become stale.  Frames are destroyed via
 * the skb dtor after the queue lock is released.  Always returns 0.
 */
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	queue->generation++;
	/* Fold queued frames into pending so one sweep covers both. */
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	/* Subtract this queue's contribution from the global stats. */
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}
240
241void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
242{
243 kfree(stats->link_map_cache);
244 stats->link_map_cache = NULL;
245}
246
/* Tear down a TX queue: flush all frames, stop the GC timer and free
 * the item pool.  Must only be called when no further TX can arrive.
 */
void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}
258
259size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
260 u32 link_id_map)
261{
262 size_t ret;
263 int i, bit;
264 size_t map_capacity = queue->stats->map_capacity;
265
266 if (!link_id_map)
267 return 0;
268
269 spin_lock_bh(&queue->lock);
270 if (link_id_map == (u32)-1) {
271 ret = queue->num_queued - queue->num_pending;
272 } else {
273 ret = 0;
274 for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
275 if (link_id_map & bit)
276 ret += queue->link_map_cache[i];
277 }
278 }
279 spin_unlock_bh(&queue->lock);
280 return ret;
281}
282
/* Enqueue @skb for transmission.  A free-pool slot is stamped with a
 * fresh packet id (queue generation | queue id | item generation |
 * item index) so the frame can later be addressed by
 * cw1200_queue_get()/requeue()/remove().  Returns -EINVAL for an
 * out-of-range link id, -ENOENT if the pool is unexpectedly empty
 * (normally prevented by the overfull TX lock below).
 */
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (queue->overfull == false &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			/* Fire the GC immediately to start draining. */
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
334
/* Fetch the first queued frame whose link id matches @link_id_map and
 * move it to the pending list.  On success returns 0 and exposes the
 * frame's WSM TX header, mac80211 tx_info and driver txpriv; the WSM
 * header is stamped with the item's packet id.  -ENOENT if no frame
 * matches.
 */
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		/* Waiters may be blocked until this link drains. */
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}
375
/* Give a pending frame (identified by @packet_id) back to the TX queue
 * for retransmission.  The item generation is bumped and re-encoded
 * into a fresh packet id, so the old id can no longer address this
 * slot.  Returns -ENOENT for a stale queue/item generation, -EINVAL
 * for an out-of-range item index.
 */
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		/* list_move (not tail): retries go to the queue head. */
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
417
/* Move every pending frame back onto the TX queue.  Iterated in
 * reverse so the head-insertion list_move() preserves the original
 * transmit order; each item's generation is bumped to invalidate the
 * packet ids already handed to firmware.  Always returns 0.
 */
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;
	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}
444
/* Retire a pending frame after TX confirmation.  The slot returns to
 * the free pool with its generation bumped, and the skb is destroyed
 * outside the queue lock.  If occupancy dropped to half capacity the
 * overfull condition is cleared and the mac80211 queue woken.
 * Returns -ENOENT for a stale generation, -EINVAL for a bad index.
 */
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		/* Detach the skb under the lock; destroy it below. */
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Do not use list_move_tail here, but list_move:
		 * try to utilize cache row.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}
495
/* Look up a pending frame by packet id without removing it.  The
 * generations encoded in @packet_id are validated against the current
 * queue/item state; on success *skb/*txpriv point into the live item.
 * Returns -ENOENT for a stale generation, -EINVAL for a bad index.
 */
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	/* Address computation only; dereferenced after the bounds check. */
	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
525
526void cw1200_queue_lock(struct cw1200_queue *queue)
527{
528 spin_lock_bh(&queue->lock);
529 __cw1200_queue_lock(queue);
530 spin_unlock_bh(&queue->lock);
531}
532
533void cw1200_queue_unlock(struct cw1200_queue *queue)
534{
535 spin_lock_bh(&queue->lock);
536 __cw1200_queue_unlock(queue);
537 spin_unlock_bh(&queue->lock);
538}
539
540bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
541 unsigned long *timestamp,
542 u32 pending_frame_id)
543{
544 struct cw1200_queue_item *item;
545 bool ret;
546
547 spin_lock_bh(&queue->lock);
548 ret = !list_empty(&queue->pending);
549 if (ret) {
550 list_for_each_entry(item, &queue->pending, head) {
551 if (item->packet_id != pending_frame_id)
552 if (time_before(item->xmit_timestamp,
553 *timestamp))
554 *timestamp = item->xmit_timestamp;
555 }
556 }
557 spin_unlock_bh(&queue->lock);
558 return ret;
559}
560
561bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
562 u32 link_id_map)
563{
564 bool empty = true;
565
566 spin_lock_bh(&stats->lock);
567 if (link_id_map == (u32)-1) {
568 empty = stats->num_queued == 0;
569 } else {
570 int i;
571 for (i = 0; i < stats->map_capacity; ++i) {
572 if (link_id_map & BIT(i)) {
573 if (stats->link_map_cache[i]) {
574 empty = false;
575 break;
576 }
577 }
578 }
579 }
580 spin_unlock_bh(&stats->lock);
581
582 return empty;
583}
diff --git a/drivers/net/wireless/cw1200/queue.h b/drivers/net/wireless/cw1200/queue.h
new file mode 100644
index 000000000000..119f9c79c14e
--- /dev/null
+++ b/drivers/net/wireless/cw1200/queue.h
@@ -0,0 +1,116 @@
1/*
2 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef CW1200_QUEUE_H_INCLUDED
13#define CW1200_QUEUE_H_INCLUDED
14
15/* private */ struct cw1200_queue_item;
16
17/* extern */ struct sk_buff;
18/* extern */ struct wsm_tx;
19/* extern */ struct cw1200_common;
20/* extern */ struct ieee80211_tx_queue_stats;
21/* extern */ struct cw1200_txpriv;
22
23/* forward */ struct cw1200_queue_stats;
24
25typedef void (*cw1200_queue_skb_dtor_t)(struct cw1200_common *priv,
26 struct sk_buff *skb,
27 const struct cw1200_txpriv *txpriv);
28
/* One hardware TX queue with a built-in fixed-size item allocator. */
struct cw1200_queue {
	struct cw1200_queue_stats *stats;	/* shared across all queues */
	size_t capacity;		/* number of items in pool */
	size_t num_queued;		/* queued + pending frames */
	size_t num_pending;		/* frames handed to firmware, unconfirmed */
	size_t num_sent;		/* frames confirmed and retired */
	struct cw1200_queue_item *pool;	/* preallocated item storage */
	struct list_head queue;		/* frames awaiting transmission */
	struct list_head free_pool;	/* unused items */
	struct list_head pending;	/* frames awaiting TX confirmation */
	int tx_locked_cnt;		/* recursive mac80211-queue stop depth */
	int *link_map_cache;		/* per-link count of queued frames */
	bool overfull;			/* queue near capacity; TX stopped */
	spinlock_t lock; /* Protect queue entry */
	u8 queue_id;
	u8 generation;			/* bumped by cw1200_queue_clear() */
	struct timer_list gc;		/* TTL garbage-collector timer */
	unsigned long ttl;		/* frame lifetime, jiffies */
};
48
/* Global TX accounting shared by all cw1200 queues. */
struct cw1200_queue_stats {
	spinlock_t lock; /* Protect stats entry */
	int *link_map_cache;	/* per-link totals across all queues */
	int num_queued;		/* total queued frames across all queues */
	size_t map_capacity;	/* number of link counters */
	wait_queue_head_t wait_link_id_empty;	/* woken when a link drains to 0 */
	cw1200_queue_skb_dtor_t skb_dtor;	/* destructor for retired frames */
	struct cw1200_common *priv;
};
58
/* Driver-private per-frame TX metadata carried alongside the skb. */
struct cw1200_txpriv {
	u8 link_id;	/* index into the link_map_cache counters */
	u8 raw_link_id;	/* NOTE(review): presumably the unmapped link id — confirm in txrx.c */
	u8 tid;
	u8 rate_id;
	u8 offset;	/* bytes of driver header before the 802.11 frame
			 * (skb_pull/push by this amount in scan.c) */
};
66
67int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
68 size_t map_capacity,
69 cw1200_queue_skb_dtor_t skb_dtor,
70 struct cw1200_common *priv);
71int cw1200_queue_init(struct cw1200_queue *queue,
72 struct cw1200_queue_stats *stats,
73 u8 queue_id,
74 size_t capacity,
75 unsigned long ttl);
76int cw1200_queue_clear(struct cw1200_queue *queue);
77void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats);
78void cw1200_queue_deinit(struct cw1200_queue *queue);
79
80size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
81 u32 link_id_map);
82int cw1200_queue_put(struct cw1200_queue *queue,
83 struct sk_buff *skb,
84 struct cw1200_txpriv *txpriv);
85int cw1200_queue_get(struct cw1200_queue *queue,
86 u32 link_id_map,
87 struct wsm_tx **tx,
88 struct ieee80211_tx_info **tx_info,
89 const struct cw1200_txpriv **txpriv);
90int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id);
91int cw1200_queue_requeue_all(struct cw1200_queue *queue);
92int cw1200_queue_remove(struct cw1200_queue *queue,
93 u32 packet_id);
94int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
95 struct sk_buff **skb,
96 const struct cw1200_txpriv **txpriv);
97void cw1200_queue_lock(struct cw1200_queue *queue);
98void cw1200_queue_unlock(struct cw1200_queue *queue);
99bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
100 unsigned long *timestamp,
101 u32 pending_frame_id);
102
103bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
104 u32 link_id_map);
105
106static inline u8 cw1200_queue_get_queue_id(u32 packet_id)
107{
108 return (packet_id >> 16) & 0xFF;
109}
110
/* Extract the item generation field (bits [15:8]) of a packet id;
 * note this is the item generation, not the queue generation
 * (bits [31:24]) — see cw1200_queue_parse_id() in queue.c.
 */
static inline u8 cw1200_queue_get_generation(u32 packet_id)
{
	return (packet_id >> 8) & 0xFF;
}
115
116#endif /* CW1200_QUEUE_H_INCLUDED */
diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c
new file mode 100644
index 000000000000..ee3c19037aac
--- /dev/null
+++ b/drivers/net/wireless/cw1200/scan.c
@@ -0,0 +1,461 @@
1/*
2 * Scan implementation for ST-Ericsson CW1200 mac80211 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/sched.h>
13#include "cw1200.h"
14#include "scan.h"
15#include "sta.h"
16#include "pm.h"
17
18static void cw1200_scan_restart_delayed(struct cw1200_common *priv);
19
/* Issue a WSM scan request.  A watchdog (priv->scan.timeout) is armed
 * from the summed per-channel dwell times in case the firmware never
 * posts a scan-complete indication, and the host is kept awake for
 * that long.  Returns -EBUSY while joining/pre-STA, otherwise the
 * wsm_scan() result.
 */
static int cw1200_scan_start(struct cw1200_common *priv, struct wsm_scan *scan)
{
	int ret, i;
	int tmo = 2000;	/* base watchdog timeout, milliseconds */

	switch (priv->join_status) {
	case CW1200_JOIN_STATUS_PRE_STA:
	case CW1200_JOIN_STATUS_JOINING:
		return -EBUSY;
	default:
		break;
	}

	wiphy_dbg(priv->hw->wiphy, "[SCAN] hw req, type %d, %d channels, flags: 0x%x.\n",
		  scan->type, scan->num_channels, scan->flags);

	/* Budget each channel's max dwell time plus slack. */
	for (i = 0; i < scan->num_channels; ++i)
		tmo += scan->ch[i].max_chan_time + 10;

	cancel_delayed_work_sync(&priv->clear_recent_scan_work);
	atomic_set(&priv->scan.in_progress, 1);
	atomic_set(&priv->recent_scan, 1);
	cw1200_pm_stay_awake(&priv->pm_state, tmo * HZ / 1000);
	queue_delayed_work(priv->workqueue, &priv->scan.timeout,
			   tmo * HZ / 1000);
	ret = wsm_scan(priv, scan);
	if (ret) {
		/* Request rejected: disarm the watchdog and restore
		 * whatever mode was suspended for the scan.
		 */
		atomic_set(&priv->scan.in_progress, 0);
		cancel_delayed_work_sync(&priv->scan.timeout);
		cw1200_scan_restart_delayed(priv);
	}
	return ret;
}
53
/* mac80211 hw_scan entry point.  Uploads the probe-request template,
 * captures the request into priv->scan and hands off to
 * cw1200_scan_work(), which scans the channel list in chunks.  Takes
 * priv->scan.lock (released when the scan completes) and wsm_lock_tx
 * (released by the scan state machine).
 */
int cw1200_hw_scan(struct ieee80211_hw *hw,
		   struct ieee80211_vif *vif,
		   struct cfg80211_scan_request *req)
{
	struct cw1200_common *priv = hw->priv;
	struct wsm_template_frame frame = {
		.frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
	};
	int i, ret;

	if (!priv->vif)
		return -EINVAL;

	/* Scan when P2P_GO corrupt firmware MiniAP mode */
	if (priv->join_status == CW1200_JOIN_STATUS_AP)
		return -EOPNOTSUPP;

	/* A single zero-length SSID means a plain broadcast scan. */
	if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
		req->n_ssids = 0;

	wiphy_dbg(hw->wiphy, "[SCAN] Scan request for %d SSIDs.\n",
		  req->n_ssids);

	if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
		return -EINVAL;

	frame.skb = ieee80211_probereq_get(hw, priv->vif, NULL, 0,
		req->ie_len);
	if (!frame.skb)
		return -ENOMEM;

	if (req->ie_len)
		memcpy(skb_put(frame.skb, req->ie_len), req->ie, req->ie_len);

	/* will be unlocked in cw1200_scan_work() */
	down(&priv->scan.lock);
	mutex_lock(&priv->conf_mutex);

	ret = wsm_set_template_frame(priv, &frame);
	if (!ret) {
		/* Host want to be the probe responder. */
		ret = wsm_set_probe_responder(priv, true);
	}
	if (ret) {
		mutex_unlock(&priv->conf_mutex);
		up(&priv->scan.lock);
		dev_kfree_skb(frame.skb);
		return ret;
	}

	wsm_lock_tx(priv);

	BUG_ON(priv->scan.req);
	priv->scan.req = req;
	priv->scan.n_ssids = 0;
	priv->scan.status = 0;
	priv->scan.begin = &req->channels[0];
	priv->scan.curr = priv->scan.begin;
	priv->scan.end = &req->channels[req->n_channels];
	priv->scan.output_power = priv->output_power;

	/* Copy the SSID list into driver-owned storage; req may be
	 * consumed asynchronously by the scan worker.
	 */
	for (i = 0; i < req->n_ssids; ++i) {
		struct wsm_ssid *dst = &priv->scan.ssids[priv->scan.n_ssids];
		memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid));
		dst->length = req->ssids[i].ssid_len;
		++priv->scan.n_ssids;
	}

	mutex_unlock(&priv->conf_mutex);

	if (frame.skb)
		dev_kfree_skb(frame.skb);
	queue_work(priv->workqueue, &priv->scan.work);
	return 0;
}
129
/* Scan state machine worker.  Each invocation either programs the
 * firmware with the next chunk of channels (up to
 * WSM_SCAN_MAX_NUM_OF_CHANNELS sharing one band / passive flag / TX
 * power), or — when all channels are done or the scan was canceled —
 * restores power-save and TX state, reports completion to mac80211 and
 * releases the semaphore taken in cw1200_hw_scan().
 */
void cw1200_scan_work(struct work_struct *work)
{
	struct cw1200_common *priv = container_of(work, struct cw1200_common,
						  scan.work);
	struct ieee80211_channel **it;
	struct wsm_scan scan = {
		.type = WSM_SCAN_TYPE_FOREGROUND,
		.flags = WSM_SCAN_FLAG_SPLIT_METHOD,
	};
	bool first_run = (priv->scan.begin == priv->scan.curr &&
			  priv->scan.begin != priv->scan.end);
	int i;

	if (first_run) {
		/* Firmware gets crazy if scan request is sent
		 * when STA is joined but not yet associated.
		 * Force unjoin in this case.
		 */
		if (cancel_delayed_work_sync(&priv->join_timeout) > 0)
			cw1200_join_timeout(&priv->join_timeout.work);
	}

	mutex_lock(&priv->conf_mutex);

	if (first_run) {
		if (priv->join_status == CW1200_JOIN_STATUS_STA &&
		    !(priv->powersave_mode.mode & WSM_PSM_PS)) {
			/* Force PS mode for the scan's duration;
			 * restored on completion below.
			 */
			struct wsm_set_pm pm = priv->powersave_mode;
			pm.mode = WSM_PSM_PS;
			cw1200_set_pm(priv, &pm);
		} else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) {
			/* FW bug: driver has to restart p2p-dev mode
			 * after scan
			 */
			cw1200_disable_listening(priv);
		}
	}

	if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) {
		/* Scan finished or canceled: restore TX power and PS
		 * mode, then unwind the locks taken in cw1200_hw_scan().
		 */
		if (priv->scan.output_power != priv->output_power)
			wsm_set_output_power(priv, priv->output_power * 10);
		if (priv->join_status == CW1200_JOIN_STATUS_STA &&
		    !(priv->powersave_mode.mode & WSM_PSM_PS))
			cw1200_set_pm(priv, &priv->powersave_mode);

		if (priv->scan.status < 0)
			wiphy_dbg(priv->hw->wiphy, "[SCAN] Scan failed (%d).\n",
				  priv->scan.status);
		else if (priv->scan.req)
			wiphy_dbg(priv->hw->wiphy,
				  "[SCAN] Scan completed.\n");
		else
			wiphy_dbg(priv->hw->wiphy,
				  "[SCAN] Scan canceled.\n");

		priv->scan.req = NULL;
		cw1200_scan_restart_delayed(priv);
		wsm_unlock_tx(priv);
		mutex_unlock(&priv->conf_mutex);
		ieee80211_scan_completed(priv->hw, priv->scan.status ? 1 : 0);
		up(&priv->scan.lock);
		return;
	} else {
		struct ieee80211_channel *first = *priv->scan.curr;
		/* Grow the chunk while channels share band, passive
		 * flag and (for active scans) TX power with the first.
		 */
		for (it = priv->scan.curr + 1, i = 1;
		     it != priv->scan.end && i < WSM_SCAN_MAX_NUM_OF_CHANNELS;
		     ++it, ++i) {
			if ((*it)->band != first->band)
				break;
			if (((*it)->flags ^ first->flags) &
			    IEEE80211_CHAN_PASSIVE_SCAN)
				break;
			if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
			    (*it)->max_power != first->max_power)
				break;
		}
		scan.band = first->band;

		if (priv->scan.req->no_cck)
			scan.max_tx_rate = WSM_TRANSMIT_RATE_6;
		else
			scan.max_tx_rate = WSM_TRANSMIT_RATE_1;
		scan.num_probes =
			(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) ? 0 : 2;
		scan.num_ssids = priv->scan.n_ssids;
		scan.ssids = &priv->scan.ssids[0];
		scan.num_channels = it - priv->scan.curr;
		/* TODO: Is it optimal? */
		scan.probe_delay = 100;
		/* It is not stated in WSM specification, however
		 * FW team says that driver may not use FG scan
		 * when joined.
		 */
		if (priv->join_status == CW1200_JOIN_STATUS_STA) {
			scan.type = WSM_SCAN_TYPE_BACKGROUND;
			scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND;
		}
		scan.ch = kzalloc(
			sizeof(struct wsm_scan_ch) * (it - priv->scan.curr),
			GFP_KERNEL);
		if (!scan.ch) {
			priv->scan.status = -ENOMEM;
			goto fail;
		}
		/* Passive channels get longer dwell times. */
		for (i = 0; i < scan.num_channels; ++i) {
			scan.ch[i].number = priv->scan.curr[i]->hw_value;
			if (priv->scan.curr[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
				scan.ch[i].min_chan_time = 50;
				scan.ch[i].max_chan_time = 100;
			} else {
				scan.ch[i].min_chan_time = 10;
				scan.ch[i].max_chan_time = 25;
			}
		}
		if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
		    priv->scan.output_power != first->max_power) {
			priv->scan.output_power = first->max_power;
			wsm_set_output_power(priv,
					     priv->scan.output_power * 10);
		}
		priv->scan.status = cw1200_scan_start(priv, &scan);
		kfree(scan.ch);
		if (priv->scan.status)
			goto fail;
		priv->scan.curr = it;
	}
	mutex_unlock(&priv->conf_mutex);
	return;

fail:
	/* Mark the scan exhausted and reschedule ourselves so the
	 * completion path above runs and reports the failure.
	 */
	priv->scan.curr = priv->scan.end;
	mutex_unlock(&priv->conf_mutex);
	queue_work(priv->workqueue, &priv->scan.work);
	return;
}
265
266static void cw1200_scan_restart_delayed(struct cw1200_common *priv)
267{
268 /* FW bug: driver has to restart p2p-dev mode after scan. */
269 if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) {
270 cw1200_enable_listening(priv);
271 cw1200_update_filtering(priv);
272 }
273
274 if (priv->delayed_unjoin) {
275 priv->delayed_unjoin = false;
276 if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
277 wsm_unlock_tx(priv);
278 } else if (priv->delayed_link_loss) {
279 wiphy_dbg(priv->hw->wiphy, "[CQM] Requeue BSS loss.\n");
280 priv->delayed_link_loss = 0;
281 cw1200_cqm_bssloss_sm(priv, 1, 0, 0);
282 }
283}
284
285static void cw1200_scan_complete(struct cw1200_common *priv)
286{
287 queue_delayed_work(priv->workqueue, &priv->clear_recent_scan_work, HZ);
288 if (priv->scan.direct_probe) {
289 wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe complete.\n");
290 cw1200_scan_restart_delayed(priv);
291 priv->scan.direct_probe = 0;
292 up(&priv->scan.lock);
293 wsm_unlock_tx(priv);
294 } else {
295 cw1200_scan_work(&priv->scan.work);
296 }
297}
298
299void cw1200_scan_failed_cb(struct cw1200_common *priv)
300{
301 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
302 /* STA is stopped. */
303 return;
304
305 if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) {
306 priv->scan.status = -EIO;
307 queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0);
308 }
309}
310
311
312void cw1200_scan_complete_cb(struct cw1200_common *priv,
313 struct wsm_scan_complete *arg)
314{
315 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
316 /* STA is stopped. */
317 return;
318
319 if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) {
320 priv->scan.status = 1;
321 queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0);
322 }
323}
324
325void cw1200_clear_recent_scan_work(struct work_struct *work)
326{
327 struct cw1200_common *priv =
328 container_of(work, struct cw1200_common,
329 clear_recent_scan_work.work);
330 atomic_xchg(&priv->recent_scan, 0);
331}
332
/* Scan watchdog; also kicked with zero delay by the scan-complete and
 * scan-failed callbacks.  status > 0 means the completion indication
 * arrived in time; status == 0 is a genuine timeout, in which case the
 * firmware scan is aborted.  Either way cw1200_scan_complete()
 * advances the state machine.
 */
void cw1200_scan_timeout(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, scan.timeout.work);
	if (atomic_xchg(&priv->scan.in_progress, 0)) {
		if (priv->scan.status > 0) {
			priv->scan.status = 0;
		} else if (!priv->scan.status) {
			wiphy_warn(priv->hw->wiphy,
				   "Timeout waiting for scan complete notification.\n");
			priv->scan.status = -ETIMEDOUT;
			priv->scan.curr = priv->scan.end;
			wsm_stop_scan(priv);
		}
		cw1200_scan_complete(priv);
	}
}
350
/* Direct-probe workaround: transmit a queued probe-request frame as a
 * single-channel foreground scan (template frame + wsm_scan) instead
 * of a normal data frame.  The pending frame is consumed from its TX
 * queue and acked back to mac80211 on success.
 */
void cw1200_probe_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, scan.probe_work.work);
	u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id);
	struct cw1200_queue *queue = &priv->tx_queue[queue_id];
	const struct cw1200_txpriv *txpriv;
	struct wsm_tx *wsm;
	struct wsm_template_frame frame = {
		.frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
	};
	struct wsm_ssid ssids[1] = {{
		.length = 0,
	} };
	struct wsm_scan_ch ch[1] = {{
		.min_chan_time = 0,
		.max_chan_time = 10,
	} };
	struct wsm_scan scan = {
		.type = WSM_SCAN_TYPE_FOREGROUND,
		.num_probes = 1,
		.probe_delay = 0,
		.num_channels = 1,
		.ssids = ssids,
		.ch = ch,
	};
	u8 *ies;
	size_t ies_len;
	int ret;

	wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe work.\n");

	mutex_lock(&priv->conf_mutex);
	if (down_trylock(&priv->scan.lock)) {
		/* Scan is already in progress. Requeue self. */
		schedule();
		queue_delayed_work(priv->workqueue,
				   &priv->scan.probe_work, HZ / 10);
		mutex_unlock(&priv->conf_mutex);
		return;
	}

	/* Make sure we still have a pending probe req */
	if (cw1200_queue_get_skb(queue, priv->pending_frame_id,
				 &frame.skb, &txpriv)) {
		up(&priv->scan.lock);
		mutex_unlock(&priv->conf_mutex);
		wsm_unlock_tx(priv);
		return;
	}
	wsm = (struct wsm_tx *)frame.skb->data;
	scan.max_tx_rate = wsm->max_tx_rate;
	scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
		WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
	if (priv->join_status == CW1200_JOIN_STATUS_STA ||
	    priv->join_status == CW1200_JOIN_STATUS_IBSS) {
		scan.type = WSM_SCAN_TYPE_BACKGROUND;
		scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND;
	}
	ch[0].number = priv->channel->hw_value;

	/* Strip the driver TX header to expose the 802.11 frame. */
	skb_pull(frame.skb, txpriv->offset);

	ies = &frame.skb->data[sizeof(struct ieee80211_hdr_3addr)];
	ies_len = frame.skb->len - sizeof(struct ieee80211_hdr_3addr);

	if (ies_len) {
		u8 *ssidie =
			(u8 *)cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len);
		if (ssidie && ssidie[1] && ssidie[1] <= sizeof(ssids[0].ssid)) {
			u8 *nextie = &ssidie[2 + ssidie[1]];
			/* Remove SSID from the IE list. It has to be provided
			 * as a separate argument in cw1200_scan_start call
			 */

			/* Store SSID localy */
			ssids[0].length = ssidie[1];
			memcpy(ssids[0].ssid, &ssidie[2], ssids[0].length);
			scan.num_ssids = 1;

			/* Remove SSID from IE list */
			ssidie[1] = 0;
			memmove(&ssidie[2], nextie, &ies[ies_len] - nextie);
			skb_trim(frame.skb, frame.skb->len - ssids[0].length);
		}
	}

	/* FW bug: driver has to restart p2p-dev mode after scan */
	if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
		cw1200_disable_listening(priv);
	ret = wsm_set_template_frame(priv, &frame);
	priv->scan.direct_probe = 1;
	if (!ret) {
		wsm_flush_tx(priv);
		ret = cw1200_scan_start(priv, &scan);
	}
	mutex_unlock(&priv->conf_mutex);

	/* Restore the driver header before retiring the frame. */
	skb_push(frame.skb, txpriv->offset);
	if (!ret)
		/* Fake a TX ack: the frame went out as a scan probe. */
		IEEE80211_SKB_CB(frame.skb)->flags |= IEEE80211_TX_STAT_ACK;
	BUG_ON(cw1200_queue_remove(queue, priv->pending_frame_id));

	if (ret) {
		priv->scan.direct_probe = 0;
		up(&priv->scan.lock);
		wsm_unlock_tx(priv);
	}

	return;
}
diff --git a/drivers/net/wireless/cw1200/scan.h b/drivers/net/wireless/cw1200/scan.h
new file mode 100644
index 000000000000..5a8296ccfa82
--- /dev/null
+++ b/drivers/net/wireless/cw1200/scan.h
@@ -0,0 +1,56 @@
/*
 * Scan interface for ST-Ericsson CW1200 mac80211 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef SCAN_H_INCLUDED
#define SCAN_H_INCLUDED

#include <linux/semaphore.h>
#include "wsm.h"

/* external */ struct sk_buff;
/* external */ struct cfg80211_scan_request;
/* external */ struct ieee80211_channel;
/* external */ struct ieee80211_hw;
/* external */ struct work_struct;

/* Per-device scan state.  A single scan (or the direct-probe workaround)
 * owns @lock for its whole duration; other paths use down()/down_trylock()
 * on it to serialize against a scan in progress.
 */
struct cw1200_scan {
	struct semaphore lock;		/* held while a scan/probe is active */
	struct work_struct work;	/* performs the actual scan */
	struct delayed_work timeout;	/* watchdog for a stuck scan */
	struct cfg80211_scan_request *req;	/* request being serviced */
	/* Cursor over the requested channel list */
	struct ieee80211_channel **begin;
	struct ieee80211_channel **curr;
	struct ieee80211_channel **end;
	struct wsm_ssid ssids[WSM_SCAN_MAX_NUM_OF_SSIDS];
	int output_power;
	int n_ssids;
	int status;
	atomic_t in_progress;
	/* Direct probe requests workaround */
	struct delayed_work probe_work;
	int direct_probe;		/* non-zero while the workaround runs */
};

int cw1200_hw_scan(struct ieee80211_hw *hw,
		   struct ieee80211_vif *vif,
		   struct cfg80211_scan_request *req);
void cw1200_scan_work(struct work_struct *work);
void cw1200_scan_timeout(struct work_struct *work);
void cw1200_clear_recent_scan_work(struct work_struct *work);
void cw1200_scan_complete_cb(struct cw1200_common *priv,
			     struct wsm_scan_complete *arg);
void cw1200_scan_failed_cb(struct cw1200_common *priv);

/* ******************************************************************** */
/* Raw probe requests TX workaround */
void cw1200_probe_work(struct work_struct *work);

#endif
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
new file mode 100644
index 000000000000..7365674366f4
--- /dev/null
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -0,0 +1,2403 @@
1/*
2 * Mac80211 STA API for ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/vmalloc.h>
13#include <linux/sched.h>
14#include <linux/firmware.h>
15#include <linux/module.h>
16
17#include "cw1200.h"
18#include "sta.h"
19#include "fwio.h"
20#include "bh.h"
21#include "debug.h"
22
23#ifndef ERP_INFO_BYTE_OFFSET
24#define ERP_INFO_BYTE_OFFSET 2
25#endif
26
27static void cw1200_do_join(struct cw1200_common *priv);
28static void cw1200_do_unjoin(struct cw1200_common *priv);
29
30static int cw1200_upload_beacon(struct cw1200_common *priv);
31static int cw1200_upload_pspoll(struct cw1200_common *priv);
32static int cw1200_upload_null(struct cw1200_common *priv);
33static int cw1200_upload_qosnull(struct cw1200_common *priv);
34static int cw1200_start_ap(struct cw1200_common *priv);
35static int cw1200_update_beaconing(struct cw1200_common *priv);
36static int cw1200_enable_beaconing(struct cw1200_common *priv,
37 bool enable);
38static void __cw1200_sta_notify(struct ieee80211_hw *dev,
39 struct ieee80211_vif *vif,
40 enum sta_notify_cmd notify_cmd,
41 int link_id);
42static int __cw1200_flush(struct cw1200_common *priv, bool drop);
43
44static inline void __cw1200_free_event_queue(struct list_head *list)
45{
46 struct cw1200_wsm_event *event, *tmp;
47 list_for_each_entry_safe(event, tmp, list, link) {
48 list_del(&event->link);
49 kfree(event);
50 }
51}
52
53/* ******************************************************************** */
54/* STA API */
55
56int cw1200_start(struct ieee80211_hw *dev)
57{
58 struct cw1200_common *priv = dev->priv;
59 int ret = 0;
60
61 cw1200_pm_stay_awake(&priv->pm_state, HZ);
62
63 mutex_lock(&priv->conf_mutex);
64
65 /* default EDCA */
66 WSM_EDCA_SET(&priv->edca, 0, 0x0002, 0x0003, 0x0007, 47, 0xc8, false);
67 WSM_EDCA_SET(&priv->edca, 1, 0x0002, 0x0007, 0x000f, 94, 0xc8, false);
68 WSM_EDCA_SET(&priv->edca, 2, 0x0003, 0x000f, 0x03ff, 0, 0xc8, false);
69 WSM_EDCA_SET(&priv->edca, 3, 0x0007, 0x000f, 0x03ff, 0, 0xc8, false);
70 ret = wsm_set_edca_params(priv, &priv->edca);
71 if (ret)
72 goto out;
73
74 ret = cw1200_set_uapsd_param(priv, &priv->edca);
75 if (ret)
76 goto out;
77
78 priv->setbssparams_done = false;
79
80 memcpy(priv->mac_addr, dev->wiphy->perm_addr, ETH_ALEN);
81 priv->mode = NL80211_IFTYPE_MONITOR;
82 priv->wep_default_key_id = -1;
83
84 priv->cqm_beacon_loss_count = 10;
85
86 ret = cw1200_setup_mac(priv);
87 if (ret)
88 goto out;
89
90out:
91 mutex_unlock(&priv->conf_mutex);
92 return ret;
93}
94
/* mac80211 stop callback: abort any scan, drain all deferred work,
 * flush TX queues and return the driver to its passive state.
 * NOTE(review): the teardown order below (TX lock -> scan abort ->
 * work cancellation -> conf_mutex) appears deliberate; do not reorder.
 */
void cw1200_stop(struct ieee80211_hw *dev)
{
	struct cw1200_common *priv = dev->priv;
	LIST_HEAD(list);
	int i;

	wsm_lock_tx(priv);

	while (down_trylock(&priv->scan.lock)) {
		/* Scan is in progress. Force it to stop. */
		priv->scan.req = NULL;
		schedule();
	}
	up(&priv->scan.lock);

	/* Stop every piece of deferred work before touching shared state. */
	cancel_delayed_work_sync(&priv->scan.probe_work);
	cancel_delayed_work_sync(&priv->scan.timeout);
	cancel_delayed_work_sync(&priv->clear_recent_scan_work);
	cancel_delayed_work_sync(&priv->join_timeout);
	cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
	cancel_work_sync(&priv->unjoin_work);
	cancel_delayed_work_sync(&priv->link_id_gc_work);
	flush_workqueue(priv->workqueue);
	del_timer_sync(&priv->mcast_timeout);
	mutex_lock(&priv->conf_mutex);
	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
	priv->listening = false;

	/* Detach pending events under the lock, free them outside it. */
	spin_lock(&priv->event_queue_lock);
	list_splice_init(&priv->event_queue, &list);
	spin_unlock(&priv->event_queue_lock);
	__cw1200_free_event_queue(&list);


	priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
	priv->join_pending = false;

	for (i = 0; i < 4; i++)
		cw1200_queue_clear(&priv->tx_queue[i]);
	mutex_unlock(&priv->conf_mutex);
	tx_policy_clean(priv);

	/* HACK! */
	if (atomic_xchg(&priv->tx_lock, 1) != 1)
		pr_debug("[STA] TX is force-unlocked due to stop request.\n");

	wsm_unlock_tx(priv);
	atomic_xchg(&priv->tx_lock, 0); /* for recovery to work */
}
144
/* Module knob for the BSS-loss mitigation in __cw1200_cqm_bssloss_sm():
 * when enabled, a NULL-data frame is sent to the AP to probe the link
 * before reporting connection loss.
 */
static int cw1200_bssloss_mitigation = 1;
module_param(cw1200_bssloss_mitigation, int, 0644);
MODULE_PARM_DESC(cw1200_bssloss_mitigation, "BSS Loss mitigation. 0 == disabled, 1 == enabled (default)");
148
149
/* BSS-loss state machine.  Exactly one of @init/@good/@bad selects the
 * transition: @init arms the loss watchdog, @good confirms the link is
 * back, @bad escalates, and none of them resets the machine.  May TX a
 * NULL-data probe to the AP unless mitigation is disabled via the
 * module parameter.  Caller context: see cw1200_cqm_bssloss_sm wrapper.
 */
void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
			     int init, int good, int bad)
{
	int tx = 0;

	priv->delayed_link_loss = 0;
	cancel_work_sync(&priv->bss_params_work);

	pr_debug("[STA] CQM BSSLOSS_SM: state: %d init %d good %d bad: %d txlock: %d uj: %d\n",
		 priv->bss_loss_state,
		 init, good, bad,
		 atomic_read(&priv->tx_lock),
		 priv->delayed_unjoin);

	/* If we have a pending unjoin */
	if (priv->delayed_unjoin)
		return;

	if (init) {
		queue_delayed_work(priv->workqueue,
				   &priv->bss_loss_work,
				   HZ);
		priv->bss_loss_state = 0;

		/* Skip the confirmation procedure in P2P case */
		if (!priv->vif->p2p && !atomic_read(&priv->tx_lock))
			tx = 1;
	} else if (good) {
		cancel_delayed_work_sync(&priv->bss_loss_work);
		priv->bss_loss_state = 0;
		queue_work(priv->workqueue, &priv->bss_params_work);
	} else if (bad) {
		/* XXX Should we just keep going until we time out? */
		if (priv->bss_loss_state < 3)
			tx = 1;
	} else {
		cancel_delayed_work_sync(&priv->bss_loss_work);
		priv->bss_loss_state = 0;
	}

	/* Bypass mitigation if it's disabled */
	if (!cw1200_bssloss_mitigation)
		tx = 0;

	/* Spit out a NULL packet to our AP if necessary */
	if (tx) {
		struct sk_buff *skb;

		priv->bss_loss_state++;

		skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
		WARN_ON(!skb);
		if (skb)
			cw1200_tx(priv->hw, NULL, skb);
	}
}
206
/* mac80211 add_interface callback.  The device supports a single vif;
 * priv->mode == MONITOR is the "no interface" state, so any other mode
 * means an interface already exists and we refuse with -EOPNOTSUPP.
 */
int cw1200_add_interface(struct ieee80211_hw *dev,
			 struct ieee80211_vif *vif)
{
	int ret;
	struct cw1200_common *priv = dev->priv;
	/* __le32 auto_calibration_mode = __cpu_to_le32(1); */

	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;

	mutex_lock(&priv->conf_mutex);

	if (priv->mode != NL80211_IFTYPE_MONITOR) {
		mutex_unlock(&priv->conf_mutex);
		return -EOPNOTSUPP;
	}

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_AP:
		priv->mode = vif->type;
		break;
	default:
		mutex_unlock(&priv->conf_mutex);
		return -EOPNOTSUPP;
	}

	priv->vif = vif;
	memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
	ret = cw1200_setup_mac(priv);
	/* Enable auto-calibration */
	/* Exception in subsequent channel switch; disabled.
	 * wsm_write_mib(priv, WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE,
	 *      &auto_calibration_mode, sizeof(auto_calibration_mode));
	 */

	mutex_unlock(&priv->conf_mutex);
	return ret;
}
248
/* mac80211 remove_interface callback: tear down whatever join state the
 * vif was in (STA unjoin, AP link reset, monitor listening), wipe keys
 * and MAC state, and return to the MONITOR "no interface" mode.
 */
void cw1200_remove_interface(struct ieee80211_hw *dev,
			     struct ieee80211_vif *vif)
{
	struct cw1200_common *priv = dev->priv;
	struct wsm_reset reset = {
		.reset_statistics = true,
	};
	int i;

	mutex_lock(&priv->conf_mutex);
	switch (priv->join_status) {
	case CW1200_JOIN_STATUS_JOINING:
	case CW1200_JOIN_STATUS_PRE_STA:
	case CW1200_JOIN_STATUS_STA:
	case CW1200_JOIN_STATUS_IBSS:
		/* unjoin_work takes over the TX lock; release it ourselves
		 * only if the work could not be queued.
		 */
		wsm_lock_tx(priv);
		if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
			wsm_unlock_tx(priv);
		break;
	case CW1200_JOIN_STATUS_AP:
		/* Reset every active per-station link, then link 0. */
		for (i = 0; priv->link_id_map; ++i) {
			if (priv->link_id_map & BIT(i)) {
				reset.link_id = i;
				wsm_reset(priv, &reset);
				priv->link_id_map &= ~BIT(i);
			}
		}
		memset(priv->link_id_db, 0, sizeof(priv->link_id_db));
		priv->sta_asleep_mask = 0;
		priv->enable_beacon = false;
		priv->tx_multicast = false;
		priv->aid0_bit_set = false;
		priv->buffered_multicasts = false;
		priv->pspoll_mask = 0;
		reset.link_id = 0;
		wsm_reset(priv, &reset);
		break;
	case CW1200_JOIN_STATUS_MONITOR:
		cw1200_update_listening(priv, false);
		break;
	default:
		break;
	}
	priv->vif = NULL;
	priv->mode = NL80211_IFTYPE_MONITOR;
	memset(priv->mac_addr, 0, ETH_ALEN);
	memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo));
	cw1200_free_keys(priv);
	cw1200_setup_mac(priv);
	priv->listening = false;
	priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
	/* __cw1200_flush() locks TX on success; undo that here. */
	if (!__cw1200_flush(priv, true))
		wsm_unlock_tx(priv);

	mutex_unlock(&priv->conf_mutex);
}
305
306int cw1200_change_interface(struct ieee80211_hw *dev,
307 struct ieee80211_vif *vif,
308 enum nl80211_iftype new_type,
309 bool p2p)
310{
311 int ret = 0;
312 pr_debug("change_interface new: %d (%d), old: %d (%d)\n", new_type,
313 p2p, vif->type, vif->p2p);
314
315 if (new_type != vif->type || vif->p2p != p2p) {
316 cw1200_remove_interface(dev, vif);
317 vif->type = new_type;
318 vif->p2p = p2p;
319 ret = cw1200_add_interface(dev, vif);
320 }
321
322 return ret;
323}
324
/* mac80211 config callback.  Handles TX power, channel switch, power
 * save, monitor/idle and retry-limit changes.  Takes scan.lock first so
 * a running scan completes before we reconfigure, then conf_mutex.
 */
int cw1200_config(struct ieee80211_hw *dev, u32 changed)
{
	int ret = 0;
	struct cw1200_common *priv = dev->priv;
	struct ieee80211_conf *conf = &dev->conf;

	pr_debug("CONFIG CHANGED: %08x\n", changed);

	down(&priv->scan.lock);
	mutex_lock(&priv->conf_mutex);
	/* TODO: IEEE80211_CONF_CHANGE_QOS */
	/* TODO: IEEE80211_CONF_CHANGE_LISTEN_INTERVAL */

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		priv->output_power = conf->power_level;
		pr_debug("[STA] TX power: %d\n", priv->output_power);
		/* WSM takes the power level in units of 0.1 dBm — hence *10 */
		wsm_set_output_power(priv, priv->output_power * 10);
	}

	if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) &&
	    (priv->channel != conf->chandef.chan)) {
		struct ieee80211_channel *ch = conf->chandef.chan;
		struct wsm_switch_channel channel = {
			.channel_number = ch->hw_value,
		};
		pr_debug("[STA] Freq %d (wsm ch: %d).\n",
			 ch->center_freq, ch->hw_value);

		/* __cw1200_flush() implicitly locks tx, if successful */
		if (!__cw1200_flush(priv, false)) {
			if (!wsm_switch_channel(priv, &channel)) {
				/* Wait up to 3 s for firmware to confirm
				 * the channel switch.
				 */
				ret = wait_event_timeout(priv->channel_switch_done,
							 !priv->channel_switch_in_progress,
							 3 * HZ);
				if (ret) {
					/* Already unlocks if successful */
					priv->channel = ch;
					ret = 0;
				} else {
					ret = -ETIMEDOUT;
				}
			} else {
				/* Unlock if switch channel fails */
				wsm_unlock_tx(priv);
			}
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		if (!(conf->flags & IEEE80211_CONF_PS))
			priv->powersave_mode.mode = WSM_PSM_ACTIVE;
		else if (conf->dynamic_ps_timeout <= 0)
			priv->powersave_mode.mode = WSM_PSM_PS;
		else
			priv->powersave_mode.mode = WSM_PSM_FAST_PS;

		/* Firmware requires that value for this 1-byte field must
		 * be specified in units of 500us. Values above the 128ms
		 * threshold are not supported.
		 */
		if (conf->dynamic_ps_timeout >= 0x80)
			priv->powersave_mode.fast_psm_idle_period = 0xFF;
		else
			priv->powersave_mode.fast_psm_idle_period =
					conf->dynamic_ps_timeout << 1;

		/* Only push to firmware once associated (non-zero AID). */
		if (priv->join_status == CW1200_JOIN_STATUS_STA &&
		    priv->bss_params.aid)
			cw1200_set_pm(priv, &priv->powersave_mode);
	}

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		/* TBD: It looks like it's transparent
		 * there's a monitor interface present -- use this
		 * to determine for example whether to calculate
		 * timestamps for packets or not, do not use instead
		 * of filter flags!
		 */
	}

	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
		struct wsm_operational_mode mode = {
			.power_mode = cw1200_power_mode,
			.disable_more_flag_usage = true,
		};

		wsm_lock_tx(priv);
		/* Disable p2p-dev mode forced by TX request */
		if ((priv->join_status == CW1200_JOIN_STATUS_MONITOR) &&
		    (conf->flags & IEEE80211_CONF_IDLE) &&
		    !priv->listening) {
			cw1200_disable_listening(priv);
			priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
		}
		wsm_set_operational_mode(priv, &mode);
		wsm_unlock_tx(priv);
	}

	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
		pr_debug("[STA] Retry limits: %d (long), %d (short).\n",
			 conf->long_frame_max_tx_count,
			 conf->short_frame_max_tx_count);
		spin_lock_bh(&priv->tx_policy_cache.lock);
		priv->long_frame_max_tx_count = conf->long_frame_max_tx_count;
		/* Short retry count is a 4-bit field in the TX policy. */
		priv->short_frame_max_tx_count =
			(conf->short_frame_max_tx_count < 0x0F) ?
			conf->short_frame_max_tx_count : 0x0F;
		priv->hw->max_rate_tries = priv->short_frame_max_tx_count;
		spin_unlock_bh(&priv->tx_policy_cache.lock);
	}
	mutex_unlock(&priv->conf_mutex);
	up(&priv->scan.lock);
	return ret;
}
439
/* Push the current RX/beacon/BSSID/multicast filter configuration to
 * firmware, derived from priv->rx_filter, the vif type and join status.
 * NOTE(review): bf_ctrl/bf_tbl are function-local statics; all mutable
 * fields are reassigned on every call, and callers appear to serialize
 * via conf_mutex — confirm before calling from a new context.
 */
void cw1200_update_filtering(struct cw1200_common *priv)
{
	int ret;
	bool bssid_filtering = !priv->rx_filter.bssid;
	bool is_p2p = priv->vif && priv->vif->p2p;
	bool is_sta = priv->vif && NL80211_IFTYPE_STATION == priv->vif->type;

	static struct wsm_beacon_filter_control bf_ctrl;
	/* IE table: entry 0 = WFA vendor IE (OUI 50:6F:9A), entry 1 = HT
	 * operation, entry 2 = ERP info; each triggers on change/appear/
	 * disappear.
	 */
	static struct wsm_mib_beacon_filter_table bf_tbl = {
		.entry[0].ie_id = WLAN_EID_VENDOR_SPECIFIC,
		.entry[0].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
					WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
					WSM_BEACON_FILTER_IE_HAS_APPEARED,
		.entry[0].oui[0] = 0x50,
		.entry[0].oui[1] = 0x6F,
		.entry[0].oui[2] = 0x9A,
		.entry[1].ie_id = WLAN_EID_HT_OPERATION,
		.entry[1].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
					WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
					WSM_BEACON_FILTER_IE_HAS_APPEARED,
		.entry[2].ie_id = WLAN_EID_ERP_INFO,
		.entry[2].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
					WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
					WSM_BEACON_FILTER_IE_HAS_APPEARED,
	};

	if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE)
		return;
	else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
		bssid_filtering = false;

	if (priv->disable_beacon_filter) {
		bf_ctrl.enabled = 0;
		bf_ctrl.bcn_count = 1;
		bf_tbl.num = __cpu_to_le32(0);
	} else if (is_p2p || !is_sta) {
		bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE |
			WSM_BEACON_FILTER_AUTO_ERP;
		bf_ctrl.bcn_count = 0;
		bf_tbl.num = __cpu_to_le32(2);
	} else {
		bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE;
		bf_ctrl.bcn_count = 0;
		bf_tbl.num = __cpu_to_le32(3);
	}

	/* When acting as p2p client being connected to p2p GO, in order to
	 * receive frames from a different p2p device, turn off bssid filter.
	 *
	 * WARNING: FW dependency!
	 * This can only be used with FW WSM371 and its successors.
	 * In that FW version even with bssid filter turned off,
	 * device will block most of the unwanted frames.
	 */
	if (is_p2p)
		bssid_filtering = false;

	/* Apply in order; stop at the first failure and report it. */
	ret = wsm_set_rx_filter(priv, &priv->rx_filter);
	if (!ret)
		ret = wsm_set_beacon_filter_table(priv, &bf_tbl);
	if (!ret)
		ret = wsm_beacon_filter_control(priv, &bf_ctrl);
	if (!ret)
		ret = wsm_set_bssid_filtering(priv, bssid_filtering);
	if (!ret)
		ret = wsm_set_multicast_filter(priv, &priv->multicast_filter);
	if (ret)
		wiphy_err(priv->hw->wiphy,
			  "Update filtering failed: %d.\n", ret);
	return;
}
511
/* Workqueue shim: run cw1200_update_filtering() from process context. */
void cw1200_update_filtering_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common,
			     update_filtering_work);

	cw1200_update_filtering(priv);
}
520
521void cw1200_set_beacon_wakeup_period_work(struct work_struct *work)
522{
523 struct cw1200_common *priv =
524 container_of(work, struct cw1200_common,
525 set_beacon_wakeup_period_work);
526
527 wsm_set_beacon_wakeup_period(priv,
528 priv->beacon_int * priv->join_dtim_period >
529 MAX_BEACON_SKIP_TIME_MS ? 1 :
530 priv->join_dtim_period, 0);
531}
532
533u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
534 struct netdev_hw_addr_list *mc_list)
535{
536 static u8 broadcast_ipv6[ETH_ALEN] = {
537 0x33, 0x33, 0x00, 0x00, 0x00, 0x01
538 };
539 static u8 broadcast_ipv4[ETH_ALEN] = {
540 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01
541 };
542 struct cw1200_common *priv = hw->priv;
543 struct netdev_hw_addr *ha;
544 int count = 0;
545
546 /* Disable multicast filtering */
547 priv->has_multicast_subscription = false;
548 memset(&priv->multicast_filter, 0x00, sizeof(priv->multicast_filter));
549
550 if (netdev_hw_addr_list_count(mc_list) > WSM_MAX_GRP_ADDRTABLE_ENTRIES)
551 return 0;
552
553 /* Enable if requested */
554 netdev_hw_addr_list_for_each(ha, mc_list) {
555 pr_debug("[STA] multicast: %pM\n", ha->addr);
556 memcpy(&priv->multicast_filter.macaddrs[count],
557 ha->addr, ETH_ALEN);
558 if (memcmp(ha->addr, broadcast_ipv4, ETH_ALEN) &&
559 memcmp(ha->addr, broadcast_ipv6, ETH_ALEN))
560 priv->has_multicast_subscription = true;
561 count++;
562 }
563
564 if (count) {
565 priv->multicast_filter.enable = __cpu_to_le32(1);
566 priv->multicast_filter.num_addrs = __cpu_to_le32(count);
567 }
568
569 return netdev_hw_addr_list_count(mc_list);
570}
571
/* mac80211 configure_filter callback: translate FIF_* flags into the
 * driver's rx_filter / beacon-filter / listening state and push the
 * result to firmware.  Lock order: scan.lock, then conf_mutex.
 */
void cw1200_configure_filter(struct ieee80211_hw *dev,
			     unsigned int changed_flags,
			     unsigned int *total_flags,
			     u64 multicast)
{
	struct cw1200_common *priv = dev->priv;
	/* Promiscuous-style flags force the p2p-dev "listening" mode. */
	bool listening = !!(*total_flags &
			    (FIF_PROMISC_IN_BSS |
			     FIF_OTHER_BSS |
			     FIF_BCN_PRBRESP_PROMISC |
			     FIF_PROBE_REQ));

	/* Advertise back the subset of flags we actually honour. */
	*total_flags &= FIF_PROMISC_IN_BSS |
			FIF_OTHER_BSS |
			FIF_FCSFAIL |
			FIF_BCN_PRBRESP_PROMISC |
			FIF_PROBE_REQ;

	down(&priv->scan.lock);
	mutex_lock(&priv->conf_mutex);

	priv->rx_filter.promiscuous = (*total_flags & FIF_PROMISC_IN_BSS)
			? 1 : 0;
	priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS |
			FIF_PROBE_REQ)) ? 1 : 0;
	priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 1 : 0;
	priv->disable_beacon_filter = !(*total_flags &
					(FIF_BCN_PRBRESP_PROMISC |
					 FIF_PROMISC_IN_BSS |
					 FIF_PROBE_REQ));
	if (priv->listening != listening) {
		priv->listening = listening;
		wsm_lock_tx(priv);
		cw1200_update_listening(priv, listening);
		wsm_unlock_tx(priv);
	}
	cw1200_update_filtering(priv);
	mutex_unlock(&priv->conf_mutex);
	up(&priv->scan.lock);
}
612
613int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
614 u16 queue, const struct ieee80211_tx_queue_params *params)
615{
616 struct cw1200_common *priv = dev->priv;
617 int ret = 0;
618 /* To prevent re-applying PM request OID again and again*/
619 bool old_uapsd_flags;
620
621 mutex_lock(&priv->conf_mutex);
622
623 if (queue < dev->queues) {
624 old_uapsd_flags = le16_to_cpu(priv->uapsd_info.uapsd_flags);
625
626 WSM_TX_QUEUE_SET(&priv->tx_queue_params, queue, 0, 0, 0);
627 ret = wsm_set_tx_queue_params(priv,
628 &priv->tx_queue_params.params[queue], queue);
629 if (ret) {
630 ret = -EINVAL;
631 goto out;
632 }
633
634 WSM_EDCA_SET(&priv->edca, queue, params->aifs,
635 params->cw_min, params->cw_max,
636 params->txop, 0xc8,
637 params->uapsd);
638 ret = wsm_set_edca_params(priv, &priv->edca);
639 if (ret) {
640 ret = -EINVAL;
641 goto out;
642 }
643
644 if (priv->mode == NL80211_IFTYPE_STATION) {
645 ret = cw1200_set_uapsd_param(priv, &priv->edca);
646 if (!ret && priv->setbssparams_done &&
647 (priv->join_status == CW1200_JOIN_STATUS_STA) &&
648 (old_uapsd_flags != le16_to_cpu(priv->uapsd_info.uapsd_flags)))
649 ret = cw1200_set_pm(priv, &priv->powersave_mode);
650 }
651 } else {
652 ret = -EINVAL;
653 }
654
655out:
656 mutex_unlock(&priv->conf_mutex);
657 return ret;
658}
659
660int cw1200_get_stats(struct ieee80211_hw *dev,
661 struct ieee80211_low_level_stats *stats)
662{
663 struct cw1200_common *priv = dev->priv;
664
665 memcpy(stats, &priv->stats, sizeof(*stats));
666 return 0;
667}
668
669int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg)
670{
671 struct wsm_set_pm pm = *arg;
672
673 if (priv->uapsd_info.uapsd_flags != 0)
674 pm.mode &= ~WSM_PSM_FAST_PS_FLAG;
675
676 if (memcmp(&pm, &priv->firmware_ps_mode,
677 sizeof(struct wsm_set_pm))) {
678 priv->firmware_ps_mode = pm;
679 return wsm_set_pm(priv, &pm);
680 } else {
681 return 0;
682 }
683}
684
685int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
686 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
687 struct ieee80211_key_conf *key)
688{
689 int ret = -EOPNOTSUPP;
690 struct cw1200_common *priv = dev->priv;
691 struct ieee80211_key_seq seq;
692
693 mutex_lock(&priv->conf_mutex);
694
695 if (cmd == SET_KEY) {
696 u8 *peer_addr = NULL;
697 int pairwise = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) ?
698 1 : 0;
699 int idx = cw1200_alloc_key(priv);
700 struct wsm_add_key *wsm_key = &priv->keys[idx];
701
702 if (idx < 0) {
703 ret = -EINVAL;
704 goto finally;
705 }
706
707 if (sta)
708 peer_addr = sta->addr;
709
710 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
711
712 switch (key->cipher) {
713 case WLAN_CIPHER_SUITE_WEP40:
714 case WLAN_CIPHER_SUITE_WEP104:
715 if (key->keylen > 16) {
716 cw1200_free_key(priv, idx);
717 ret = -EINVAL;
718 goto finally;
719 }
720
721 if (pairwise) {
722 wsm_key->type = WSM_KEY_TYPE_WEP_PAIRWISE;
723 memcpy(wsm_key->wep_pairwise.peer,
724 peer_addr, ETH_ALEN);
725 memcpy(wsm_key->wep_pairwise.keydata,
726 &key->key[0], key->keylen);
727 wsm_key->wep_pairwise.keylen = key->keylen;
728 } else {
729 wsm_key->type = WSM_KEY_TYPE_WEP_DEFAULT;
730 memcpy(wsm_key->wep_group.keydata,
731 &key->key[0], key->keylen);
732 wsm_key->wep_group.keylen = key->keylen;
733 wsm_key->wep_group.keyid = key->keyidx;
734 }
735 break;
736 case WLAN_CIPHER_SUITE_TKIP:
737 ieee80211_get_key_rx_seq(key, 0, &seq);
738 if (pairwise) {
739 wsm_key->type = WSM_KEY_TYPE_TKIP_PAIRWISE;
740 memcpy(wsm_key->tkip_pairwise.peer,
741 peer_addr, ETH_ALEN);
742 memcpy(wsm_key->tkip_pairwise.keydata,
743 &key->key[0], 16);
744 memcpy(wsm_key->tkip_pairwise.tx_mic_key,
745 &key->key[16], 8);
746 memcpy(wsm_key->tkip_pairwise.rx_mic_key,
747 &key->key[24], 8);
748 } else {
749 size_t mic_offset =
750 (priv->mode == NL80211_IFTYPE_AP) ?
751 16 : 24;
752 wsm_key->type = WSM_KEY_TYPE_TKIP_GROUP;
753 memcpy(wsm_key->tkip_group.keydata,
754 &key->key[0], 16);
755 memcpy(wsm_key->tkip_group.rx_mic_key,
756 &key->key[mic_offset], 8);
757
758 wsm_key->tkip_group.rx_seqnum[0] = seq.tkip.iv16 & 0xff;
759 wsm_key->tkip_group.rx_seqnum[1] = (seq.tkip.iv16 >> 8) & 0xff;
760 wsm_key->tkip_group.rx_seqnum[2] = seq.tkip.iv32 & 0xff;
761 wsm_key->tkip_group.rx_seqnum[3] = (seq.tkip.iv32 >> 8) & 0xff;
762 wsm_key->tkip_group.rx_seqnum[4] = (seq.tkip.iv32 >> 16) & 0xff;
763 wsm_key->tkip_group.rx_seqnum[5] = (seq.tkip.iv32 >> 24) & 0xff;
764 wsm_key->tkip_group.rx_seqnum[6] = 0;
765 wsm_key->tkip_group.rx_seqnum[7] = 0;
766
767 wsm_key->tkip_group.keyid = key->keyidx;
768 }
769 break;
770 case WLAN_CIPHER_SUITE_CCMP:
771 ieee80211_get_key_rx_seq(key, 0, &seq);
772 if (pairwise) {
773 wsm_key->type = WSM_KEY_TYPE_AES_PAIRWISE;
774 memcpy(wsm_key->aes_pairwise.peer,
775 peer_addr, ETH_ALEN);
776 memcpy(wsm_key->aes_pairwise.keydata,
777 &key->key[0], 16);
778 } else {
779 wsm_key->type = WSM_KEY_TYPE_AES_GROUP;
780 memcpy(wsm_key->aes_group.keydata,
781 &key->key[0], 16);
782
783 wsm_key->aes_group.rx_seqnum[0] = seq.ccmp.pn[5];
784 wsm_key->aes_group.rx_seqnum[1] = seq.ccmp.pn[4];
785 wsm_key->aes_group.rx_seqnum[2] = seq.ccmp.pn[3];
786 wsm_key->aes_group.rx_seqnum[3] = seq.ccmp.pn[2];
787 wsm_key->aes_group.rx_seqnum[4] = seq.ccmp.pn[1];
788 wsm_key->aes_group.rx_seqnum[5] = seq.ccmp.pn[0];
789 wsm_key->aes_group.rx_seqnum[6] = 0;
790 wsm_key->aes_group.rx_seqnum[7] = 0;
791 wsm_key->aes_group.keyid = key->keyidx;
792 }
793 break;
794 case WLAN_CIPHER_SUITE_SMS4:
795 if (pairwise) {
796 wsm_key->type = WSM_KEY_TYPE_WAPI_PAIRWISE;
797 memcpy(wsm_key->wapi_pairwise.peer,
798 peer_addr, ETH_ALEN);
799 memcpy(wsm_key->wapi_pairwise.keydata,
800 &key->key[0], 16);
801 memcpy(wsm_key->wapi_pairwise.mic_key,
802 &key->key[16], 16);
803 wsm_key->wapi_pairwise.keyid = key->keyidx;
804 } else {
805 wsm_key->type = WSM_KEY_TYPE_WAPI_GROUP;
806 memcpy(wsm_key->wapi_group.keydata,
807 &key->key[0], 16);
808 memcpy(wsm_key->wapi_group.mic_key,
809 &key->key[16], 16);
810 wsm_key->wapi_group.keyid = key->keyidx;
811 }
812 break;
813 default:
814 pr_warn("Unhandled key type %d\n", key->cipher);
815 cw1200_free_key(priv, idx);
816 ret = -EOPNOTSUPP;
817 goto finally;
818 }
819 ret = wsm_add_key(priv, wsm_key);
820 if (!ret)
821 key->hw_key_idx = idx;
822 else
823 cw1200_free_key(priv, idx);
824 } else if (cmd == DISABLE_KEY) {
825 struct wsm_remove_key wsm_key = {
826 .index = key->hw_key_idx,
827 };
828
829 if (wsm_key.index > WSM_KEY_MAX_INDEX) {
830 ret = -EINVAL;
831 goto finally;
832 }
833
834 cw1200_free_key(priv, wsm_key.index);
835 ret = wsm_remove_key(priv, &wsm_key);
836 } else {
837 pr_warn("Unhandled key command %d\n", cmd);
838 }
839
840finally:
841 mutex_unlock(&priv->conf_mutex);
842 return ret;
843}
844
/* Workqueue shim: switch the default WEP key before re-sending a frame
 * that was queued for a different key.  The pending frame id and the TX
 * lock were handed over by the TX path; this work releases the lock.
 */
void cw1200_wep_key_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, wep_key_work);
	u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id);
	struct cw1200_queue *queue = &priv->tx_queue[queue_id];
	__le32 wep_default_key_id = __cpu_to_le32(
		priv->wep_default_key_id);

	pr_debug("[STA] Setting default WEP key: %d\n",
		 priv->wep_default_key_id);
	wsm_flush_tx(priv);
	wsm_write_mib(priv, WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
		      &wep_default_key_id, sizeof(wep_default_key_id));
	/* Put the frame back so it is retransmitted with the new key. */
	cw1200_queue_requeue(queue, priv->pending_frame_id);
	wsm_unlock_tx(priv);
}
862
863int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
864{
865 int ret = 0;
866 __le32 val32;
867 struct cw1200_common *priv = hw->priv;
868
869 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
870 return 0;
871
872 if (value != (u32) -1)
873 val32 = __cpu_to_le32(value);
874 else
875 val32 = 0; /* disabled */
876
877 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
878 /* device is down, can _not_ set threshold */
879 ret = -ENODEV;
880 goto out;
881 }
882
883 if (priv->rts_threshold == value)
884 goto out;
885
886 pr_debug("[STA] Setting RTS threshold: %d\n",
887 priv->rts_threshold);
888
889 /* mutex_lock(&priv->conf_mutex); */
890 ret = wsm_write_mib(priv, WSM_MIB_ID_DOT11_RTS_THRESHOLD,
891 &val32, sizeof(val32));
892 if (!ret)
893 priv->rts_threshold = value;
894 /* mutex_unlock(&priv->conf_mutex); */
895
896out:
897 return ret;
898}
899
/* If successful, LOCKS the TX queue! */
/* Drain (or drop) all driver TX queues.  With @drop the queues are
 * cleared immediately; otherwise we wait up to 2 s for them to empty.
 * On success the TX lock is left held and the caller must call
 * wsm_unlock_tx(); on -ETIMEDOUT the lock is NOT taken.
 */
static int __cw1200_flush(struct cw1200_common *priv, bool drop)
{
	int i, ret;

	for (;;) {
		/* TODO: correct flush handling is required when dev_stop.
		 * Temporary workaround: 2s
		 */
		if (drop) {
			for (i = 0; i < 4; ++i)
				cw1200_queue_clear(&priv->tx_queue[i]);
		} else {
			ret = wait_event_timeout(
				priv->tx_queue_stats.wait_link_id_empty,
				cw1200_queue_stats_is_empty(
					&priv->tx_queue_stats, -1),
				2 * HZ);
		}

		/* Note: when drop is set, ret was not assigned above; the
		 * short-circuit on !drop keeps this read safe and the else
		 * branch normalizes ret to 0.
		 */
		if (!drop && ret <= 0) {
			ret = -ETIMEDOUT;
			break;
		} else {
			ret = 0;
		}

		wsm_lock_tx(priv);
		if (!cw1200_queue_stats_is_empty(&priv->tx_queue_stats, -1)) {
			/* Highly unlikely: WSM requeued frames. */
			wsm_unlock_tx(priv);
			continue;
		}
		break;
	}
	return ret;
}
937
938void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
939{
940 struct cw1200_common *priv = hw->priv;
941
942 switch (priv->mode) {
943 case NL80211_IFTYPE_MONITOR:
944 drop = true;
945 break;
946 case NL80211_IFTYPE_AP:
947 if (!priv->enable_beacon)
948 drop = true;
949 break;
950 }
951
952 if (!__cw1200_flush(priv, drop))
953 wsm_unlock_tx(priv);
954
955 return;
956}
957
958/* ******************************************************************** */
959/* WSM callbacks */
960
961void cw1200_free_event_queue(struct cw1200_common *priv)
962{
963 LIST_HEAD(list);
964
965 spin_lock(&priv->event_queue_lock);
966 list_splice_init(&priv->event_queue, &list);
967 spin_unlock(&priv->event_queue_lock);
968
969 __cw1200_free_event_queue(&list);
970}
971
/* Workqueue handler draining the WSM event queue: BSS loss/regain,
 * radar, RCPI/RSSI threshold crossings and BT activity indications.
 * Events are detached under the spinlock and processed lock-free.
 */
void cw1200_event_handler(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, event_handler);
	struct cw1200_wsm_event *event;
	LIST_HEAD(list);

	spin_lock(&priv->event_queue_lock);
	list_splice_init(&priv->event_queue, &list);
	spin_unlock(&priv->event_queue_lock);

	list_for_each_entry(event, &list, link) {
		switch (event->evt.id) {
		case WSM_EVENT_ERROR:
			pr_err("Unhandled WSM Error from LMAC\n");
			break;
		case WSM_EVENT_BSS_LOST:
			pr_debug("[CQM] BSS lost.\n");
			cancel_work_sync(&priv->unjoin_work);
			/* Only kick the loss state machine if no scan holds
			 * the lock; otherwise defer until scan complete.
			 */
			if (!down_trylock(&priv->scan.lock)) {
				cw1200_cqm_bssloss_sm(priv, 1, 0, 0);
				up(&priv->scan.lock);
			} else {
				/* Scan is in progress. Delay reporting.
				 * Scan complete will trigger bss_loss_work
				 */
				priv->delayed_link_loss = 1;
				/* Also start a watchdog. */
				queue_delayed_work(priv->workqueue,
						   &priv->bss_loss_work, 5*HZ);
			}
			break;
		case WSM_EVENT_BSS_REGAINED:
			pr_debug("[CQM] BSS regained.\n");
			cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
			cancel_work_sync(&priv->unjoin_work);
			break;
		case WSM_EVENT_RADAR_DETECTED:
			wiphy_info(priv->hw->wiphy, "radar pulse detected\n");
			break;
		case WSM_EVENT_RCPI_RSSI:
		{
			/* RSSI: signed Q8.0, RCPI: unsigned Q7.1
			 * RSSI = RCPI / 2 - 110
			 */
			int rcpi_rssi = (int)(event->evt.data & 0xFF);
			int cqm_evt;
			if (priv->cqm_use_rssi)
				rcpi_rssi = (s8)rcpi_rssi;
			else
				rcpi_rssi =  rcpi_rssi / 2 - 110;

			cqm_evt = (rcpi_rssi <= priv->cqm_rssi_thold) ?
				NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
				NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
			pr_debug("[CQM] RSSI event: %d.\n", rcpi_rssi);
			ieee80211_cqm_rssi_notify(priv->vif, cqm_evt,
						  GFP_KERNEL);
			break;
		}
		case WSM_EVENT_BT_INACTIVE:
			pr_warn("Unhandled BT INACTIVE from LMAC\n");
			break;
		case WSM_EVENT_BT_ACTIVE:
			pr_warn("Unhandled BT ACTIVE from LMAC\n");
			break;
		}
	}
	__cw1200_free_event_queue(&list);
}
1042
/* Delayed-work watchdog armed when BSS loss had to be deferred
 * (see WSM_EVENT_BSS_LOST handling): report connection loss by
 * scheduling unjoin.
 *
 * Takes the tx lock for unjoin_work; if the work was already queued,
 * the lock is released here since nobody else will.
 */
void cw1200_bss_loss_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, bss_loss_work.work);

	pr_debug("[CQM] Reporting connection loss.\n");
	wsm_lock_tx(priv);
	if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
		wsm_unlock_tx(priv);
}
1053
/* Push the cached BSS parameters to firmware with the
 * reset_beacon_loss flag pulsed for this one request only;
 * the flag is restored to 0 before releasing conf_mutex.
 */
void cw1200_bss_params_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, bss_params_work);
	mutex_lock(&priv->conf_mutex);

	priv->bss_params.reset_beacon_loss = 1;
	wsm_set_bss_params(priv, &priv->bss_params);
	priv->bss_params.reset_beacon_loss = 0;

	mutex_unlock(&priv->conf_mutex);
}
1066
1067/* ******************************************************************** */
1068/* Internal API */
1069
1070/* This function is called to Parse the SDD file
1071 * to extract listen_interval and PTA related information
1072 * sdd is a TLV: u8 id, u8 len, u8 data[]
1073 */
1074static int cw1200_parse_sdd_file(struct cw1200_common *priv)
1075{
1076 const u8 *p = priv->sdd->data;
1077 int ret = 0;
1078
1079 while (p + 2 <= priv->sdd->data + priv->sdd->size) {
1080 if (p + p[1] + 2 > priv->sdd->data + priv->sdd->size) {
1081 pr_warn("Malformed sdd structure\n");
1082 return -1;
1083 }
1084 switch (p[0]) {
1085 case SDD_PTA_CFG_ELT_ID: {
1086 u16 v;
1087 if (p[1] < 4) {
1088 pr_warn("SDD_PTA_CFG_ELT_ID malformed\n");
1089 ret = -1;
1090 break;
1091 }
1092 v = le16_to_cpu(*((__le16 *)(p + 2)));
1093 if (!v) /* non-zero means this is enabled */
1094 break;
1095
1096 v = le16_to_cpu(*((__le16 *)(p + 4)));
1097 priv->conf_listen_interval = (v >> 7) & 0x1F;
1098 pr_debug("PTA found; Listen Interval %d\n",
1099 priv->conf_listen_interval);
1100 break;
1101 }
1102 case SDD_REFERENCE_FREQUENCY_ELT_ID: {
1103 u16 clk = le16_to_cpu(*((__le16 *)(p + 2)));
1104 if (clk != priv->hw_refclk)
1105 pr_warn("SDD file doesn't match configured refclk (%d vs %d)\n",
1106 clk, priv->hw_refclk);
1107 break;
1108 }
1109 default:
1110 break;
1111 }
1112 p += p[1] + 2;
1113 }
1114
1115 if (!priv->bt_present) {
1116 pr_debug("PTA element NOT found.\n");
1117 priv->conf_listen_interval = 0;
1118 }
1119 return ret;
1120}
1121
/* Push basic MAC configuration (SDD data, station MAC address) to the
 * firmware and subscribe to dummy RCPI/RSSI reporting.
 *
 * Loads and parses the SDD file on first call; later calls reuse the
 * cached priv->sdd.
 *
 * Returns 0 on success or a negative error from firmware loading /
 * wsm_configuration().
 */
int cw1200_setup_mac(struct cw1200_common *priv)
{
	int ret = 0;

	/* NOTE: There is a bug in FW: it reports signal
	 * as RSSI if RSSI subscription is enabled.
	 * It's not enough to set WSM_RCPI_RSSI_USE_RSSI.
	 *
	 * NOTE2: RSSI based reports have been switched to RCPI, since
	 * FW has a bug and RSSI reported values are not stable,
	 * which can lead to signal level oscillations in user-end applications
	 */
	struct wsm_rcpi_rssi_threshold threshold = {
		.rssiRcpiMode = WSM_RCPI_RSSI_THRESHOLD_ENABLE |
		WSM_RCPI_RSSI_DONT_USE_UPPER |
		WSM_RCPI_RSSI_DONT_USE_LOWER,
		.rollingAverageCount = 16,
	};

	struct wsm_configuration cfg = {
		.dot11StationId = &priv->mac_addr[0],
	};

	/* Remember the decision here to make sure, we will handle
	 * the RCPI/RSSI value correctly on WSM_EVENT_RCPI_RSS
	 */
	if (threshold.rssiRcpiMode & WSM_RCPI_RSSI_USE_RSSI)
		priv->cqm_use_rssi = true;

	if (!priv->sdd) {
		ret = request_firmware(&priv->sdd, priv->sdd_path, priv->pdev);
		if (ret) {
			pr_err("Can't load sdd file %s.\n", priv->sdd_path);
			return ret;
		}
		/* NOTE(review): the parse result is deliberately ignored;
		 * presumably a malformed SDD should not be fatal — confirm. */
		cw1200_parse_sdd_file(priv);
	}

	cfg.dpdData = priv->sdd->data;
	cfg.dpdData_size = priv->sdd->size;
	ret = wsm_configuration(priv, &cfg);
	if (ret)
		return ret;

	/* Configure RSSI/SCPI reporting as RSSI. */
	wsm_set_rcpi_rssi_threshold(priv, &threshold);

	return 0;
}
1171
/* Finish a join attempt: on firmware-reported failure tear the join
 * down and tell mac80211 the connection is lost; on success advance
 * join_status (IBSS or PRE_STA depending on interface mode).
 *
 * Called with conf_mutex held.  Releases the tx lock that was taken
 * before cw1200_do_join().
 */
static void cw1200_join_complete(struct cw1200_common *priv)
{
	pr_debug("[STA] Join complete (%d)\n", priv->join_complete_status);

	priv->join_pending = false;
	if (priv->join_complete_status) {
		priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
		cw1200_update_listening(priv, priv->listening);
		cw1200_do_unjoin(priv);
		ieee80211_connection_loss(priv->vif);
	} else {
		if (priv->mode == NL80211_IFTYPE_ADHOC)
			priv->join_status = CW1200_JOIN_STATUS_IBSS;
		else
			priv->join_status = CW1200_JOIN_STATUS_PRE_STA;
	}
	wsm_unlock_tx(priv); /* Clearing the lock held before do_join() */
}
1190
/* Workqueue wrapper: run cw1200_join_complete() under conf_mutex. */
void cw1200_join_complete_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, join_complete_work);
	mutex_lock(&priv->conf_mutex);
	cw1200_join_complete(priv);
	mutex_unlock(&priv->conf_mutex);
}
1199
/* WSM callback for the asynchronous join-complete indication.
 *
 * Only acts if the join timeout was still pending: cancelling it
 * successfully means we won the race against cw1200_join_timeout(),
 * so record the status and schedule the completion work.
 */
void cw1200_join_complete_cb(struct cw1200_common *priv,
			     struct wsm_join_complete *arg)
{
	pr_debug("[STA] cw1200_join_complete_cb called, status=%d.\n",
		 arg->status);

	if (cancel_delayed_work(&priv->join_timeout)) {
		priv->join_complete_status = arg->status;
		queue_work(priv->workqueue, &priv->join_complete_work);
	}
}
1211
1212/* MUST be called with tx_lock held! It will be unlocked for us. */
1213static void cw1200_do_join(struct cw1200_common *priv)
1214{
1215 const u8 *bssid;
1216 struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
1217 struct cfg80211_bss *bss = NULL;
1218 struct wsm_protected_mgmt_policy mgmt_policy;
1219 struct wsm_join join = {
1220 .mode = conf->ibss_joined ?
1221 WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS,
1222 .preamble_type = WSM_JOIN_PREAMBLE_LONG,
1223 .probe_for_join = 1,
1224 .atim_window = 0,
1225 .basic_rate_set = cw1200_rate_mask_to_wsm(priv,
1226 conf->basic_rates),
1227 };
1228 if (delayed_work_pending(&priv->join_timeout)) {
1229 pr_warn("[STA] - Join request already pending, skipping..\n");
1230 wsm_unlock_tx(priv);
1231 return;
1232 }
1233
1234 if (priv->join_status)
1235 cw1200_do_unjoin(priv);
1236
1237 bssid = priv->vif->bss_conf.bssid;
1238
1239 bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel,
1240 bssid, NULL, 0, 0, 0);
1241
1242 if (!bss && !conf->ibss_joined) {
1243 wsm_unlock_tx(priv);
1244 return;
1245 }
1246
1247 mutex_lock(&priv->conf_mutex);
1248
1249 /* Under the conf lock: check scan status and
1250 * bail out if it is in progress.
1251 */
1252 if (atomic_read(&priv->scan.in_progress)) {
1253 wsm_unlock_tx(priv);
1254 goto done_put;
1255 }
1256
1257 priv->join_pending = true;
1258
1259 /* Sanity check basic rates */
1260 if (!join.basic_rate_set)
1261 join.basic_rate_set = 7;
1262
1263 /* Sanity check beacon interval */
1264 if (!priv->beacon_int)
1265 priv->beacon_int = 1;
1266
1267 join.beacon_interval = priv->beacon_int;
1268
1269 /* BT Coex related changes */
1270 if (priv->bt_present) {
1271 if (((priv->conf_listen_interval * 100) %
1272 priv->beacon_int) == 0)
1273 priv->listen_interval =
1274 ((priv->conf_listen_interval * 100) /
1275 priv->beacon_int);
1276 else
1277 priv->listen_interval =
1278 ((priv->conf_listen_interval * 100) /
1279 priv->beacon_int + 1);
1280 }
1281
1282 if (priv->hw->conf.ps_dtim_period)
1283 priv->join_dtim_period = priv->hw->conf.ps_dtim_period;
1284 join.dtim_period = priv->join_dtim_period;
1285
1286 join.channel_number = priv->channel->hw_value;
1287 join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
1288 WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
1289
1290 memcpy(join.bssid, bssid, sizeof(join.bssid));
1291
1292 pr_debug("[STA] Join BSSID: %pM DTIM: %d, interval: %d\n",
1293 join.bssid,
1294 join.dtim_period, priv->beacon_int);
1295
1296 if (!conf->ibss_joined) {
1297 const u8 *ssidie;
1298 rcu_read_lock();
1299 ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
1300 if (ssidie) {
1301 join.ssid_len = ssidie[1];
1302 memcpy(join.ssid, &ssidie[2], join.ssid_len);
1303 }
1304 rcu_read_unlock();
1305 }
1306
1307 if (priv->vif->p2p) {
1308 join.flags |= WSM_JOIN_FLAGS_P2P_GO;
1309 join.basic_rate_set =
1310 cw1200_rate_mask_to_wsm(priv, 0xFF0);
1311 }
1312
1313 /* Enable asynchronous join calls */
1314 if (!conf->ibss_joined) {
1315 join.flags |= WSM_JOIN_FLAGS_FORCE;
1316 join.flags |= WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND;
1317 }
1318
1319 wsm_flush_tx(priv);
1320
1321 /* Stay Awake for Join and Auth Timeouts and a bit more */
1322 cw1200_pm_stay_awake(&priv->pm_state,
1323 CW1200_JOIN_TIMEOUT + CW1200_AUTH_TIMEOUT);
1324
1325 cw1200_update_listening(priv, false);
1326
1327 /* Turn on Block ACKs */
1328 wsm_set_block_ack_policy(priv, priv->ba_tx_tid_mask,
1329 priv->ba_rx_tid_mask);
1330
1331 /* Set up timeout */
1332 if (join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND) {
1333 priv->join_status = CW1200_JOIN_STATUS_JOINING;
1334 queue_delayed_work(priv->workqueue,
1335 &priv->join_timeout,
1336 CW1200_JOIN_TIMEOUT);
1337 }
1338
1339 /* 802.11w protected mgmt frames */
1340 mgmt_policy.protectedMgmtEnable = 0;
1341 mgmt_policy.unprotectedMgmtFramesAllowed = 1;
1342 mgmt_policy.encryptionForAuthFrame = 1;
1343 wsm_set_protected_mgmt_policy(priv, &mgmt_policy);
1344
1345 /* Perform actual join */
1346 if (wsm_join(priv, &join)) {
1347 pr_err("[STA] cw1200_join_work: wsm_join failed!\n");
1348 cancel_delayed_work_sync(&priv->join_timeout);
1349 cw1200_update_listening(priv, priv->listening);
1350 /* Tx lock still held, unjoin will clear it. */
1351 if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
1352 wsm_unlock_tx(priv);
1353 } else {
1354 if (!(join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND))
1355 cw1200_join_complete(priv); /* Will clear tx_lock */
1356
1357 /* Upload keys */
1358 cw1200_upload_keys(priv);
1359
1360 /* Due to beacon filtering it is possible that the
1361 * AP's beacon is not known for the mac80211 stack.
1362 * Disable filtering temporary to make sure the stack
1363 * receives at least one
1364 */
1365 priv->disable_beacon_filter = true;
1366 }
1367 cw1200_update_filtering(priv);
1368
1369done_put:
1370 mutex_unlock(&priv->conf_mutex);
1371 if (bss)
1372 cfg80211_put_bss(priv->hw->wiphy, bss);
1373}
1374
/* Delayed-work handler fired when the asynchronous join never
 * completed: tear the join down via unjoin_work.
 *
 * Takes the tx lock for unjoin_work; releases it here if the work
 * was already queued.
 */
void cw1200_join_timeout(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, join_timeout.work);
	pr_debug("[WSM] Join timed out.\n");
	wsm_lock_tx(priv);
	if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
		wsm_unlock_tx(priv);
}
1384
/* Leave the current BSS and reset the firmware link state.
 *
 * If a scan is in progress the unjoin is deferred (delayed_unjoin)
 * and re-attempted on scan completion.  Otherwise the firmware is
 * reset and all cached association state is cleared.
 *
 * Takes conf_mutex internally; callers must NOT hold it.
 */
static void cw1200_do_unjoin(struct cw1200_common *priv)
{
	struct wsm_reset reset = {
		.reset_statistics = true,
	};

	cancel_delayed_work_sync(&priv->join_timeout);

	mutex_lock(&priv->conf_mutex);
	priv->join_pending = false;

	if (atomic_read(&priv->scan.in_progress)) {
		if (priv->delayed_unjoin)
			wiphy_dbg(priv->hw->wiphy, "Delayed unjoin is already scheduled.\n");
		else
			priv->delayed_unjoin = true;
		goto done;
	}

	priv->delayed_link_loss = false;

	/* Nothing to do if we are already passive. */
	if (!priv->join_status)
		goto done;

	/* AP/monitor states must never reach this path. */
	if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
		wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
			  priv->join_status);
		BUG_ON(1);
	}

	cancel_work_sync(&priv->update_filtering_work);
	cancel_work_sync(&priv->set_beacon_wakeup_period_work);
	priv->join_status = CW1200_JOIN_STATUS_PASSIVE;

	/* Unjoin is a reset. */
	wsm_flush_tx(priv);
	wsm_keep_alive_period(priv, 0);
	wsm_reset(priv, &reset);
	wsm_set_output_power(priv, priv->output_power * 10);
	priv->join_dtim_period = 0;
	cw1200_setup_mac(priv);
	cw1200_free_event_queue(priv);
	cancel_work_sync(&priv->event_handler);
	cw1200_update_listening(priv, priv->listening);
	cw1200_cqm_bssloss_sm(priv, 0, 0, 0);

	/* Disable Block ACKs */
	wsm_set_block_ack_policy(priv, 0, 0);

	/* Forget everything we negotiated for the old association. */
	priv->disable_beacon_filter = false;
	cw1200_update_filtering(priv);
	memset(&priv->association_mode, 0,
	       sizeof(priv->association_mode));
	memset(&priv->bss_params, 0, sizeof(priv->bss_params));
	priv->setbssparams_done = false;
	memset(&priv->firmware_ps_mode, 0,
	       sizeof(priv->firmware_ps_mode));

	pr_debug("[STA] Unjoin completed.\n");

done:
	mutex_unlock(&priv->conf_mutex);
}
1448
/* Workqueue wrapper around cw1200_do_unjoin().
 *
 * Runs with the tx lock held by whoever queued the work; notifies
 * mac80211 of the connection loss and releases the lock.
 */
void cw1200_unjoin_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, unjoin_work);

	cw1200_do_unjoin(priv);

	/* Tell the stack we're dead */
	ieee80211_connection_loss(priv->vif);

	wsm_unlock_tx(priv);
}
1461
1462int cw1200_enable_listening(struct cw1200_common *priv)
1463{
1464 struct wsm_start start = {
1465 .mode = WSM_START_MODE_P2P_DEV,
1466 .band = WSM_PHY_BAND_2_4G,
1467 .beacon_interval = 100,
1468 .dtim_period = 1,
1469 .probe_delay = 0,
1470 .basic_rate_set = 0x0F,
1471 };
1472
1473 if (priv->channel) {
1474 start.band = priv->channel->band == IEEE80211_BAND_5GHZ ?
1475 WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
1476 start.channel_number = priv->channel->hw_value;
1477 } else {
1478 start.band = WSM_PHY_BAND_2_4G;
1479 start.channel_number = 1;
1480 }
1481
1482 return wsm_start(priv, &start);
1483}
1484
1485int cw1200_disable_listening(struct cw1200_common *priv)
1486{
1487 int ret;
1488 struct wsm_reset reset = {
1489 .reset_statistics = true,
1490 };
1491 ret = wsm_reset(priv, &reset);
1492 return ret;
1493}
1494
1495void cw1200_update_listening(struct cw1200_common *priv, bool enabled)
1496{
1497 if (enabled) {
1498 if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE) {
1499 if (!cw1200_enable_listening(priv))
1500 priv->join_status = CW1200_JOIN_STATUS_MONITOR;
1501 wsm_set_probe_responder(priv, true);
1502 }
1503 } else {
1504 if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) {
1505 if (!cw1200_disable_listening(priv))
1506 priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
1507 wsm_set_probe_responder(priv, false);
1508 }
1509 }
1510}
1511
1512int cw1200_set_uapsd_param(struct cw1200_common *priv,
1513 const struct wsm_edca_params *arg)
1514{
1515 int ret;
1516 u16 uapsd_flags = 0;
1517
1518 /* Here's the mapping AC [queue, bit]
1519 * VO [0,3], VI [1, 2], BE [2, 1], BK [3, 0]
1520 */
1521
1522 if (arg->uapsd_enable[0])
1523 uapsd_flags |= 1 << 3;
1524
1525 if (arg->uapsd_enable[1])
1526 uapsd_flags |= 1 << 2;
1527
1528 if (arg->uapsd_enable[2])
1529 uapsd_flags |= 1 << 1;
1530
1531 if (arg->uapsd_enable[3])
1532 uapsd_flags |= 1;
1533
1534 /* Currently pseudo U-APSD operation is not supported, so setting
1535 * MinAutoTriggerInterval, MaxAutoTriggerInterval and
1536 * AutoTriggerStep to 0
1537 */
1538
1539 priv->uapsd_info.uapsd_flags = cpu_to_le16(uapsd_flags);
1540 priv->uapsd_info.min_auto_trigger_interval = 0;
1541 priv->uapsd_info.max_auto_trigger_interval = 0;
1542 priv->uapsd_info.auto_trigger_step = 0;
1543
1544 ret = wsm_set_uapsd_info(priv, &priv->uapsd_info);
1545 return ret;
1546}
1547
1548/* ******************************************************************** */
1549/* AP API */
1550
/* mac80211 sta_add callback (AP mode only): bind the station to a
 * link-id slot, mark it asleep if all its ACs are U-APSD, promote the
 * link entry to HARD and flush any frames buffered while the entry
 * was provisional.
 *
 * Returns 0, or -ENOENT when no link ID could be found/allocated.
 */
int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct cw1200_common *priv = hw->priv;
	struct cw1200_sta_priv *sta_priv =
			(struct cw1200_sta_priv *)&sta->drv_priv;
	struct cw1200_link_entry *entry;
	struct sk_buff *skb;

	if (priv->mode != NL80211_IFTYPE_AP)
		return 0;

	sta_priv->link_id = cw1200_find_link_id(priv, sta->addr);
	if (WARN_ON(!sta_priv->link_id)) {
		wiphy_info(priv->hw->wiphy,
			   "[AP] No more link IDs available.\n");
		return -ENOENT;
	}

	entry = &priv->link_id_db[sta_priv->link_id - 1];
	spin_lock_bh(&priv->ps_state_lock);
	/* All ACs delivery-enabled => treat the STA as asleep. */
	if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
					IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
		priv->sta_asleep_mask |= BIT(sta_priv->link_id);
	entry->status = CW1200_LINK_HARD;
	/* Deliver frames that were buffered before the STA was added. */
	while ((skb = skb_dequeue(&entry->rx_queue)))
		ieee80211_rx_irqsafe(priv->hw, skb);
	spin_unlock_bh(&priv->ps_state_lock);
	return 0;
}
1581
/* mac80211 sta_remove callback (AP mode only): downgrade the link
 * entry to RESERVE and let link_id_work garbage-collect it.
 *
 * The tx lock is taken asynchronously for link_id_work; it is
 * released here if the work was already queued.  Always returns 0.
 */
int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      struct ieee80211_sta *sta)
{
	struct cw1200_common *priv = hw->priv;
	struct cw1200_sta_priv *sta_priv =
			(struct cw1200_sta_priv *)&sta->drv_priv;
	struct cw1200_link_entry *entry;

	if (priv->mode != NL80211_IFTYPE_AP || !sta_priv->link_id)
		return 0;

	entry = &priv->link_id_db[sta_priv->link_id - 1];
	spin_lock_bh(&priv->ps_state_lock);
	entry->status = CW1200_LINK_RESERVE;
	entry->timestamp = jiffies;
	wsm_lock_tx_async(priv);
	if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
		wsm_unlock_tx(priv);
	spin_unlock_bh(&priv->ps_state_lock);
	/* Make sure link_id_work has run before returning to mac80211. */
	flush_workqueue(priv->workqueue);
	return 0;
}
1604
/* Core sleep/awake bookkeeping for one link ID (or all of them when
 * link_id == 0, allowed for AWAKE only).
 *
 * Maintains sta_asleep_mask and kicks the multicast start/stop works
 * on the all-asleep / first-awake transitions.
 *
 * Must be called with ps_state_lock held.
 */
static void __cw1200_sta_notify(struct ieee80211_hw *dev,
				struct ieee80211_vif *vif,
				enum sta_notify_cmd notify_cmd,
				int link_id)
{
	struct cw1200_common *priv = dev->priv;
	u32 bit, prev;

	/* Zero link id means "for all link IDs" */
	if (link_id)
		bit = BIT(link_id);
	else if (WARN_ON_ONCE(notify_cmd != STA_NOTIFY_AWAKE))
		bit = 0;
	else
		bit = priv->link_id_map;
	prev = priv->sta_asleep_mask & bit;

	switch (notify_cmd) {
	case STA_NOTIFY_SLEEP:
		if (!prev) {
			/* First STA of the set going to sleep: start
			 * multicast buffering if anything is pending. */
			if (priv->buffered_multicasts &&
			    !priv->sta_asleep_mask)
				queue_work(priv->workqueue,
					   &priv->multicast_start_work);
			priv->sta_asleep_mask |= bit;
		}
		break;
	case STA_NOTIFY_AWAKE:
		if (prev) {
			priv->sta_asleep_mask &= ~bit;
			priv->pspoll_mask &= ~bit;
			/* Last sleeper woke up: stop multicast delivery. */
			if (priv->tx_multicast && link_id &&
			    !priv->sta_asleep_mask)
				queue_work(priv->workqueue,
					   &priv->multicast_stop_work);
			cw1200_bh_wakeup(priv);
		}
		break;
	}
}
1645
/* mac80211 sta_notify callback: resolve the station's link ID and
 * forward to __cw1200_sta_notify() under ps_state_lock.
 */
void cw1200_sta_notify(struct ieee80211_hw *dev,
		       struct ieee80211_vif *vif,
		       enum sta_notify_cmd notify_cmd,
		       struct ieee80211_sta *sta)
{
	struct cw1200_common *priv = dev->priv;
	struct cw1200_sta_priv *sta_priv =
		(struct cw1200_sta_priv *)&sta->drv_priv;

	spin_lock_bh(&priv->ps_state_lock);
	__cw1200_sta_notify(dev, vif, notify_cmd, sta_priv->link_id);
	spin_unlock_bh(&priv->ps_state_lock);
}
1659
/* Internal PS notification (from RX path): map the firmware's
 * start/stop indication onto STA_NOTIFY_AWAKE/SLEEP for one link.
 *
 * Caller must hold ps_state_lock (same contract as
 * __cw1200_sta_notify()).
 */
static void cw1200_ps_notify(struct cw1200_common *priv,
			     int link_id, bool ps)
{
	if (link_id > CW1200_MAX_STA_IN_AP_MODE)
		return;

	pr_debug("%s for LinkId: %d. STAs asleep: %.8X\n",
		 ps ? "Stop" : "Start",
		 link_id, priv->sta_asleep_mask);

	__cw1200_sta_notify(priv->hw, priv->vif,
			    ps ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, link_id);
}
1673
/* Rebuild the beacon's TIM IE with the multicast (AID 0) bit set or
 * cleared and push it to firmware.
 *
 * Returns 0 on success, -ENOENT when mac80211 has no beacon for us
 * (in that case a pending flush/tx-lock handoff is resolved here).
 */
static int cw1200_set_tim_impl(struct cw1200_common *priv, bool aid0_bit_set)
{
	struct sk_buff *skb;
	struct wsm_update_ie update_ie = {
		.what = WSM_UPDATE_IE_BEACON,
		.count = 1,
	};
	u16 tim_offset, tim_length;

	pr_debug("[AP] mcast: %s.\n", aid0_bit_set ? "ena" : "dis");

	skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
				       &tim_offset, &tim_length);
	if (!skb) {
		if (!__cw1200_flush(priv, true))
			wsm_unlock_tx(priv);
		return -ENOENT;
	}

	if (tim_offset && tim_length >= 6) {
		/* Ignore DTIM count from mac80211:
		 * firmware handles DTIM internally.
		 */
		skb->data[tim_offset + 2] = 0;

		/* Set/reset aid0 bit */
		if (aid0_bit_set)
			skb->data[tim_offset + 4] |= 1;
		else
			skb->data[tim_offset + 4] &= ~1;
	}

	/* Hand the (possibly modified) TIM IE to firmware. */
	update_ie.ies = &skb->data[tim_offset];
	update_ie.length = tim_length;
	wsm_update_ie(priv, &update_ie);

	dev_kfree_skb(skb);

	return 0;
}
1714
1715void cw1200_set_tim_work(struct work_struct *work)
1716{
1717 struct cw1200_common *priv =
1718 container_of(work, struct cw1200_common, set_tim_work);
1719 (void)cw1200_set_tim_impl(priv, priv->aid0_bit_set);
1720}
1721
1722int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
1723 bool set)
1724{
1725 struct cw1200_common *priv = dev->priv;
1726 queue_work(priv->workqueue, &priv->set_tim_work);
1727 return 0;
1728}
1729
1730void cw1200_set_cts_work(struct work_struct *work)
1731{
1732 struct cw1200_common *priv =
1733 container_of(work, struct cw1200_common, set_cts_work);
1734
1735 u8 erp_ie[3] = {WLAN_EID_ERP_INFO, 0x1, 0};
1736 struct wsm_update_ie update_ie = {
1737 .what = WSM_UPDATE_IE_BEACON,
1738 .count = 1,
1739 .ies = erp_ie,
1740 .length = 3,
1741 };
1742 u32 erp_info;
1743 __le32 use_cts_prot;
1744 mutex_lock(&priv->conf_mutex);
1745 erp_info = priv->erp_info;
1746 mutex_unlock(&priv->conf_mutex);
1747 use_cts_prot =
1748 erp_info & WLAN_ERP_USE_PROTECTION ?
1749 __cpu_to_le32(1) : 0;
1750
1751 erp_ie[ERP_INFO_BYTE_OFFSET] = erp_info;
1752
1753 pr_debug("[STA] ERP information 0x%x\n", erp_info);
1754
1755 wsm_write_mib(priv, WSM_MIB_ID_NON_ERP_PROTECTION,
1756 &use_cts_prot, sizeof(use_cts_prot));
1757 wsm_update_ie(priv, &update_ie);
1758
1759 return;
1760}
1761
1762static int cw1200_set_btcoexinfo(struct cw1200_common *priv)
1763{
1764 struct wsm_override_internal_txrate arg;
1765 int ret = 0;
1766
1767 if (priv->mode == NL80211_IFTYPE_STATION) {
1768 /* Plumb PSPOLL and NULL template */
1769 cw1200_upload_pspoll(priv);
1770 cw1200_upload_null(priv);
1771 cw1200_upload_qosnull(priv);
1772 } else {
1773 return 0;
1774 }
1775
1776 memset(&arg, 0, sizeof(struct wsm_override_internal_txrate));
1777
1778 if (!priv->vif->p2p) {
1779 /* STATION mode */
1780 if (priv->bss_params.operational_rate_set & ~0xF) {
1781 pr_debug("[STA] STA has ERP rates\n");
1782 /* G or BG mode */
1783 arg.internalTxRate = (__ffs(
1784 priv->bss_params.operational_rate_set & ~0xF));
1785 } else {
1786 pr_debug("[STA] STA has non ERP rates\n");
1787 /* B only mode */
1788 arg.internalTxRate = (__ffs(le32_to_cpu(priv->association_mode.basic_rate_set)));
1789 }
1790 arg.nonErpInternalTxRate = (__ffs(le32_to_cpu(priv->association_mode.basic_rate_set)));
1791 } else {
1792 /* P2P mode */
1793 arg.internalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF));
1794 arg.nonErpInternalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF));
1795 }
1796
1797 pr_debug("[STA] BTCOEX_INFO MODE %d, internalTxRate : %x, nonErpInternalTxRate: %x\n",
1798 priv->mode,
1799 arg.internalTxRate,
1800 arg.nonErpInternalTxRate);
1801
1802 ret = wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE,
1803 &arg, sizeof(arg));
1804
1805 return ret;
1806}
1807
/* mac80211 bss_info_changed callback: translate every flagged piece
 * of BSS state (ARP filter, beacon, association, ERP, CQM, ...) into
 * the corresponding WSM configuration calls.
 *
 * Runs under conf_mutex; a (re)join, when needed, is performed last,
 * after the mutex is dropped, with the tx lock taken for
 * cw1200_do_join().
 */
void cw1200_bss_info_changed(struct ieee80211_hw *dev,
			     struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *info,
			     u32 changed)
{
	struct cw1200_common *priv = dev->priv;
	bool do_join = false;

	mutex_lock(&priv->conf_mutex);

	pr_debug("BSS CHANGED: %08x\n", changed);

	/* TODO: BSS_CHANGED_QOS */
	/* TODO: BSS_CHANGED_TXPOWER */

	if (changed & BSS_CHANGED_ARP_FILTER) {
		struct wsm_mib_arp_ipv4_filter filter = {0};
		int i;

		pr_debug("[STA] BSS_CHANGED_ARP_FILTER cnt: %d\n",
			 info->arp_addr_cnt);

		/* Currently only one IP address is supported by firmware.
		 * In case of more IPs arp filtering will be disabled.
		 */
		if (info->arp_addr_cnt > 0 &&
		    info->arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) {
			for (i = 0; i < info->arp_addr_cnt; i++) {
				filter.ipv4addrs[i] = info->arp_addr_list[i];
				pr_debug("[STA] addr[%d]: 0x%X\n",
					 i, filter.ipv4addrs[i]);
			}
			filter.enable = __cpu_to_le32(1);
		}

		pr_debug("[STA] arp ip filter enable: %d\n",
			 __le32_to_cpu(filter.enable));

		wsm_set_arp_ipv4_filter(priv, &filter);
	}

	/* Any beacon-content related change: re-upload the beacon. */
	if (changed &
	    (BSS_CHANGED_BEACON |
	     BSS_CHANGED_AP_PROBE_RESP |
	     BSS_CHANGED_BSSID |
	     BSS_CHANGED_SSID |
	     BSS_CHANGED_IBSS)) {
		pr_debug("BSS_CHANGED_BEACON\n");
		priv->beacon_int = info->beacon_int;
		cw1200_update_beaconing(priv);
		cw1200_upload_beacon(priv);
	}

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		pr_debug("BSS_CHANGED_BEACON_ENABLED (%d)\n", info->enable_beacon);

		if (priv->enable_beacon != info->enable_beacon) {
			cw1200_enable_beaconing(priv, info->enable_beacon);
			priv->enable_beacon = info->enable_beacon;
		}
	}

	if (changed & BSS_CHANGED_BEACON_INT) {
		pr_debug("CHANGED_BEACON_INT\n");
		if (info->ibss_joined)
			do_join = true;
		else if (priv->join_status == CW1200_JOIN_STATUS_AP)
			cw1200_update_beaconing(priv);
	}

	/* assoc/disassoc, or maybe AID changed */
	if (changed & BSS_CHANGED_ASSOC) {
		wsm_lock_tx(priv);
		priv->wep_default_key_id = -1;
		wsm_unlock_tx(priv);
	}

	if (changed & BSS_CHANGED_BSSID) {
		pr_debug("BSS_CHANGED_BSSID\n");
		do_join = true;
	}

	if (changed &
	    (BSS_CHANGED_ASSOC |
	     BSS_CHANGED_BSSID |
	     BSS_CHANGED_IBSS |
	     BSS_CHANGED_BASIC_RATES |
	     BSS_CHANGED_HT)) {
		pr_debug("BSS_CHANGED_ASSOC\n");
		if (info->assoc) {
			/* Association reported but we never completed a
			 * join: declare the connection lost. */
			if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) {
				ieee80211_connection_loss(vif);
				mutex_unlock(&priv->conf_mutex);
				return;
			} else if (priv->join_status == CW1200_JOIN_STATUS_PRE_STA) {
				priv->join_status = CW1200_JOIN_STATUS_STA;
			}
		} else {
			do_join = true;
		}

		if (info->assoc || info->ibss_joined) {
			struct ieee80211_sta *sta = NULL;
			__le32 htprot = 0;

			if (info->dtim_period)
				priv->join_dtim_period = info->dtim_period;
			priv->beacon_int = info->beacon_int;

			rcu_read_lock();

			/* Pull HT/rate capabilities from the AP's
			 * station entry (infrastructure only). */
			if (info->bssid && !info->ibss_joined)
				sta = ieee80211_find_sta(vif, info->bssid);
			if (sta) {
				priv->ht_info.ht_cap = sta->ht_cap;
				priv->bss_params.operational_rate_set =
					cw1200_rate_mask_to_wsm(priv,
								sta->supp_rates[priv->channel->band]);
				priv->ht_info.channel_type = cfg80211_get_chandef_type(&dev->conf.chandef);
				priv->ht_info.operation_mode = info->ht_operation_mode;
			} else {
				memset(&priv->ht_info, 0,
				       sizeof(priv->ht_info));
				priv->bss_params.operational_rate_set = -1;
			}
			rcu_read_unlock();

			/* Non Greenfield stations present */
			if (priv->ht_info.operation_mode &
			    IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
				htprot |= cpu_to_le32(WSM_NON_GREENFIELD_STA_PRESENT);

			/* Set HT protection method */
			htprot |= cpu_to_le32((priv->ht_info.operation_mode & IEEE80211_HT_OP_MODE_PROTECTION) << 2);

			/* TODO:
			 * STBC_param.dual_cts
			 * STBC_param.LSIG_TXOP_FILL
			 */

			wsm_write_mib(priv, WSM_MIB_ID_SET_HT_PROTECTION,
				      &htprot, sizeof(htprot));

			priv->association_mode.greenfield =
				cw1200_ht_greenfield(&priv->ht_info);
			priv->association_mode.flags =
				WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES |
				WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE |
				WSM_ASSOCIATION_MODE_USE_HT_MODE |
				WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET |
				WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING;
			priv->association_mode.preamble =
				info->use_short_preamble ?
				WSM_JOIN_PREAMBLE_SHORT :
				WSM_JOIN_PREAMBLE_LONG;
			priv->association_mode.basic_rate_set = __cpu_to_le32(
				cw1200_rate_mask_to_wsm(priv,
							info->basic_rates));
			priv->association_mode.mpdu_start_spacing =
				cw1200_ht_ampdu_density(&priv->ht_info);

			cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
			cancel_work_sync(&priv->unjoin_work);

			priv->bss_params.beacon_lost_count = priv->cqm_beacon_loss_count;
			priv->bss_params.aid = info->aid;

			if (priv->join_dtim_period < 1)
				priv->join_dtim_period = 1;

			pr_debug("[STA] DTIM %d, interval: %d\n",
				 priv->join_dtim_period, priv->beacon_int);
			pr_debug("[STA] Preamble: %d, Greenfield: %d, Aid: %d, Rates: 0x%.8X, Basic: 0x%.8X\n",
				 priv->association_mode.preamble,
				 priv->association_mode.greenfield,
				 priv->bss_params.aid,
				 priv->bss_params.operational_rate_set,
				 priv->association_mode.basic_rate_set);
			wsm_set_association_mode(priv, &priv->association_mode);

			if (!info->ibss_joined) {
				wsm_keep_alive_period(priv, 30 /* sec */);
				wsm_set_bss_params(priv, &priv->bss_params);
				priv->setbssparams_done = true;
				cw1200_set_beacon_wakeup_period_work(&priv->set_beacon_wakeup_period_work);
				cw1200_set_pm(priv, &priv->powersave_mode);
			}
			if (priv->vif->p2p) {
				pr_debug("[STA] Setting p2p powersave configuration.\n");
				wsm_set_p2p_ps_modeinfo(priv,
							&priv->p2p_ps_modeinfo);
			}
			if (priv->bt_present)
				cw1200_set_btcoexinfo(priv);
		} else {
			/* Disassociated: forget the old association state. */
			memset(&priv->association_mode, 0,
			       sizeof(priv->association_mode));
			memset(&priv->bss_params, 0, sizeof(priv->bss_params));
		}
	}

	/* ERP Protection */
	if (changed & (BSS_CHANGED_ASSOC |
		       BSS_CHANGED_ERP_CTS_PROT |
		       BSS_CHANGED_ERP_PREAMBLE)) {
		u32 prev_erp_info = priv->erp_info;
		if (info->use_cts_prot)
			priv->erp_info |= WLAN_ERP_USE_PROTECTION;
		else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT))
			priv->erp_info &= ~WLAN_ERP_USE_PROTECTION;

		if (info->use_short_preamble)
			priv->erp_info |= WLAN_ERP_BARKER_PREAMBLE;
		else
			priv->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE;

		pr_debug("[STA] ERP Protection: %x\n", priv->erp_info);

		/* Firmware update is deferred to set_cts_work. */
		if (prev_erp_info != priv->erp_info)
			queue_work(priv->workqueue, &priv->set_cts_work);
	}

	/* ERP Slottime */
	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT)) {
		__le32 slot_time = info->use_short_slot ?
			__cpu_to_le32(9) : __cpu_to_le32(20);
		pr_debug("[STA] Slot time: %d us.\n",
			 __le32_to_cpu(slot_time));
		wsm_write_mib(priv, WSM_MIB_ID_DOT11_SLOT_TIME,
			      &slot_time, sizeof(slot_time));
	}

	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) {
		struct wsm_rcpi_rssi_threshold threshold = {
			.rollingAverageCount = 8,
		};
		pr_debug("[CQM] RSSI threshold subscribe: %d +- %d\n",
			 info->cqm_rssi_thold, info->cqm_rssi_hyst);
		priv->cqm_rssi_thold = info->cqm_rssi_thold;
		priv->cqm_rssi_hyst = info->cqm_rssi_hyst;

		if (info->cqm_rssi_thold || info->cqm_rssi_hyst) {
			/* RSSI subscription enabled */
			/* TODO: It's not a correct way of setting threshold.
			 * Upper and lower must be set equal here and adjusted
			 * in callback. However current implementation is much
			 * more reliable and stable.
			 */

			/* RSSI: signed Q8.0, RCPI: unsigned Q7.1
			 * RSSI = RCPI / 2 - 110
			 */
			if (priv->cqm_use_rssi) {
				threshold.upperThreshold =
					info->cqm_rssi_thold + info->cqm_rssi_hyst;
				threshold.lowerThreshold =
					info->cqm_rssi_thold;
				threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI;
			} else {
				threshold.upperThreshold = (info->cqm_rssi_thold + info->cqm_rssi_hyst + 110) * 2;
				threshold.lowerThreshold = (info->cqm_rssi_thold + 110) * 2;
			}
			threshold.rssiRcpiMode |= WSM_RCPI_RSSI_THRESHOLD_ENABLE;
		} else {
			/* There is a bug in FW, see sta.c. We have to enable
			 * dummy subscription to get correct RSSI values.
			 */
			threshold.rssiRcpiMode |=
				WSM_RCPI_RSSI_THRESHOLD_ENABLE |
				WSM_RCPI_RSSI_DONT_USE_UPPER |
				WSM_RCPI_RSSI_DONT_USE_LOWER;
			if (priv->cqm_use_rssi)
				threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI;
		}
		wsm_set_rcpi_rssi_threshold(priv, &threshold);
	}
	mutex_unlock(&priv->conf_mutex);

	if (do_join) {
		wsm_lock_tx(priv);
		cw1200_do_join(priv); /* Will unlock it for us */
	}
}
2091
2092void cw1200_multicast_start_work(struct work_struct *work)
2093{
2094 struct cw1200_common *priv =
2095 container_of(work, struct cw1200_common, multicast_start_work);
2096 long tmo = priv->join_dtim_period *
2097 (priv->beacon_int + 20) * HZ / 1024;
2098
2099 cancel_work_sync(&priv->multicast_stop_work);
2100
2101 if (!priv->aid0_bit_set) {
2102 wsm_lock_tx(priv);
2103 cw1200_set_tim_impl(priv, true);
2104 priv->aid0_bit_set = true;
2105 mod_timer(&priv->mcast_timeout, jiffies + tmo);
2106 wsm_unlock_tx(priv);
2107 }
2108}
2109
2110void cw1200_multicast_stop_work(struct work_struct *work)
2111{
2112 struct cw1200_common *priv =
2113 container_of(work, struct cw1200_common, multicast_stop_work);
2114
2115 if (priv->aid0_bit_set) {
2116 del_timer_sync(&priv->mcast_timeout);
2117 wsm_lock_tx(priv);
2118 priv->aid0_bit_set = false;
2119 cw1200_set_tim_impl(priv, false);
2120 wsm_unlock_tx(priv);
2121 }
2122}
2123
2124void cw1200_mcast_timeout(unsigned long arg)
2125{
2126 struct cw1200_common *priv =
2127 (struct cw1200_common *)arg;
2128
2129 wiphy_warn(priv->hw->wiphy,
2130 "Multicast delivery timeout.\n");
2131 spin_lock_bh(&priv->ps_state_lock);
2132 priv->tx_multicast = priv->aid0_bit_set &&
2133 priv->buffered_multicasts;
2134 if (priv->tx_multicast)
2135 cw1200_bh_wakeup(priv);
2136 spin_unlock_bh(&priv->ps_state_lock);
2137}
2138
2139int cw1200_ampdu_action(struct ieee80211_hw *hw,
2140 struct ieee80211_vif *vif,
2141 enum ieee80211_ampdu_mlme_action action,
2142 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
2143 u8 buf_size)
2144{
2145 /* Aggregation is implemented fully in firmware,
2146 * including block ack negotiation. Do not allow
2147 * mac80211 stack to do anything: it interferes with
2148 * the firmware.
2149 */
2150
2151 /* Note that we still need this function stubbed. */
2152 return -ENOTSUPP;
2153}
2154
2155/* ******************************************************************** */
2156/* WSM callback */
/* WSM suspend/resume indication handler.
 *
 * Multicast (arg->multicast): on "stop", multicast delivery is simply
 * disabled; on "start", the device is kept awake for roughly one DTIM
 * period worth of jiffies and delivery is enabled only when the AID0
 * bit is set and multicast frames are actually buffered.
 * Unicast: forwards the per-link powersave transition to
 * cw1200_ps_notify() and wakes the bottom half on resume.
 */
void cw1200_suspend_resume(struct cw1200_common *priv,
			   struct wsm_suspend_resume *arg)
{
	pr_debug("[AP] %s: %s\n",
		 arg->stop ? "stop" : "start",
		 arg->multicast ? "broadcast" : "unicast");

	if (arg->multicast) {
		bool cancel_tmo = false;
		spin_lock_bh(&priv->ps_state_lock);
		if (arg->stop) {
			priv->tx_multicast = false;
		} else {
			/* Firmware sends this indication every DTIM if there
			 * is a STA in powersave connected. There is no reason
			 * to suspend, following wakeup will consume much more
			 * power than it could be saved.
			 */
			cw1200_pm_stay_awake(&priv->pm_state,
					     priv->join_dtim_period *
					     (priv->beacon_int + 20) * HZ / 1024);
			priv->tx_multicast = (priv->aid0_bit_set &&
					      priv->buffered_multicasts);
			if (priv->tx_multicast) {
				cancel_tmo = true;
				cw1200_bh_wakeup(priv);
			}
		}
		spin_unlock_bh(&priv->ps_state_lock);
		/* cw1200_mcast_timeout() takes ps_state_lock itself, so the
		 * synchronous cancel must happen outside the lock to avoid
		 * a deadlock.
		 */
		if (cancel_tmo)
			del_timer_sync(&priv->mcast_timeout);
	} else {
		spin_lock_bh(&priv->ps_state_lock);
		cw1200_ps_notify(priv, arg->link_id, arg->stop);
		spin_unlock_bh(&priv->ps_state_lock);
		if (!arg->stop)
			cw1200_bh_wakeup(priv);
	}
	return;
}
2197
2198/* ******************************************************************** */
2199/* AP privates */
2200
/* Build the beacon template via mac80211 and upload it to the firmware,
 * then reuse the same skb (with the frame type patched) as the probe
 * response template.  For P2P the firmware acts as probe responder
 * instead of using a template.
 * Returns 0 on success or a negative error code.
 */
static int cw1200_upload_beacon(struct cw1200_common *priv)
{
	int ret = 0;
	struct ieee80211_mgmt *mgmt;
	struct wsm_template_frame frame = {
		.frame_type = WSM_FRAME_TYPE_BEACON,
	};

	u16 tim_offset;
	u16 tim_len;

	/* Beacon templates only make sense for AP-like interface modes.
	 * frame.skb is still NULL here and dev_kfree_skb(NULL) is a no-op,
	 * so jumping to done is safe.
	 */
	if (priv->mode == NL80211_IFTYPE_STATION ||
	    priv->mode == NL80211_IFTYPE_MONITOR ||
	    priv->mode == NL80211_IFTYPE_UNSPECIFIED)
		goto done;

	if (priv->vif->p2p)
		frame.rate = WSM_TRANSMIT_RATE_6;

	frame.skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
					     &tim_offset, &tim_len);
	if (!frame.skb)
		return -ENOMEM;

	ret = wsm_set_template_frame(priv, &frame);

	if (ret)
		goto done;

	/* TODO: Distill probe resp; remove TIM
	 * and any other beacon-specific IEs
	 */
	mgmt = (void *)frame.skb->data;
	/* Patch the frame control so the same skb doubles as a probe
	 * response template.
	 */
	mgmt->frame_control =
		__cpu_to_le16(IEEE80211_FTYPE_MGMT |
			      IEEE80211_STYPE_PROBE_RESP);

	frame.frame_type = WSM_FRAME_TYPE_PROBE_RESPONSE;
	if (priv->vif->p2p) {
		ret = wsm_set_probe_responder(priv, true);
	} else {
		/* Return value of the responder toggle is intentionally
		 * ignored; ret carries the template upload result.
		 */
		ret = wsm_set_template_frame(priv, &frame);
		wsm_set_probe_responder(priv, false);
	}

done:
	dev_kfree_skb(frame.skb);

	return ret;
}
2251
2252static int cw1200_upload_pspoll(struct cw1200_common *priv)
2253{
2254 int ret = 0;
2255 struct wsm_template_frame frame = {
2256 .frame_type = WSM_FRAME_TYPE_PS_POLL,
2257 .rate = 0xFF,
2258 };
2259
2260
2261 frame.skb = ieee80211_pspoll_get(priv->hw, priv->vif);
2262 if (!frame.skb)
2263 return -ENOMEM;
2264
2265 ret = wsm_set_template_frame(priv, &frame);
2266
2267 dev_kfree_skb(frame.skb);
2268
2269 return ret;
2270}
2271
2272static int cw1200_upload_null(struct cw1200_common *priv)
2273{
2274 int ret = 0;
2275 struct wsm_template_frame frame = {
2276 .frame_type = WSM_FRAME_TYPE_NULL,
2277 .rate = 0xFF,
2278 };
2279
2280 frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
2281 if (!frame.skb)
2282 return -ENOMEM;
2283
2284 ret = wsm_set_template_frame(priv, &frame);
2285
2286 dev_kfree_skb(frame.skb);
2287
2288 return ret;
2289}
2290
/* Placeholder for uploading a QoS-null frame template.  The intended
 * implementation is kept in the comment below; until it is enabled
 * this function is a no-op that always returns 0.
 */
static int cw1200_upload_qosnull(struct cw1200_common *priv)
{
	int ret = 0;
	/* TODO: This needs to be implemented

	struct wsm_template_frame frame = {
		.frame_type = WSM_FRAME_TYPE_QOS_NULL,
		.rate = 0xFF,
	};

	frame.skb = ieee80211_qosnullfunc_get(priv->hw, priv->vif);
	if (!frame.skb)
		return -ENOMEM;

	ret = wsm_set_template_frame(priv, &frame);

	dev_kfree_skb(frame.skb);

	*/
	return ret;
}
2312
2313static int cw1200_enable_beaconing(struct cw1200_common *priv,
2314 bool enable)
2315{
2316 struct wsm_beacon_transmit transmit = {
2317 .enable_beaconing = enable,
2318 };
2319
2320 return wsm_beacon_transmit(priv, &transmit);
2321}
2322
/* Bring the device up as an AP (or P2P GO): issue a WSM start request
 * built from the current mac80211 BSS configuration, then upload keys,
 * apply the P2P powersave settings, and move join_status to AP.
 * Returns 0 on success or a negative error from the WSM layer.
 */
static int cw1200_start_ap(struct cw1200_common *priv)
{
	int ret;
	struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
	struct wsm_start start = {
		.mode = priv->vif->p2p ?
			WSM_START_MODE_P2P_GO : WSM_START_MODE_AP,
		.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
			WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G,
		.channel_number = priv->channel->hw_value,
		.beacon_interval = conf->beacon_int,
		.dtim_period = conf->dtim_period,
		.preamble = conf->use_short_preamble ?
			WSM_JOIN_PREAMBLE_SHORT :
			WSM_JOIN_PREAMBLE_LONG,
		.probe_delay = 100,
		.basic_rate_set = cw1200_rate_mask_to_wsm(priv,
							  conf->basic_rates),
	};
	struct wsm_operational_mode mode = {
		.power_mode = cw1200_power_mode,
		.disable_more_flag_usage = true,
	};

	/* Hidden SSID: keep ssid zeroed with ssid_len == 0. */
	memset(start.ssid, 0, sizeof(start.ssid));
	if (!conf->hidden_ssid) {
		start.ssid_len = conf->ssid_len;
		memcpy(start.ssid, conf->ssid, start.ssid_len);
	}

	priv->beacon_int = conf->beacon_int;
	priv->join_dtim_period = conf->dtim_period;

	/* Fresh AP instance: forget all previously known link ids. */
	memset(&priv->link_id_db, 0, sizeof(priv->link_id_db));

	pr_debug("[AP] ch: %d(%d), bcn: %d(%d), brt: 0x%.8X, ssid: %.*s.\n",
		 start.channel_number, start.band,
		 start.beacon_interval, start.dtim_period,
		 start.basic_rate_set,
		 start.ssid_len, start.ssid);
	ret = wsm_start(priv, &start);
	if (!ret)
		ret = cw1200_upload_keys(priv);
	if (!ret && priv->vif->p2p) {
		pr_debug("[AP] Setting p2p powersave configuration.\n");
		wsm_set_p2p_ps_modeinfo(priv, &priv->p2p_ps_modeinfo);
	}
	if (!ret) {
		/* Block-ack handling is left entirely to the firmware
		 * (see cw1200_ampdu_action).
		 */
		wsm_set_block_ack_policy(priv, 0, 0);
		priv->join_status = CW1200_JOIN_STATUS_AP;
		cw1200_update_filtering(priv);
	}
	/* Operational mode is applied even when the start sequence
	 * failed; ret still reflects the first failure.
	 */
	wsm_set_operational_mode(priv, &mode);
	return ret;
}
2378
2379static int cw1200_update_beaconing(struct cw1200_common *priv)
2380{
2381 struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
2382 struct wsm_reset reset = {
2383 .link_id = 0,
2384 .reset_statistics = true,
2385 };
2386
2387 if (priv->mode == NL80211_IFTYPE_AP) {
2388 /* TODO: check if changed channel, band */
2389 if (priv->join_status != CW1200_JOIN_STATUS_AP ||
2390 priv->beacon_int != conf->beacon_int) {
2391 pr_debug("ap restarting\n");
2392 wsm_lock_tx(priv);
2393 if (priv->join_status != CW1200_JOIN_STATUS_PASSIVE)
2394 wsm_reset(priv, &reset);
2395 priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
2396 cw1200_start_ap(priv);
2397 wsm_unlock_tx(priv);
2398 } else
2399 pr_debug("ap started join_status: %d\n",
2400 priv->join_status);
2401 }
2402 return 0;
2403}
diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h
new file mode 100644
index 000000000000..35babb62cc6a
--- /dev/null
+++ b/drivers/net/wireless/cw1200/sta.h
@@ -0,0 +1,123 @@
1/*
2 * Mac80211 STA interface for ST-Ericsson CW1200 mac80211 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef STA_H_INCLUDED
13#define STA_H_INCLUDED
14
15/* ******************************************************************** */
16/* mac80211 API */
17
18int cw1200_start(struct ieee80211_hw *dev);
19void cw1200_stop(struct ieee80211_hw *dev);
20int cw1200_add_interface(struct ieee80211_hw *dev,
21 struct ieee80211_vif *vif);
22void cw1200_remove_interface(struct ieee80211_hw *dev,
23 struct ieee80211_vif *vif);
24int cw1200_change_interface(struct ieee80211_hw *dev,
25 struct ieee80211_vif *vif,
26 enum nl80211_iftype new_type,
27 bool p2p);
28int cw1200_config(struct ieee80211_hw *dev, u32 changed);
29void cw1200_configure_filter(struct ieee80211_hw *dev,
30 unsigned int changed_flags,
31 unsigned int *total_flags,
32 u64 multicast);
33int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
34 u16 queue, const struct ieee80211_tx_queue_params *params);
35int cw1200_get_stats(struct ieee80211_hw *dev,
36 struct ieee80211_low_level_stats *stats);
37int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
38 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
39 struct ieee80211_key_conf *key);
40
41int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
42
43void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
44
45u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
46 struct netdev_hw_addr_list *mc_list);
47
48int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg);
49
50/* ******************************************************************** */
51/* WSM callbacks */
52
53void cw1200_join_complete_cb(struct cw1200_common *priv,
54 struct wsm_join_complete *arg);
55
56/* ******************************************************************** */
57/* WSM events */
58
59void cw1200_free_event_queue(struct cw1200_common *priv);
60void cw1200_event_handler(struct work_struct *work);
61void cw1200_bss_loss_work(struct work_struct *work);
62void cw1200_bss_params_work(struct work_struct *work);
63void cw1200_keep_alive_work(struct work_struct *work);
64void cw1200_tx_failure_work(struct work_struct *work);
65
66void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, int init, int good,
67 int bad);
/* Locked wrapper around __cw1200_cqm_bssloss_sm(): runs the CQM
 * beacon-loss state machine under priv->bss_loss_lock.
 */
static inline void cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
					 int init, int good, int bad)
{
	spin_lock(&priv->bss_loss_lock);
	__cw1200_cqm_bssloss_sm(priv, init, good, bad);
	spin_unlock(&priv->bss_loss_lock);
}
75
76/* ******************************************************************** */
77/* Internal API */
78
79int cw1200_setup_mac(struct cw1200_common *priv);
80void cw1200_join_timeout(struct work_struct *work);
81void cw1200_unjoin_work(struct work_struct *work);
82void cw1200_join_complete_work(struct work_struct *work);
83void cw1200_wep_key_work(struct work_struct *work);
84void cw1200_update_listening(struct cw1200_common *priv, bool enabled);
85void cw1200_update_filtering(struct cw1200_common *priv);
86void cw1200_update_filtering_work(struct work_struct *work);
87void cw1200_set_beacon_wakeup_period_work(struct work_struct *work);
88int cw1200_enable_listening(struct cw1200_common *priv);
89int cw1200_disable_listening(struct cw1200_common *priv);
90int cw1200_set_uapsd_param(struct cw1200_common *priv,
91 const struct wsm_edca_params *arg);
92void cw1200_ba_work(struct work_struct *work);
93void cw1200_ba_timer(unsigned long arg);
94
95/* AP stuffs */
96int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
97 bool set);
98int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
99 struct ieee80211_sta *sta);
100int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
101 struct ieee80211_sta *sta);
102void cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
103 enum sta_notify_cmd notify_cmd,
104 struct ieee80211_sta *sta);
105void cw1200_bss_info_changed(struct ieee80211_hw *dev,
106 struct ieee80211_vif *vif,
107 struct ieee80211_bss_conf *info,
108 u32 changed);
109int cw1200_ampdu_action(struct ieee80211_hw *hw,
110 struct ieee80211_vif *vif,
111 enum ieee80211_ampdu_mlme_action action,
112 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
113 u8 buf_size);
114
115void cw1200_suspend_resume(struct cw1200_common *priv,
116 struct wsm_suspend_resume *arg);
117void cw1200_set_tim_work(struct work_struct *work);
118void cw1200_set_cts_work(struct work_struct *work);
119void cw1200_multicast_start_work(struct work_struct *work);
120void cw1200_multicast_stop_work(struct work_struct *work);
121void cw1200_mcast_timeout(unsigned long arg);
122
123#endif
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
new file mode 100644
index 000000000000..5862c373d714
--- /dev/null
+++ b/drivers/net/wireless/cw1200/txrx.c
@@ -0,0 +1,1473 @@
1/*
2 * Datapath implementation for ST-Ericsson CW1200 mac80211 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <net/mac80211.h>
13#include <linux/etherdevice.h>
14#include <linux/skbuff.h>
15
16#include "cw1200.h"
17#include "wsm.h"
18#include "bh.h"
19#include "sta.h"
20#include "debug.h"
21
22#define CW1200_INVALID_RATE_ID (0xFF)
23
24static int cw1200_handle_action_rx(struct cw1200_common *priv,
25 struct sk_buff *skb);
26static const struct ieee80211_rate *
27cw1200_get_tx_rate(const struct cw1200_common *priv,
28 const struct ieee80211_tx_rate *rate);
29
30/* ******************************************************************** */
31/* TX queue lock / unlock */
32
33static inline void cw1200_tx_queues_lock(struct cw1200_common *priv)
34{
35 int i;
36 for (i = 0; i < 4; ++i)
37 cw1200_queue_lock(&priv->tx_queue[i]);
38}
39
40static inline void cw1200_tx_queues_unlock(struct cw1200_common *priv)
41{
42 int i;
43 for (i = 0; i < 4; ++i)
44 cw1200_queue_unlock(&priv->tx_queue[i]);
45}
46
47/* ******************************************************************** */
48/* TX policy cache implementation */
49
/* Dump a rate policy to the debug log: one hex digit per hardware rate
 * id (the 4-bit retry-count nibble, low nibble of each raw byte first),
 * followed by the number of defined entries.
 */
static void tx_policy_dump(struct tx_policy *policy)
{
	pr_debug("[TX policy] %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X: %d\n",
		 policy->raw[0] & 0x0F,  policy->raw[0] >> 4,
		 policy->raw[1] & 0x0F,  policy->raw[1] >> 4,
		 policy->raw[2] & 0x0F,  policy->raw[2] >> 4,
		 policy->raw[3] & 0x0F,  policy->raw[3] >> 4,
		 policy->raw[4] & 0x0F,  policy->raw[4] >> 4,
		 policy->raw[5] & 0x0F,  policy->raw[5] >> 4,
		 policy->raw[6] & 0x0F,  policy->raw[6] >> 4,
		 policy->raw[7] & 0x0F,  policy->raw[7] >> 4,
		 policy->raw[8] & 0x0F,  policy->raw[8] >> 4,
		 policy->raw[9] & 0x0F,  policy->raw[9] >> 4,
		 policy->raw[10] & 0x0F,  policy->raw[10] >> 4,
		 policy->raw[11] & 0x0F,  policy->raw[11] >> 4,
		 policy->defined);
}
67
/* Convert a mac80211 rate series into a WSM TX rate policy: packed
 * 4-bit retry counts per hardware rate id in policy->tbl.  The input
 * `rates` array is modified in place (sorted, deduplicated, retry
 * counts clipped and possibly extended by the rate-switch workaround).
 */
static void tx_policy_build(const struct cw1200_common *priv,
	/* [out] */ struct tx_policy *policy,
	struct ieee80211_tx_rate *rates, size_t count)
{
	int i, j;
	unsigned limit = priv->short_frame_max_tx_count;
	unsigned total = 0;
	BUG_ON(rates[0].idx < 0);
	memset(policy, 0, sizeof(*policy));

	/* Sort rates in descending order.
	 * NOTE(review): this is a single adjacent-swap pass, not a full
	 * sort; it appears to rely on mac80211 supplying an (almost)
	 * ordered series -- TODO confirm.
	 */
	for (i = 1; i < count; ++i) {
		if (rates[i].idx < 0) {
			/* idx < 0 terminates the series. */
			count = i;
			break;
		}
		if (rates[i].idx > rates[i - 1].idx) {
			struct ieee80211_tx_rate tmp = rates[i - 1];
			rates[i - 1] = rates[i];
			rates[i] = tmp;
		}
	}

	/* Eliminate duplicates (merging their retry counts) and track
	 * the total number of transmissions requested.
	 */
	total = rates[0].count;
	for (i = 0, j = 1; j < count; ++j) {
		if (rates[j].idx == rates[i].idx) {
			rates[i].count += rates[j].count;
		} else if (rates[j].idx > rates[i].idx) {
			break;
		} else {
			++i;
			if (i != j)
				rates[i] = rates[j];
		}
		total += rates[j].count;
	}
	count = i + 1;

	/* Re-fill policy trying to keep every requested rate and with
	 * respect to the global max tx retransmission count.
	 */
	if (limit < count)
		limit = count;
	if (total > limit) {
		/* Clip each rate's retries while guaranteeing at least
		 * one transmission for every remaining rate.
		 */
		for (i = 0; i < count; ++i) {
			int left = count - i - 1;
			if (rates[i].count > limit - left)
				rates[i].count = limit - left;
			limit -= rates[i].count;
		}
	}

	/* HACK!!! Device has problems (at least) switching from
	 * 54Mbps CTS to 1Mbps. This switch takes enormous amount
	 * of time (100-200 ms), leading to valuable throughput drop.
	 * As a workaround, additional g-rates are injected to the
	 * policy.
	 */
	if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
	    rates[0].idx > 4 && rates[0].count > 2 &&
	    rates[1].idx < 2) {
		int mid_rate = (rates[0].idx + 4) >> 1;

		/* Decrease number of retries for the initial rate */
		rates[0].count -= 2;

		if (mid_rate != 4) {
			/* Keep fallback rate at 1Mbps. */
			rates[3] = rates[1];

			/* Inject 1 transmission on lowest g-rate */
			rates[2].idx = 4;
			rates[2].count = 1;
			rates[2].flags = rates[1].flags;

			/* Inject 1 transmission on mid-rate */
			rates[1].idx = mid_rate;
			rates[1].count = 1;

			/* Fallback to 1 Mbps is a really bad thing,
			 * so let's try to increase probability of
			 * successful transmission on the lowest g rate
			 * even more
			 */
			if (rates[0].count >= 3) {
				--rates[0].count;
				++rates[2].count;
			}

			/* Adjust amount of rates defined */
			count += 2;
		} else {
			/* Keep fallback rate at 1Mbps. */
			rates[2] = rates[1];

			/* Inject 2 transmissions on lowest g-rate */
			rates[1].idx = 4;
			rates[1].count = 2;

			/* Adjust amount of rates defined */
			count += 1;
		}
	}

	/* Number of nibble entries in use, derived from the highest
	 * hardware rate id in the (sorted) series.
	 */
	policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1;

	for (i = 0; i < count; ++i) {
		register unsigned rateid, off, shift, retries;

		rateid = cw1200_get_tx_rate(priv, &rates[i])->hw_value;
		off = rateid >> 3;		/* eq. rateid / 8 */
		shift = (rateid & 0x07) << 2;	/* eq. (rateid % 8) * 4 */

		retries = rates[i].count;
		if (retries > 0x0F) {
			/* Clip to the 4-bit nibble field. */
			rates[i].count = 0x0f;
			retries = 0x0F;
		}
		policy->tbl[off] |= __cpu_to_le32(retries << shift);
		policy->retry_count += retries;
	}

	pr_debug("[TX policy] Policy (%zu): %d:%d, %d:%d, %d:%d, %d:%d\n",
		 count,
		 rates[0].idx, rates[0].count,
		 rates[1].idx, rates[1].count,
		 rates[2].idx, rates[2].count,
		 rates[3].idx, rates[3].count);
}
198
199static inline bool tx_policy_is_equal(const struct tx_policy *wanted,
200 const struct tx_policy *cached)
201{
202 size_t count = wanted->defined >> 1;
203 if (wanted->defined > cached->defined)
204 return false;
205 if (count) {
206 if (memcmp(wanted->raw, cached->raw, count))
207 return false;
208 }
209 if (wanted->defined & 1) {
210 if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F))
211 return false;
212 }
213 return true;
214}
215
/* Look up `wanted` in the policy cache; the "used" list is scanned
 * before "free".  Returns the entry's index within cache->cache
 * (computed by pointer arithmetic), or -1 when not cached.
 */
static int tx_policy_find(struct tx_policy_cache *cache,
			  const struct tx_policy *wanted)
{
	/* O(n) complexity. Not so good, but there's only 8 entries in
	 * the cache.
	 * Also lru helps to reduce search time.
	 */
	struct tx_policy_cache_entry *it;
	/* First search for policy in "used" list */
	list_for_each_entry(it, &cache->used, link) {
		if (tx_policy_is_equal(wanted, &it->policy))
			return it - cache->cache;
	}
	/* Then - in "free list" */
	list_for_each_entry(it, &cache->free, link) {
		if (tx_policy_is_equal(wanted, &it->policy))
			return it - cache->cache;
	}
	return -1;
}
236
237static inline void tx_policy_use(struct tx_policy_cache *cache,
238 struct tx_policy_cache_entry *entry)
239{
240 ++entry->policy.usage_count;
241 list_move(&entry->link, &cache->used);
242}
243
244static inline int tx_policy_release(struct tx_policy_cache *cache,
245 struct tx_policy_cache_entry *entry)
246{
247 int ret = --entry->policy.usage_count;
248 if (!ret)
249 list_move(&entry->link, &cache->free);
250 return ret;
251}
252
/* Reset every cache entry.  All TX queues are expected to be empty at
 * this point, so all usage counts should already be zero (WARN and
 * force-release otherwise).
 */
void tx_policy_clean(struct cw1200_common *priv)
{
	int idx, locked;
	struct tx_policy_cache *cache = &priv->tx_policy_cache;
	struct tx_policy_cache_entry *entry;

	cw1200_tx_queues_lock(priv);
	spin_lock_bh(&cache->lock);
	/* An empty free list means tx_policy_get() has left the TX
	 * queues locked until an entry is released.
	 */
	locked = list_empty(&cache->free);

	for (idx = 0; idx < TX_POLICY_CACHE_SIZE; idx++) {
		entry = &cache->cache[idx];
		/* Policy usage count should be 0 at this time as all queues
		   should be empty
		 */
		if (WARN_ON(entry->policy.usage_count)) {
			entry->policy.usage_count = 0;
			list_move(&entry->link, &cache->free);
		}
		memset(&entry->policy, 0, sizeof(entry->policy));
	}
	/* Drop the extra queue-lock reference left by tx_policy_get()
	 * when the cache had run full (queue locks appear to be
	 * counted -- TODO confirm against cw1200_queue_lock()).
	 */
	if (locked)
		cw1200_tx_queues_unlock(priv);

	/* Balances the cw1200_tx_queues_lock() taken above. */
	cw1200_tx_queues_unlock(priv);
	spin_unlock_bh(&cache->lock);
}
280
281/* ******************************************************************** */
282/* External TX policy cache API */
283
284void tx_policy_init(struct cw1200_common *priv)
285{
286 struct tx_policy_cache *cache = &priv->tx_policy_cache;
287 int i;
288
289 memset(cache, 0, sizeof(*cache));
290
291 spin_lock_init(&cache->lock);
292 INIT_LIST_HEAD(&cache->used);
293 INIT_LIST_HEAD(&cache->free);
294
295 for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i)
296 list_add(&cache->cache[i].link, &cache->free);
297}
298
/* Find (or create) a cached rate policy matching `rates` and take a
 * reference on it.  Sets *renew when a new entry was created and thus
 * still needs uploading to the firmware.  Returns the cache index, or
 * CW1200_INVALID_RATE_ID when the cache is exhausted.
 * Note: tx_policy_build() modifies `rates` in place.
 */
static int tx_policy_get(struct cw1200_common *priv,
			 struct ieee80211_tx_rate *rates,
			 size_t count, bool *renew)
{
	int idx;
	struct tx_policy_cache *cache = &priv->tx_policy_cache;
	struct tx_policy wanted;

	tx_policy_build(priv, &wanted, rates, count);

	spin_lock_bh(&cache->lock);
	if (WARN_ON_ONCE(list_empty(&cache->free))) {
		spin_unlock_bh(&cache->lock);
		return CW1200_INVALID_RATE_ID;
	}
	idx = tx_policy_find(cache, &wanted);
	if (idx >= 0) {
		pr_debug("[TX policy] Used TX policy: %d\n", idx);
		*renew = false;
	} else {
		struct tx_policy_cache_entry *entry;
		*renew = true;
		/* If policy is not found create a new one
		 * using the oldest entry in "free" list
		 */
		entry = list_entry(cache->free.prev,
				   struct tx_policy_cache_entry, link);
		entry->policy = wanted;
		idx = entry - cache->cache;
		pr_debug("[TX policy] New TX policy: %d\n", idx);
		tx_policy_dump(&entry->policy);
	}
	tx_policy_use(cache, &cache->cache[idx]);
	if (list_empty(&cache->free)) {
		/* Last free entry consumed: lock the TX queues until
		 * tx_policy_put() releases one again.
		 */
		cw1200_tx_queues_lock(priv);
	}
	spin_unlock_bh(&cache->lock);
	return idx;
}
339
/* Release one reference on cache entry `idx`.  If the free list was
 * exhausted (tx_policy_get() had locked the TX queues) and this
 * release frees the entry, unlock the queues again.
 */
static void tx_policy_put(struct cw1200_common *priv, int idx)
{
	int usage, locked;
	struct tx_policy_cache *cache = &priv->tx_policy_cache;

	spin_lock_bh(&cache->lock);
	/* Remember whether the free list was empty before the release. */
	locked = list_empty(&cache->free);
	usage = tx_policy_release(cache, &cache->cache[idx]);
	if (locked && !usage) {
		/* Unlock TX queues. */
		cw1200_tx_queues_unlock(priv);
	}
	spin_unlock_bh(&cache->lock);
}
354
/* Push all modified (retry_count set, not yet uploaded) cache entries
 * to the firmware in a single WSM set-tx-rate-retry-policy request.
 * Returns the WSM call's result.
 */
static int tx_policy_upload(struct cw1200_common *priv)
{
	struct tx_policy_cache *cache = &priv->tx_policy_cache;
	int i;
	struct wsm_set_tx_rate_retry_policy arg = {
		.num = 0,
	};
	spin_lock_bh(&cache->lock);

	/* Upload only modified entries. */
	for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) {
		struct tx_policy *src = &cache->cache[i].policy;
		if (src->retry_count && !src->uploaded) {
			struct wsm_tx_rate_retry_policy *dst =
				&arg.tbl[arg.num];
			dst->index = i;
			dst->short_retries = priv->short_frame_max_tx_count;
			dst->long_retries = priv->long_frame_max_tx_count;

			dst->flags = WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED |
				WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT;
			memcpy(dst->rate_count_indices, src->tbl,
			       sizeof(dst->rate_count_indices));
			/* NOTE(review): marked uploaded before the WSM
			 * request is issued; a failed upload leaves the
			 * entry flagged as clean.
			 */
			src->uploaded = 1;
			++arg.num;
		}
	}
	spin_unlock_bh(&cache->lock);
	cw1200_debug_tx_cache_miss(priv);
	pr_debug("[TX policy] Upload %d policies\n", arg.num);
	return wsm_set_tx_rate_retry_policy(priv, &arg);
}
387
388void tx_policy_upload_work(struct work_struct *work)
389{
390 struct cw1200_common *priv =
391 container_of(work, struct cw1200_common, tx_policy_upload_work);
392
393 pr_debug("[TX] TX policy upload.\n");
394 tx_policy_upload(priv);
395
396 wsm_unlock_tx(priv);
397 cw1200_tx_queues_unlock(priv);
398}
399
400/* ******************************************************************** */
401/* cw1200 TX implementation */
402
/* Per-frame TX context shared by the cw1200_tx_h_* handler chain. */
struct cw1200_txinfo {
	struct sk_buff *skb;	/* frame being transmitted */
	unsigned queue;		/* mac80211 queue (AC) index */
	struct ieee80211_tx_info *tx_info;	/* mac80211 control block */
	const struct ieee80211_rate *rate;	/* first (highest) TX rate */
	struct ieee80211_hdr *hdr;	/* 802.11 header within skb */
	size_t hdrlen;		/* header length; grows with IV/alignment */
	const u8 *da;		/* destination address */
	struct cw1200_sta_priv *sta_priv;	/* driver per-STA data */
	struct ieee80211_sta *sta;	/* mac80211 station; may be NULL */
	struct cw1200_txpriv txpriv;	/* driver TX metadata (link id, tid, ...) */
};
415
416u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates)
417{
418 u32 ret = 0;
419 int i;
420 for (i = 0; i < 32; ++i) {
421 if (rates & BIT(i))
422 ret |= BIT(priv->rates[i].hw_value);
423 }
424 return ret;
425}
426
427static const struct ieee80211_rate *
428cw1200_get_tx_rate(const struct cw1200_common *priv,
429 const struct ieee80211_tx_rate *rate)
430{
431 if (rate->idx < 0)
432 return NULL;
433 if (rate->flags & IEEE80211_TX_RC_MCS)
434 return &priv->mcs_rates[rate->idx];
435 return &priv->hw->wiphy->bands[priv->channel->band]->
436 bitrates[rate->idx];
437}
438
/* Resolve the WSM link id for an outgoing frame:
 *  - known station with an allocated link id: use it;
 *  - non-AP mode: link 0;
 *  - multicast in AP mode: raw link 0, tagged for after-DTIM delivery
 *    while beaconing is enabled;
 *  - unknown unicast peer in AP mode: find or allocate a link id.
 * Frames on a peer's U-APSD queue are redirected to the UAPSD link.
 * Returns 0, or -ENOENT when the link id space is exhausted.
 */
static int
cw1200_tx_h_calc_link_ids(struct cw1200_common *priv,
			  struct cw1200_txinfo *t)
{
	if (t->sta && t->sta_priv->link_id)
		t->txpriv.raw_link_id =
				t->txpriv.link_id =
				t->sta_priv->link_id;
	else if (priv->mode != NL80211_IFTYPE_AP)
		t->txpriv.raw_link_id =
				t->txpriv.link_id = 0;
	else if (is_multicast_ether_addr(t->da)) {
		if (priv->enable_beacon) {
			t->txpriv.raw_link_id = 0;
			t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM;
		} else {
			t->txpriv.raw_link_id = 0;
			t->txpriv.link_id = 0;
		}
	} else {
		t->txpriv.link_id = cw1200_find_link_id(priv, t->da);
		if (!t->txpriv.link_id)
			t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da);
		if (!t->txpriv.link_id) {
			wiphy_err(priv->hw->wiphy,
				  "No more link IDs available.\n");
			return -ENOENT;
		}
		t->txpriv.raw_link_id = t->txpriv.link_id;
	}
	/* Refresh the link's activity timestamp for aging/GC. */
	if (t->txpriv.raw_link_id)
		priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp =
				jiffies;
	if (t->sta && (t->sta->uapsd_queues & BIT(t->queue)))
		t->txpriv.link_id = CW1200_LINK_ID_UAPSD;
	return 0;
}
476
477static void
478cw1200_tx_h_pm(struct cw1200_common *priv,
479 struct cw1200_txinfo *t)
480{
481 if (ieee80211_is_auth(t->hdr->frame_control)) {
482 u32 mask = ~BIT(t->txpriv.raw_link_id);
483 spin_lock_bh(&priv->ps_state_lock);
484 priv->sta_asleep_mask &= mask;
485 priv->pspoll_mask &= mask;
486 spin_unlock_bh(&priv->ps_state_lock);
487 }
488}
489
490static void
491cw1200_tx_h_calc_tid(struct cw1200_common *priv,
492 struct cw1200_txinfo *t)
493{
494 if (ieee80211_is_data_qos(t->hdr->frame_control)) {
495 u8 *qos = ieee80211_get_qos_ctl(t->hdr);
496 t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK;
497 } else if (ieee80211_is_data(t->hdr->frame_control)) {
498 t->txpriv.tid = 0;
499 }
500}
501
502static int
503cw1200_tx_h_crypt(struct cw1200_common *priv,
504 struct cw1200_txinfo *t)
505{
506 if (!t->tx_info->control.hw_key ||
507 !ieee80211_has_protected(t->hdr->frame_control))
508 return 0;
509
510 t->hdrlen += t->tx_info->control.hw_key->iv_len;
511 skb_put(t->skb, t->tx_info->control.hw_key->icv_len);
512
513 if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
514 skb_put(t->skb, 8); /* MIC space */
515
516 return 0;
517}
518
519static int
520cw1200_tx_h_align(struct cw1200_common *priv,
521 struct cw1200_txinfo *t,
522 u8 *flags)
523{
524 size_t offset = (size_t)t->skb->data & 3;
525
526 if (!offset)
527 return 0;
528
529 if (offset & 1) {
530 wiphy_err(priv->hw->wiphy,
531 "Bug: attempt to transmit a frame with wrong alignment: %zu\n",
532 offset);
533 return -EINVAL;
534 }
535
536 if (skb_headroom(t->skb) < offset) {
537 wiphy_err(priv->hw->wiphy,
538 "Bug: no space allocated for DMA alignment. headroom: %d\n",
539 skb_headroom(t->skb));
540 return -ENOMEM;
541 }
542 skb_push(t->skb, offset);
543 t->hdrlen += offset;
544 t->txpriv.offset += offset;
545 *flags |= WSM_TX_2BYTES_SHIFT;
546 cw1200_debug_tx_align(priv);
547 return 0;
548}
549
550static int
551cw1200_tx_h_action(struct cw1200_common *priv,
552 struct cw1200_txinfo *t)
553{
554 struct ieee80211_mgmt *mgmt =
555 (struct ieee80211_mgmt *)t->hdr;
556 if (ieee80211_is_action(t->hdr->frame_control) &&
557 mgmt->u.action.category == WLAN_CATEGORY_BACK)
558 return 1;
559 else
560 return 0;
561}
562
563/* Add WSM header */
564static struct wsm_tx *
565cw1200_tx_h_wsm(struct cw1200_common *priv,
566 struct cw1200_txinfo *t)
567{
568 struct wsm_tx *wsm;
569
570 if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) {
571 wiphy_err(priv->hw->wiphy,
572 "Bug: no space allocated for WSM header. headroom: %d\n",
573 skb_headroom(t->skb));
574 return NULL;
575 }
576
577 wsm = (struct wsm_tx *)skb_push(t->skb, sizeof(struct wsm_tx));
578 t->txpriv.offset += sizeof(struct wsm_tx);
579 memset(wsm, 0, sizeof(*wsm));
580 wsm->hdr.len = __cpu_to_le16(t->skb->len);
581 wsm->hdr.id = __cpu_to_le16(0x0004);
582 wsm->queue_id = wsm_queue_id_to_wsm(t->queue);
583 return wsm;
584}
585
586/* BT Coex specific handling */
587static void
588cw1200_tx_h_bt(struct cw1200_common *priv,
589 struct cw1200_txinfo *t,
590 struct wsm_tx *wsm)
591{
592 u8 priority = 0;
593
594 if (!priv->bt_present)
595 return;
596
597 if (ieee80211_is_nullfunc(t->hdr->frame_control)) {
598 priority = WSM_EPTA_PRIORITY_MGT;
599 } else if (ieee80211_is_data(t->hdr->frame_control)) {
600 /* Skip LLC SNAP header (+6) */
601 u8 *payload = &t->skb->data[t->hdrlen];
602 __be16 *ethertype = (__be16 *)&payload[6];
603 if (be16_to_cpu(*ethertype) == ETH_P_PAE)
604 priority = WSM_EPTA_PRIORITY_EAPOL;
605 } else if (ieee80211_is_assoc_req(t->hdr->frame_control) ||
606 ieee80211_is_reassoc_req(t->hdr->frame_control)) {
607 struct ieee80211_mgmt *mgt_frame =
608 (struct ieee80211_mgmt *)t->hdr;
609
610 if (le16_to_cpu(mgt_frame->u.assoc_req.listen_interval) <
611 priv->listen_interval) {
612 pr_debug("Modified Listen Interval to %d from %d\n",
613 priv->listen_interval,
614 mgt_frame->u.assoc_req.listen_interval);
615 /* Replace listen interval derieved from
616 * the one read from SDD
617 */
618 mgt_frame->u.assoc_req.listen_interval = cpu_to_le16(priv->listen_interval);
619 }
620 }
621
622 if (!priority) {
623 if (ieee80211_is_action(t->hdr->frame_control))
624 priority = WSM_EPTA_PRIORITY_ACTION;
625 else if (ieee80211_is_mgmt(t->hdr->frame_control))
626 priority = WSM_EPTA_PRIORITY_MGT;
627 else if ((wsm->queue_id == WSM_QUEUE_VOICE))
628 priority = WSM_EPTA_PRIORITY_VOICE;
629 else if ((wsm->queue_id == WSM_QUEUE_VIDEO))
630 priority = WSM_EPTA_PRIORITY_VIDEO;
631 else
632 priority = WSM_EPTA_PRIORITY_DATA;
633 }
634
635 pr_debug("[TX] EPTA priority %d.\n", priority);
636
637 wsm->flags |= priority << 1;
638}
639
640static int
641cw1200_tx_h_rate_policy(struct cw1200_common *priv,
642 struct cw1200_txinfo *t,
643 struct wsm_tx *wsm)
644{
645 bool tx_policy_renew = false;
646
647 t->txpriv.rate_id = tx_policy_get(priv,
648 t->tx_info->control.rates, IEEE80211_TX_MAX_RATES,
649 &tx_policy_renew);
650 if (t->txpriv.rate_id == CW1200_INVALID_RATE_ID)
651 return -EFAULT;
652
653 wsm->flags |= t->txpriv.rate_id << 4;
654
655 t->rate = cw1200_get_tx_rate(priv,
656 &t->tx_info->control.rates[0]),
657 wsm->max_tx_rate = t->rate->hw_value;
658 if (t->rate->flags & IEEE80211_TX_RC_MCS) {
659 if (cw1200_ht_greenfield(&priv->ht_info))
660 wsm->ht_tx_parameters |=
661 __cpu_to_le32(WSM_HT_TX_GREENFIELD);
662 else
663 wsm->ht_tx_parameters |=
664 __cpu_to_le32(WSM_HT_TX_MIXED);
665 }
666
667 if (tx_policy_renew) {
668 pr_debug("[TX] TX policy renew.\n");
669 /* It's not so optimal to stop TX queues every now and then.
670 * Better to reimplement task scheduling with
671 * a counter. TODO.
672 */
673 wsm_lock_tx_async(priv);
674 cw1200_tx_queues_lock(priv);
675 if (queue_work(priv->workqueue,
676 &priv->tx_policy_upload_work) <= 0) {
677 cw1200_tx_queues_unlock(priv);
678 wsm_unlock_tx(priv);
679 }
680 }
681 return 0;
682}
683
684static bool
685cw1200_tx_h_pm_state(struct cw1200_common *priv,
686 struct cw1200_txinfo *t)
687{
688 int was_buffered = 1;
689
690 if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM &&
691 !priv->buffered_multicasts) {
692 priv->buffered_multicasts = true;
693 if (priv->sta_asleep_mask)
694 queue_work(priv->workqueue,
695 &priv->multicast_start_work);
696 }
697
698 if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID)
699 was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1].buffered[t->txpriv.tid]++;
700
701 return !was_buffered;
702}
703
704/* ******************************************************************** */
705
/* Transmit entry point called by mac80211.
 *
 * Builds a per-frame cw1200_txinfo descriptor and runs the frame
 * through the tx handler chain (link id calculation, PM accounting,
 * TID, crypto, alignment, BACK-action filtering, WSM header, BT coex,
 * rate policy), then enqueues it on the per-AC tx queue and wakes the
 * bottom half.  On any failure the frame is released through
 * cw1200_skb_dtor(), which also reports tx status to mac80211.
 */
void cw1200_tx(struct ieee80211_hw *dev,
	       struct ieee80211_tx_control *control,
	       struct sk_buff *skb)
{
	struct cw1200_common *priv = dev->priv;
	struct cw1200_txinfo t = {
		.skb = skb,
		.queue = skb_get_queue_mapping(skb),
		.tx_info = IEEE80211_SKB_CB(skb),
		.hdr = (struct ieee80211_hdr *)skb->data,
		.txpriv.tid = CW1200_MAX_TID,
		.txpriv.rate_id = CW1200_INVALID_RATE_ID,
	};
	struct ieee80211_sta *sta;
	struct wsm_tx *wsm;
	bool tid_update = 0;
	u8 flags = 0;
	int ret;

	/* Device/bus is wedged; nothing can be transmitted. */
	if (priv->bh_error)
		goto drop;

	t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control);
	t.da = ieee80211_get_DA(t.hdr);
	if (control) {
		t.sta = control->sta;
		t.sta_priv = (struct cw1200_sta_priv *)&t.sta->drv_priv;
	}

	/* Only 4 hardware queues (one per AC) exist. */
	if (WARN_ON(t.queue >= 4))
		goto drop;

	ret = cw1200_tx_h_calc_link_ids(priv, &t);
	if (ret)
		goto drop;

	pr_debug("[TX] TX %d bytes (queue: %d, link_id: %d (%d)).\n",
		 skb->len, t.queue, t.txpriv.link_id,
		 t.txpriv.raw_link_id);

	cw1200_tx_h_pm(priv, &t);
	cw1200_tx_h_calc_tid(priv, &t);
	ret = cw1200_tx_h_crypt(priv, &t);
	if (ret)
		goto drop;
	ret = cw1200_tx_h_align(priv, &t, &flags);
	if (ret)
		goto drop;
	/* Non-zero means a firmware-owned BACK action frame: drop it. */
	ret = cw1200_tx_h_action(priv, &t);
	if (ret)
		goto drop;
	wsm = cw1200_tx_h_wsm(priv, &t);
	if (!wsm) {
		ret = -ENOMEM;
		goto drop;
	}
	wsm->flags |= flags;
	cw1200_tx_h_bt(priv, &t, wsm);
	ret = cw1200_tx_h_rate_policy(priv, &t, wsm);
	if (ret)
		goto drop;

	/* t.sta may be freed by mac80211 once we leave this RCU section;
	 * the queue keeps only link ids, not the sta pointer.
	 */
	rcu_read_lock();
	sta = rcu_dereference(t.sta);

	/* PM accounting and enqueue must be atomic vs. the ps state. */
	spin_lock_bh(&priv->ps_state_lock);
	{
		tid_update = cw1200_tx_h_pm_state(priv, &t);
		BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue],
					t.skb, &t.txpriv));
	}
	spin_unlock_bh(&priv->ps_state_lock);

	/* First buffered frame on this TID: inform mac80211. */
	if (tid_update && sta)
		ieee80211_sta_set_buffered(sta, t.txpriv.tid, true);

	rcu_read_unlock();

	cw1200_bh_wakeup(priv);

	return;

drop:
	cw1200_skb_dtor(priv, skb, &t.txpriv);
	return;
}
792
793/* ******************************************************************** */
794
795static int cw1200_handle_action_rx(struct cw1200_common *priv,
796 struct sk_buff *skb)
797{
798 struct ieee80211_mgmt *mgmt = (void *)skb->data;
799
800 /* Filter block ACK negotiation: fully controlled by firmware */
801 if (mgmt->u.action.category == WLAN_CATEGORY_BACK)
802 return 1;
803
804 return 0;
805}
806
807static int cw1200_handle_pspoll(struct cw1200_common *priv,
808 struct sk_buff *skb)
809{
810 struct ieee80211_sta *sta;
811 struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
812 int link_id = 0;
813 u32 pspoll_mask = 0;
814 int drop = 1;
815 int i;
816
817 if (priv->join_status != CW1200_JOIN_STATUS_AP)
818 goto done;
819 if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN))
820 goto done;
821
822 rcu_read_lock();
823 sta = ieee80211_find_sta(priv->vif, pspoll->ta);
824 if (sta) {
825 struct cw1200_sta_priv *sta_priv;
826 sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv;
827 link_id = sta_priv->link_id;
828 pspoll_mask = BIT(sta_priv->link_id);
829 }
830 rcu_read_unlock();
831 if (!link_id)
832 goto done;
833
834 priv->pspoll_mask |= pspoll_mask;
835 drop = 0;
836
837 /* Do not report pspols if data for given link id is queued already. */
838 for (i = 0; i < 4; ++i) {
839 if (cw1200_queue_get_num_queued(&priv->tx_queue[i],
840 pspoll_mask)) {
841 cw1200_bh_wakeup(priv);
842 drop = 1;
843 break;
844 }
845 }
846 pr_debug("[RX] PSPOLL: %s\n", drop ? "local" : "fwd");
847done:
848 return drop;
849}
850
851/* ******************************************************************** */
852
/* WSM tx-confirm callback.
 *
 * Handles the device's per-frame transmit confirmation: either requeues
 * the frame (implicit suspend for a sleeping station) or fills in the
 * mac80211 tx status (ACK flag, per-rate retry counts, HT flags),
 * strips crypto trailers added on tx, and removes the frame from the
 * driver queue.  Always wakes the bottom half at the end.
 */
void cw1200_tx_confirm_cb(struct cw1200_common *priv,
			  int link_id,
			  struct wsm_tx_confirm *arg)
{
	u8 queue_id = cw1200_queue_get_queue_id(arg->packet_id);
	struct cw1200_queue *queue = &priv->tx_queue[queue_id];
	struct sk_buff *skb;
	const struct cw1200_txpriv *txpriv;

	pr_debug("[TX] TX confirm: %d, %d.\n",
		 arg->status, arg->ack_failures);

	if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
		/* STA is stopped. */
		return;
	}

	if (WARN_ON(queue_id >= 4))
		return;

	if (arg->status)
		pr_debug("TX failed: %d.\n", arg->status);

	if ((arg->status == WSM_REQUEUE) &&
	    (arg->flags & WSM_TX_STATUS_REQUEUE)) {
		/* "Requeue" means "implicit suspend" */
		struct wsm_suspend_resume suspend = {
			.link_id = link_id,
			.stop = 1,
			.multicast = !link_id,
		};
		cw1200_suspend_resume(priv, &suspend);
		wiphy_warn(priv->hw->wiphy, "Requeue for link_id %d (try %d). STAs asleep: 0x%.8X\n",
			   link_id,
			   cw1200_queue_get_generation(arg->packet_id) + 1,
			   priv->sta_asleep_mask);
		cw1200_queue_requeue(queue, arg->packet_id);
		spin_lock_bh(&priv->ps_state_lock);
		/* link_id 0 is the multicast pseudo-link. */
		if (!link_id) {
			priv->buffered_multicasts = true;
			if (priv->sta_asleep_mask) {
				queue_work(priv->workqueue,
					   &priv->multicast_start_work);
			}
		}
		spin_unlock_bh(&priv->ps_state_lock);
	} else if (!cw1200_queue_get_skb(queue, arg->packet_id,
					 &skb, &txpriv)) {
		struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb);
		int tx_count = arg->ack_failures;
		u8 ht_flags = 0;
		int i;

		if (cw1200_ht_greenfield(&priv->ht_info))
			ht_flags |= IEEE80211_TX_RC_GREEN_FIELD;

		/* Feed the confirm into the BSS-loss state machine when
		 * it is waiting on this very packet.
		 */
		spin_lock(&priv->bss_loss_lock);
		if (priv->bss_loss_state &&
		    arg->packet_id == priv->bss_loss_confirm_id) {
			if (arg->status) {
				/* Recovery failed */
				__cw1200_cqm_bssloss_sm(priv, 0, 0, 1);
			} else {
				/* Recovery succeeded */
				__cw1200_cqm_bssloss_sm(priv, 0, 1, 0);
			}
		}
		spin_unlock(&priv->bss_loss_lock);

		if (!arg->status) {
			tx->flags |= IEEE80211_TX_STAT_ACK;
			++tx_count;
			cw1200_debug_txed(priv);
			if (arg->flags & WSM_TX_STATUS_AGGREGATION) {
				/* Do not report aggregation to mac80211:
				 * it confuses minstrel a lot.
				 */
				/* tx->flags |= IEEE80211_TX_STAT_AMPDU; */
				cw1200_debug_txed_agg(priv);
			}
		} else {
			if (tx_count)
				++tx_count;
		}

		/* Distribute tx_count attempts over mac80211's rate
		 * table, tagging MCS rates with the HT flags.
		 */
		for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
			if (tx->status.rates[i].count >= tx_count) {
				tx->status.rates[i].count = tx_count;
				break;
			}
			tx_count -= tx->status.rates[i].count;
			if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS)
				tx->status.rates[i].flags |= ht_flags;
		}

		/* Invalidate the remaining, unused rate slots. */
		for (++i; i < IEEE80211_TX_MAX_RATES; ++i) {
			tx->status.rates[i].count = 0;
			tx->status.rates[i].idx = -1;
		}

		/* Pull off any crypto trailers that we added on */
		if (tx->control.hw_key) {
			skb_trim(skb, skb->len - tx->control.hw_key->icv_len);
			if (tx->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
				skb_trim(skb, skb->len - 8); /* MIC space */
		}
		cw1200_queue_remove(queue, arg->packet_id);
	}
	/* XXX TODO: Only wake if there are pending transmits.. */
	cw1200_bh_wakeup(priv);
}
964
965static void cw1200_notify_buffered_tx(struct cw1200_common *priv,
966 struct sk_buff *skb, int link_id, int tid)
967{
968 struct ieee80211_sta *sta;
969 struct ieee80211_hdr *hdr;
970 u8 *buffered;
971 u8 still_buffered = 0;
972
973 if (link_id && tid < CW1200_MAX_TID) {
974 buffered = priv->link_id_db
975 [link_id - 1].buffered;
976
977 spin_lock_bh(&priv->ps_state_lock);
978 if (!WARN_ON(!buffered[tid]))
979 still_buffered = --buffered[tid];
980 spin_unlock_bh(&priv->ps_state_lock);
981
982 if (!still_buffered && tid < CW1200_MAX_TID) {
983 hdr = (struct ieee80211_hdr *)skb->data;
984 rcu_read_lock();
985 sta = ieee80211_find_sta(priv->vif, hdr->addr1);
986 if (sta)
987 ieee80211_sta_set_buffered(sta, tid, false);
988 rcu_read_unlock();
989 }
990 }
991}
992
993void cw1200_skb_dtor(struct cw1200_common *priv,
994 struct sk_buff *skb,
995 const struct cw1200_txpriv *txpriv)
996{
997 skb_pull(skb, txpriv->offset);
998 if (txpriv->rate_id != CW1200_INVALID_RATE_ID) {
999 cw1200_notify_buffered_tx(priv, skb,
1000 txpriv->raw_link_id, txpriv->tid);
1001 tx_policy_put(priv, txpriv->rate_id);
1002 }
1003 ieee80211_tx_status(priv->hw, skb);
1004}
1005
1006void cw1200_rx_cb(struct cw1200_common *priv,
1007 struct wsm_rx *arg,
1008 int link_id,
1009 struct sk_buff **skb_p)
1010{
1011 struct sk_buff *skb = *skb_p;
1012 struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
1013 struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
1014 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1015 struct cw1200_link_entry *entry = NULL;
1016 unsigned long grace_period;
1017
1018 bool early_data = false;
1019 bool p2p = priv->vif && priv->vif->p2p;
1020 size_t hdrlen;
1021 hdr->flag = 0;
1022
1023 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
1024 /* STA is stopped. */
1025 goto drop;
1026 }
1027
1028 if (link_id && link_id <= CW1200_MAX_STA_IN_AP_MODE) {
1029 entry = &priv->link_id_db[link_id - 1];
1030 if (entry->status == CW1200_LINK_SOFT &&
1031 ieee80211_is_data(frame->frame_control))
1032 early_data = true;
1033 entry->timestamp = jiffies;
1034 } else if (p2p &&
1035 ieee80211_is_action(frame->frame_control) &&
1036 (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
1037 pr_debug("[RX] Going to MAP&RESET link ID\n");
1038 WARN_ON(work_pending(&priv->linkid_reset_work));
1039 memcpy(&priv->action_frame_sa[0],
1040 ieee80211_get_SA(frame), ETH_ALEN);
1041 priv->action_linkid = 0;
1042 schedule_work(&priv->linkid_reset_work);
1043 }
1044
1045 if (link_id && p2p &&
1046 ieee80211_is_action(frame->frame_control) &&
1047 (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
1048 /* Link ID already exists for the ACTION frame.
1049 * Reset and Remap
1050 */
1051 WARN_ON(work_pending(&priv->linkid_reset_work));
1052 memcpy(&priv->action_frame_sa[0],
1053 ieee80211_get_SA(frame), ETH_ALEN);
1054 priv->action_linkid = link_id;
1055 schedule_work(&priv->linkid_reset_work);
1056 }
1057 if (arg->status) {
1058 if (arg->status == WSM_STATUS_MICFAILURE) {
1059 pr_debug("[RX] MIC failure.\n");
1060 hdr->flag |= RX_FLAG_MMIC_ERROR;
1061 } else if (arg->status == WSM_STATUS_NO_KEY_FOUND) {
1062 pr_debug("[RX] No key found.\n");
1063 goto drop;
1064 } else {
1065 pr_debug("[RX] Receive failure: %d.\n",
1066 arg->status);
1067 goto drop;
1068 }
1069 }
1070
1071 if (skb->len < sizeof(struct ieee80211_pspoll)) {
1072 wiphy_warn(priv->hw->wiphy, "Mailformed SDU rx'ed. Size is lesser than IEEE header.\n");
1073 goto drop;
1074 }
1075
1076 if (ieee80211_is_pspoll(frame->frame_control))
1077 if (cw1200_handle_pspoll(priv, skb))
1078 goto drop;
1079
1080 hdr->band = ((arg->channel_number & 0xff00) ||
1081 (arg->channel_number > 14)) ?
1082 IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
1083 hdr->freq = ieee80211_channel_to_frequency(
1084 arg->channel_number,
1085 hdr->band);
1086
1087 if (arg->rx_rate >= 14) {
1088 hdr->flag |= RX_FLAG_HT;
1089 hdr->rate_idx = arg->rx_rate - 14;
1090 } else if (arg->rx_rate >= 4) {
1091 hdr->rate_idx = arg->rx_rate - 2;
1092 } else {
1093 hdr->rate_idx = arg->rx_rate;
1094 }
1095
1096 hdr->signal = (s8)arg->rcpi_rssi;
1097 hdr->antenna = 0;
1098
1099 hdrlen = ieee80211_hdrlen(frame->frame_control);
1100
1101 if (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
1102 size_t iv_len = 0, icv_len = 0;
1103
1104 hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED;
1105
1106 /* Oops... There is no fast way to ask mac80211 about
1107 * IV/ICV lengths. Even defineas are not exposed.
1108 */
1109 switch (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
1110 case WSM_RX_STATUS_WEP:
1111 iv_len = 4 /* WEP_IV_LEN */;
1112 icv_len = 4 /* WEP_ICV_LEN */;
1113 break;
1114 case WSM_RX_STATUS_TKIP:
1115 iv_len = 8 /* TKIP_IV_LEN */;
1116 icv_len = 4 /* TKIP_ICV_LEN */
1117 + 8 /*MICHAEL_MIC_LEN*/;
1118 hdr->flag |= RX_FLAG_MMIC_STRIPPED;
1119 break;
1120 case WSM_RX_STATUS_AES:
1121 iv_len = 8 /* CCMP_HDR_LEN */;
1122 icv_len = 8 /* CCMP_MIC_LEN */;
1123 break;
1124 case WSM_RX_STATUS_WAPI:
1125 iv_len = 18 /* WAPI_HDR_LEN */;
1126 icv_len = 16 /* WAPI_MIC_LEN */;
1127 break;
1128 default:
1129 pr_warn("Unknown encryption type %d\n",
1130 WSM_RX_STATUS_ENCRYPTION(arg->flags));
1131 goto drop;
1132 }
1133
1134 /* Firmware strips ICV in case of MIC failure. */
1135 if (arg->status == WSM_STATUS_MICFAILURE)
1136 icv_len = 0;
1137
1138 if (skb->len < hdrlen + iv_len + icv_len) {
1139 wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is lesser than crypto headers.\n");
1140 goto drop;
1141 }
1142
1143 /* Remove IV, ICV and MIC */
1144 skb_trim(skb, skb->len - icv_len);
1145 memmove(skb->data + iv_len, skb->data, hdrlen);
1146 skb_pull(skb, iv_len);
1147 }
1148
1149 /* Remove TSF from the end of frame */
1150 if (arg->flags & WSM_RX_STATUS_TSF_INCLUDED) {
1151 memcpy(&hdr->mactime, skb->data + skb->len - 8, 8);
1152 hdr->mactime = le64_to_cpu(hdr->mactime);
1153 if (skb->len >= 8)
1154 skb_trim(skb, skb->len - 8);
1155 } else {
1156 hdr->mactime = 0;
1157 }
1158
1159 cw1200_debug_rxed(priv);
1160 if (arg->flags & WSM_RX_STATUS_AGGREGATE)
1161 cw1200_debug_rxed_agg(priv);
1162
1163 if (ieee80211_is_action(frame->frame_control) &&
1164 (arg->flags & WSM_RX_STATUS_ADDRESS1)) {
1165 if (cw1200_handle_action_rx(priv, skb))
1166 return;
1167 } else if (ieee80211_is_beacon(frame->frame_control) &&
1168 !arg->status &&
1169 !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid,
1170 ETH_ALEN)) {
1171 const u8 *tim_ie;
1172 u8 *ies = ((struct ieee80211_mgmt *)
1173 (skb->data))->u.beacon.variable;
1174 size_t ies_len = skb->len - (ies - (u8 *)(skb->data));
1175
1176 tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
1177 if (tim_ie) {
1178 struct ieee80211_tim_ie *tim =
1179 (struct ieee80211_tim_ie *)&tim_ie[2];
1180
1181 if (priv->join_dtim_period != tim->dtim_period) {
1182 priv->join_dtim_period = tim->dtim_period;
1183 queue_work(priv->workqueue,
1184 &priv->set_beacon_wakeup_period_work);
1185 }
1186 }
1187
1188 /* Disable beacon filter once we're associated... */
1189 if (priv->disable_beacon_filter &&
1190 (priv->vif->bss_conf.assoc ||
1191 priv->vif->bss_conf.ibss_joined)) {
1192 priv->disable_beacon_filter = false;
1193 queue_work(priv->workqueue,
1194 &priv->update_filtering_work);
1195 }
1196 }
1197
1198 /* Stay awake after frame is received to give
1199 * userspace chance to react and acquire appropriate
1200 * wakelock.
1201 */
1202 if (ieee80211_is_auth(frame->frame_control))
1203 grace_period = 5 * HZ;
1204 else if (ieee80211_is_deauth(frame->frame_control))
1205 grace_period = 5 * HZ;
1206 else
1207 grace_period = 1 * HZ;
1208 cw1200_pm_stay_awake(&priv->pm_state, grace_period);
1209
1210 if (early_data) {
1211 spin_lock_bh(&priv->ps_state_lock);
1212 /* Double-check status with lock held */
1213 if (entry->status == CW1200_LINK_SOFT)
1214 skb_queue_tail(&entry->rx_queue, skb);
1215 else
1216 ieee80211_rx_irqsafe(priv->hw, skb);
1217 spin_unlock_bh(&priv->ps_state_lock);
1218 } else {
1219 ieee80211_rx_irqsafe(priv->hw, skb);
1220 }
1221 *skb_p = NULL;
1222
1223 return;
1224
1225drop:
1226 /* TODO: update failure counters */
1227 return;
1228}
1229
1230/* ******************************************************************** */
1231/* Security */
1232
1233int cw1200_alloc_key(struct cw1200_common *priv)
1234{
1235 int idx;
1236
1237 idx = ffs(~priv->key_map) - 1;
1238 if (idx < 0 || idx > WSM_KEY_MAX_INDEX)
1239 return -1;
1240
1241 priv->key_map |= BIT(idx);
1242 priv->keys[idx].index = idx;
1243 return idx;
1244}
1245
1246void cw1200_free_key(struct cw1200_common *priv, int idx)
1247{
1248 BUG_ON(!(priv->key_map & BIT(idx)));
1249 memset(&priv->keys[idx], 0, sizeof(priv->keys[idx]));
1250 priv->key_map &= ~BIT(idx);
1251}
1252
1253void cw1200_free_keys(struct cw1200_common *priv)
1254{
1255 memset(&priv->keys, 0, sizeof(priv->keys));
1256 priv->key_map = 0;
1257}
1258
1259int cw1200_upload_keys(struct cw1200_common *priv)
1260{
1261 int idx, ret = 0;
1262 for (idx = 0; idx <= WSM_KEY_MAX_INDEX; ++idx)
1263 if (priv->key_map & BIT(idx)) {
1264 ret = wsm_add_key(priv, &priv->keys[idx]);
1265 if (ret < 0)
1266 break;
1267 }
1268 return ret;
1269}
1270
/* Workaround for WFD test case 6.1.10 */
/* Reset (and possibly remap) the link id associated with the last
 * received public action frame, recorded in action_frame_sa /
 * action_linkid by the rx path.  Runs from linkid_reset_work.
 */
void cw1200_link_id_reset(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, linkid_reset_work);
	int temp_linkid;

	if (!priv->action_linkid) {
		/* In GO mode we can receive ACTION frames without a linkID */
		temp_linkid = cw1200_alloc_link_id(priv,
				&priv->action_frame_sa[0]);
		WARN_ON(!temp_linkid);
		if (temp_linkid) {
			/* Make sure we execute the WQ */
			flush_workqueue(priv->workqueue);
			/* Release the link ID */
			spin_lock_bh(&priv->ps_state_lock);
			priv->link_id_db[temp_linkid - 1].prev_status =
				priv->link_id_db[temp_linkid - 1].status;
			priv->link_id_db[temp_linkid - 1].status =
				CW1200_LINK_RESET;
			spin_unlock_bh(&priv->ps_state_lock);
			/* Tx lock is handed over to link_id_work, which
			 * unlocks it when done (or here on queue failure).
			 */
			wsm_lock_tx_async(priv);
			if (queue_work(priv->workqueue,
				       &priv->link_id_work) <= 0)
				wsm_unlock_tx(priv);
		}
	} else {
		/* Existing link id: mark it for reset + remap. */
		spin_lock_bh(&priv->ps_state_lock);
		priv->link_id_db[priv->action_linkid - 1].prev_status =
			priv->link_id_db[priv->action_linkid - 1].status;
		priv->link_id_db[priv->action_linkid - 1].status =
			CW1200_LINK_RESET_REMAP;
		spin_unlock_bh(&priv->ps_state_lock);
		wsm_lock_tx_async(priv);
		if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
			wsm_unlock_tx(priv);
		flush_workqueue(priv->workqueue);
	}
}
1311
1312int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac)
1313{
1314 int i, ret = 0;
1315 spin_lock_bh(&priv->ps_state_lock);
1316 for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
1317 if (!memcmp(mac, priv->link_id_db[i].mac, ETH_ALEN) &&
1318 priv->link_id_db[i].status) {
1319 priv->link_id_db[i].timestamp = jiffies;
1320 ret = i + 1;
1321 break;
1322 }
1323 }
1324 spin_unlock_bh(&priv->ps_state_lock);
1325 return ret;
1326}
1327
1328int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac)
1329{
1330 int i, ret = 0;
1331 unsigned long max_inactivity = 0;
1332 unsigned long now = jiffies;
1333
1334 spin_lock_bh(&priv->ps_state_lock);
1335 for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
1336 if (!priv->link_id_db[i].status) {
1337 ret = i + 1;
1338 break;
1339 } else if (priv->link_id_db[i].status != CW1200_LINK_HARD &&
1340 !priv->tx_queue_stats.link_map_cache[i + 1]) {
1341 unsigned long inactivity =
1342 now - priv->link_id_db[i].timestamp;
1343 if (inactivity < max_inactivity)
1344 continue;
1345 max_inactivity = inactivity;
1346 ret = i + 1;
1347 }
1348 }
1349 if (ret) {
1350 struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1];
1351 pr_debug("[AP] STA added, link_id: %d\n", ret);
1352 entry->status = CW1200_LINK_RESERVE;
1353 memcpy(&entry->mac, mac, ETH_ALEN);
1354 memset(&entry->buffered, 0, CW1200_MAX_TID);
1355 skb_queue_head_init(&entry->rx_queue);
1356 wsm_lock_tx_async(priv);
1357 if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
1358 wsm_unlock_tx(priv);
1359 } else {
1360 wiphy_info(priv->hw->wiphy,
1361 "[AP] Early: no more link IDs available.\n");
1362 }
1363
1364 spin_unlock_bh(&priv->ps_state_lock);
1365 return ret;
1366}
1367
/* Deferred link id map/reset: flush pending tx, run the gc worker
 * synchronously to perform the actual device map/reset, then release
 * the tx lock reference taken by whoever queued this work.
 */
void cw1200_link_id_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, link_id_work);
	wsm_flush_tx(priv);
	cw1200_link_id_gc_work(&priv->link_id_gc_work.work);
	wsm_unlock_tx(priv);
}
1376
/* Link id garbage collector (AP mode only).
 *
 * Walks the link id table and, per entry: maps RESERVE/stale-HARD
 * entries into the device, expires SOFT entries that have been idle
 * past CW1200_LINK_ID_GC_TIMEOUT, and services RESET/RESET_REMAP
 * requests.  The ps_state_lock is dropped around every wsm_* call
 * (which may sleep) and re-acquired afterwards, so entry state is
 * re-read each iteration.  Re-arms itself via delayed work when any
 * entry still needs a future pass.
 */
void cw1200_link_id_gc_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, link_id_gc_work.work);
	struct wsm_reset reset = {
		.reset_statistics = false,
	};
	struct wsm_map_link map_link = {
		.link_id = 0,
	};
	unsigned long now = jiffies;
	unsigned long next_gc = -1;
	long ttl;
	bool need_reset;
	u32 mask;
	int i;

	if (priv->join_status != CW1200_JOIN_STATUS_AP)
		return;

	wsm_lock_tx(priv);
	spin_lock_bh(&priv->ps_state_lock);
	for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
		need_reset = false;
		mask = BIT(i + 1);
		/* New (RESERVE) entry, or a HARD entry not yet mapped. */
		if (priv->link_id_db[i].status == CW1200_LINK_RESERVE ||
		    (priv->link_id_db[i].status == CW1200_LINK_HARD &&
		     !(priv->link_id_map & mask))) {
			if (priv->link_id_map & mask) {
				/* Slot is being reused: clear stale PS state
				 * and reset the old link first.
				 */
				priv->sta_asleep_mask &= ~mask;
				priv->pspoll_mask &= ~mask;
				need_reset = true;
			}
			priv->link_id_map |= mask;
			if (priv->link_id_db[i].status != CW1200_LINK_HARD)
				priv->link_id_db[i].status = CW1200_LINK_SOFT;
			memcpy(map_link.mac_addr, priv->link_id_db[i].mac,
			       ETH_ALEN);
			/* wsm_* calls may sleep: drop the lock around them. */
			spin_unlock_bh(&priv->ps_state_lock);
			if (need_reset) {
				reset.link_id = i + 1;
				wsm_reset(priv, &reset);
			}
			map_link.link_id = i + 1;
			wsm_map_link(priv, &map_link);
			next_gc = min(next_gc, CW1200_LINK_ID_GC_TIMEOUT);
			spin_lock_bh(&priv->ps_state_lock);
		} else if (priv->link_id_db[i].status == CW1200_LINK_SOFT) {
			/* SOFT entries expire after the gc timeout. */
			ttl = priv->link_id_db[i].timestamp - now +
					CW1200_LINK_ID_GC_TIMEOUT;
			if (ttl <= 0) {
				need_reset = true;
				priv->link_id_db[i].status = CW1200_LINK_OFF;
				priv->link_id_map &= ~mask;
				priv->sta_asleep_mask &= ~mask;
				priv->pspoll_mask &= ~mask;
				memset(map_link.mac_addr, 0, ETH_ALEN);
				spin_unlock_bh(&priv->ps_state_lock);
				reset.link_id = i + 1;
				wsm_reset(priv, &reset);
				spin_lock_bh(&priv->ps_state_lock);
			} else {
				next_gc = min_t(unsigned long, next_gc, ttl);
			}
		} else if (priv->link_id_db[i].status == CW1200_LINK_RESET ||
			   priv->link_id_db[i].status ==
			   CW1200_LINK_RESET_REMAP) {
			/* Requested by cw1200_link_id_reset(): reset the
			 * link, then remap it when REMAP was asked for.
			 */
			int status = priv->link_id_db[i].status;
			priv->link_id_db[i].status =
				priv->link_id_db[i].prev_status;
			priv->link_id_db[i].timestamp = now;
			reset.link_id = i + 1;
			spin_unlock_bh(&priv->ps_state_lock);
			wsm_reset(priv, &reset);
			if (status == CW1200_LINK_RESET_REMAP) {
				memcpy(map_link.mac_addr,
				       priv->link_id_db[i].mac,
				       ETH_ALEN);
				map_link.link_id = i + 1;
				wsm_map_link(priv, &map_link);
				next_gc = min(next_gc,
					      CW1200_LINK_ID_GC_TIMEOUT);
			}
			spin_lock_bh(&priv->ps_state_lock);
		}
		if (need_reset) {
			skb_queue_purge(&priv->link_id_db[i].rx_queue);
			pr_debug("[AP] STA removed, link_id: %d\n",
				 reset.link_id);
		}
	}
	spin_unlock_bh(&priv->ps_state_lock);
	if (next_gc != -1)
		queue_delayed_work(priv->workqueue,
				   &priv->link_id_gc_work, next_gc);
	wsm_unlock_tx(priv);
}
diff --git a/drivers/net/wireless/cw1200/txrx.h b/drivers/net/wireless/cw1200/txrx.h
new file mode 100644
index 000000000000..492a4e14213b
--- /dev/null
+++ b/drivers/net/wireless/cw1200/txrx.h
@@ -0,0 +1,106 @@
1/*
2 * Datapath interface for ST-Ericsson CW1200 mac80211 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef CW1200_TXRX_H
13#define CW1200_TXRX_H
14
15#include <linux/list.h>
16
17/* extern */ struct ieee80211_hw;
18/* extern */ struct sk_buff;
19/* extern */ struct wsm_tx;
20/* extern */ struct wsm_rx;
21/* extern */ struct wsm_tx_confirm;
22/* extern */ struct cw1200_txpriv;
23
/* One device tx retry policy.  The union exposes the same 12 bytes
 * both as the packed __le32 words and as a raw byte view.
 * NOTE(review): exact semantics of defined/usage_count/retry_count/
 * uploaded live in txrx.c's tx_policy_* helpers — confirm there.
 */
struct tx_policy {
	union {
		__le32 tbl[3];	/* Packed form */
		u8 raw[12];	/* Byte view of the same data */
	};
	u8 defined;
	u8 usage_count;
	u8 retry_count;
	u8 uploaded;
};

/* A policy plus its list linkage within the cache below. */
struct tx_policy_cache_entry {
	struct tx_policy policy;
	struct list_head link;
};

#define TX_POLICY_CACHE_SIZE	(8)
/* Fixed-size cache of tx policies, split into used and free lists. */
struct tx_policy_cache {
	struct tx_policy_cache_entry cache[TX_POLICY_CACHE_SIZE];
	struct list_head used;
	struct list_head free;
	spinlock_t lock; /* Protect policy cache */
};
47
48/* ******************************************************************** */
49/* TX policy cache */
50/* Intention of TX policy cache is an overcomplicated WSM API.
51 * Device does not accept per-PDU tx retry sequence.
52 * It uses "tx retry policy id" instead, so driver code has to sync
53 * linux tx retry sequences with a retry policy table in the device.
54 */
55void tx_policy_init(struct cw1200_common *priv);
56void tx_policy_upload_work(struct work_struct *work);
57void tx_policy_clean(struct cw1200_common *priv);
58
59/* ******************************************************************** */
60/* TX implementation */
61
62u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv,
63 u32 rates);
64void cw1200_tx(struct ieee80211_hw *dev,
65 struct ieee80211_tx_control *control,
66 struct sk_buff *skb);
67void cw1200_skb_dtor(struct cw1200_common *priv,
68 struct sk_buff *skb,
69 const struct cw1200_txpriv *txpriv);
70
71/* ******************************************************************** */
72/* WSM callbacks */
73
74void cw1200_tx_confirm_cb(struct cw1200_common *priv,
75 int link_id,
76 struct wsm_tx_confirm *arg);
77void cw1200_rx_cb(struct cw1200_common *priv,
78 struct wsm_rx *arg,
79 int link_id,
80 struct sk_buff **skb_p);
81
82/* ******************************************************************** */
83/* Timeout */
84
85void cw1200_tx_timeout(struct work_struct *work);
86
87/* ******************************************************************** */
88/* Security */
89int cw1200_alloc_key(struct cw1200_common *priv);
90void cw1200_free_key(struct cw1200_common *priv, int idx);
91void cw1200_free_keys(struct cw1200_common *priv);
92int cw1200_upload_keys(struct cw1200_common *priv);
93
94/* ******************************************************************** */
95/* Workaround for WFD test case 6.1.10 */
96void cw1200_link_id_reset(struct work_struct *work);
97
98#define CW1200_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
99
100int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac);
101int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac);
102void cw1200_link_id_work(struct work_struct *work);
103void cw1200_link_id_gc_work(struct work_struct *work);
104
105
106#endif /* CW1200_TXRX_H */
diff --git a/drivers/net/wireless/cw1200/wsm.c b/drivers/net/wireless/cw1200/wsm.c
new file mode 100644
index 000000000000..cbb74d7a9be5
--- /dev/null
+++ b/drivers/net/wireless/cw1200/wsm.c
@@ -0,0 +1,1822 @@
1/*
2 * WSM host interface (HI) implementation for
3 * ST-Ericsson CW1200 mac80211 drivers.
4 *
5 * Copyright (c) 2010, ST-Ericsson
6 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/skbuff.h>
14#include <linux/wait.h>
15#include <linux/delay.h>
16#include <linux/sched.h>
17#include <linux/random.h>
18
19#include "cw1200.h"
20#include "wsm.h"
21#include "bh.h"
22#include "sta.h"
23#include "debug.h"
24
/* Command completion timeouts, in jiffies. */
#define WSM_CMD_TIMEOUT (2 * HZ) /* With respect to interrupt loss */
#define WSM_CMD_START_TIMEOUT (7 * HZ)
#define WSM_CMD_RESET_TIMEOUT (3 * HZ) /* 2 sec. timeout was observed. */
#define WSM_CMD_MAX_TIMEOUT (3 * HZ)

/* Buffer parser helpers.  Every GET/SKIP macro jumps to a local
 * `underflow:` label in the calling function when the buffer does not
 * hold enough data; every PUT macro grows the buffer through
 * wsm_buf_reserve() and jumps to a local `nomem:` label on allocation
 * failure.  Callers must therefore provide those labels. */
#define WSM_SKIP(buf, size) \
	do { \
		if ((buf)->data + size > (buf)->end) \
			goto underflow; \
		(buf)->data += size; \
	} while (0)

#define WSM_GET(buf, ptr, size) \
	do { \
		if ((buf)->data + size > (buf)->end) \
			goto underflow; \
		memcpy(ptr, (buf)->data, size); \
		(buf)->data += size; \
	} while (0)

/* Typed little-endian read (GCC statement expression yielding the
 * converted value). */
#define __WSM_GET(buf, type, type2, cvt) \
	({ \
		type val; \
		if ((buf)->data + sizeof(type) > (buf)->end) \
			goto underflow; \
		val = cvt(*(type2 *)(buf)->data); \
		(buf)->data += sizeof(type); \
		val; \
	})

#define WSM_GET8(buf) __WSM_GET(buf, u8, u8, (u8))
#define WSM_GET16(buf) __WSM_GET(buf, u16, __le16, __le16_to_cpu)
#define WSM_GET32(buf) __WSM_GET(buf, u32, __le32, __le32_to_cpu)

#define WSM_PUT(buf, ptr, size) \
	do { \
		if ((buf)->data + size > (buf)->end) \
			if (wsm_buf_reserve((buf), size)) \
				goto nomem; \
		memcpy((buf)->data, ptr, size); \
		(buf)->data += size; \
	} while (0)

/* Typed little-endian write; grows the buffer on demand. */
#define __WSM_PUT(buf, val, type, type2, cvt) \
	do { \
		if ((buf)->data + sizeof(type) > (buf)->end) \
			if (wsm_buf_reserve((buf), sizeof(type))) \
				goto nomem; \
		*(type2 *)(buf)->data = cvt(val); \
		(buf)->data += sizeof(type); \
	} while (0)

#define WSM_PUT8(buf, val) __WSM_PUT(buf, val, u8, u8, (u8))
#define WSM_PUT16(buf, val) __WSM_PUT(buf, val, u16, __le16, __cpu_to_le16)
#define WSM_PUT32(buf, val) __WSM_PUT(buf, val, u32, __le32, __cpu_to_le32)

static void wsm_buf_reset(struct wsm_buf *buf);
static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size);

static int wsm_cmd_send(struct cw1200_common *priv,
			struct wsm_buf *buf,
			void *arg, u16 cmd, long tmo);

/* All WSM command submissions are serialized on wsm_cmd_mux. */
#define wsm_cmd_lock(__priv) mutex_lock(&((__priv)->wsm_cmd_mux))
#define wsm_cmd_unlock(__priv) mutex_unlock(&((__priv)->wsm_cmd_mux))
90
91/* ******************************************************************** */
92/* WSM API implementation */
93
94static int wsm_generic_confirm(struct cw1200_common *priv,
95 void *arg,
96 struct wsm_buf *buf)
97{
98 u32 status = WSM_GET32(buf);
99 if (status != WSM_STATUS_SUCCESS)
100 return -EINVAL;
101 return 0;
102
103underflow:
104 WARN_ON(1);
105 return -EINVAL;
106}
107
108int wsm_configuration(struct cw1200_common *priv, struct wsm_configuration *arg)
109{
110 int ret;
111 struct wsm_buf *buf = &priv->wsm_cmd_buf;
112
113 wsm_cmd_lock(priv);
114
115 WSM_PUT32(buf, arg->dot11MaxTransmitMsduLifeTime);
116 WSM_PUT32(buf, arg->dot11MaxReceiveLifeTime);
117 WSM_PUT32(buf, arg->dot11RtsThreshold);
118
119 /* DPD block. */
120 WSM_PUT16(buf, arg->dpdData_size + 12);
121 WSM_PUT16(buf, 1); /* DPD version */
122 WSM_PUT(buf, arg->dot11StationId, ETH_ALEN);
123 WSM_PUT16(buf, 5); /* DPD flags */
124 WSM_PUT(buf, arg->dpdData, arg->dpdData_size);
125
126 ret = wsm_cmd_send(priv, buf, arg,
127 WSM_CONFIGURATION_REQ_ID, WSM_CMD_TIMEOUT);
128
129 wsm_cmd_unlock(priv);
130 return ret;
131
132nomem:
133 wsm_cmd_unlock(priv);
134 return -ENOMEM;
135}
136
137static int wsm_configuration_confirm(struct cw1200_common *priv,
138 struct wsm_configuration *arg,
139 struct wsm_buf *buf)
140{
141 int i;
142 int status;
143
144 status = WSM_GET32(buf);
145 if (WARN_ON(status != WSM_STATUS_SUCCESS))
146 return -EINVAL;
147
148 WSM_GET(buf, arg->dot11StationId, ETH_ALEN);
149 arg->dot11FrequencyBandsSupported = WSM_GET8(buf);
150 WSM_SKIP(buf, 1);
151 arg->supportedRateMask = WSM_GET32(buf);
152 for (i = 0; i < 2; ++i) {
153 arg->txPowerRange[i].min_power_level = WSM_GET32(buf);
154 arg->txPowerRange[i].max_power_level = WSM_GET32(buf);
155 arg->txPowerRange[i].stepping = WSM_GET32(buf);
156 }
157 return 0;
158
159underflow:
160 WARN_ON(1);
161 return -EINVAL;
162}
163
164/* ******************************************************************** */
165
166int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg)
167{
168 int ret;
169 struct wsm_buf *buf = &priv->wsm_cmd_buf;
170 u16 cmd = WSM_RESET_REQ_ID | WSM_TX_LINK_ID(arg->link_id);
171
172 wsm_cmd_lock(priv);
173
174 WSM_PUT32(buf, arg->reset_statistics ? 0 : 1);
175 ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_RESET_TIMEOUT);
176 wsm_cmd_unlock(priv);
177 return ret;
178
179nomem:
180 wsm_cmd_unlock(priv);
181 return -ENOMEM;
182}
183
184/* ******************************************************************** */
185
/* Argument bundle handed to the MIB read/write confirm handlers:
 * identifies which MIB is being accessed and the caller's buffer. */
struct wsm_mib {
	u16 mib_id;		/* WSM_MIB_ID_* */
	void *buf;		/* caller-owned data buffer */
	size_t buf_size;	/* capacity in; actual size out on read */
};
191
192int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *_buf,
193 size_t buf_size)
194{
195 int ret;
196 struct wsm_buf *buf = &priv->wsm_cmd_buf;
197 struct wsm_mib mib_buf = {
198 .mib_id = mib_id,
199 .buf = _buf,
200 .buf_size = buf_size,
201 };
202 wsm_cmd_lock(priv);
203
204 WSM_PUT16(buf, mib_id);
205 WSM_PUT16(buf, 0);
206
207 ret = wsm_cmd_send(priv, buf, &mib_buf,
208 WSM_READ_MIB_REQ_ID, WSM_CMD_TIMEOUT);
209 wsm_cmd_unlock(priv);
210 return ret;
211
212nomem:
213 wsm_cmd_unlock(priv);
214 return -ENOMEM;
215}
216
217static int wsm_read_mib_confirm(struct cw1200_common *priv,
218 struct wsm_mib *arg,
219 struct wsm_buf *buf)
220{
221 u16 size;
222 if (WARN_ON(WSM_GET32(buf) != WSM_STATUS_SUCCESS))
223 return -EINVAL;
224
225 if (WARN_ON(WSM_GET16(buf) != arg->mib_id))
226 return -EINVAL;
227
228 size = WSM_GET16(buf);
229 if (size > arg->buf_size)
230 size = arg->buf_size;
231
232 WSM_GET(buf, arg->buf, size);
233 arg->buf_size = size;
234 return 0;
235
236underflow:
237 WARN_ON(1);
238 return -EINVAL;
239}
240
241/* ******************************************************************** */
242
243int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *_buf,
244 size_t buf_size)
245{
246 int ret;
247 struct wsm_buf *buf = &priv->wsm_cmd_buf;
248 struct wsm_mib mib_buf = {
249 .mib_id = mib_id,
250 .buf = _buf,
251 .buf_size = buf_size,
252 };
253
254 wsm_cmd_lock(priv);
255
256 WSM_PUT16(buf, mib_id);
257 WSM_PUT16(buf, buf_size);
258 WSM_PUT(buf, _buf, buf_size);
259
260 ret = wsm_cmd_send(priv, buf, &mib_buf,
261 WSM_WRITE_MIB_REQ_ID, WSM_CMD_TIMEOUT);
262 wsm_cmd_unlock(priv);
263 return ret;
264
265nomem:
266 wsm_cmd_unlock(priv);
267 return -ENOMEM;
268}
269
270static int wsm_write_mib_confirm(struct cw1200_common *priv,
271 struct wsm_mib *arg,
272 struct wsm_buf *buf)
273{
274 int ret;
275
276 ret = wsm_generic_confirm(priv, arg, buf);
277 if (ret)
278 return ret;
279
280 if (arg->mib_id == WSM_MIB_ID_OPERATIONAL_POWER_MODE) {
281 /* OperationalMode: update PM status. */
282 const char *p = arg->buf;
283 cw1200_enable_powersave(priv, (p[0] & 0x0F) ? true : false);
284 }
285 return 0;
286}
287
288/* ******************************************************************** */
289
290int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg)
291{
292 int i;
293 int ret;
294 struct wsm_buf *buf = &priv->wsm_cmd_buf;
295
296 if (arg->num_channels > 48)
297 return -EINVAL;
298
299 if (arg->num_ssids > 2)
300 return -EINVAL;
301
302 if (arg->band > 1)
303 return -EINVAL;
304
305 wsm_cmd_lock(priv);
306
307 WSM_PUT8(buf, arg->band);
308 WSM_PUT8(buf, arg->type);
309 WSM_PUT8(buf, arg->flags);
310 WSM_PUT8(buf, arg->max_tx_rate);
311 WSM_PUT32(buf, arg->auto_scan_interval);
312 WSM_PUT8(buf, arg->num_probes);
313 WSM_PUT8(buf, arg->num_channels);
314 WSM_PUT8(buf, arg->num_ssids);
315 WSM_PUT8(buf, arg->probe_delay);
316
317 for (i = 0; i < arg->num_channels; ++i) {
318 WSM_PUT16(buf, arg->ch[i].number);
319 WSM_PUT16(buf, 0);
320 WSM_PUT32(buf, arg->ch[i].min_chan_time);
321 WSM_PUT32(buf, arg->ch[i].max_chan_time);
322 WSM_PUT32(buf, 0);
323 }
324
325 for (i = 0; i < arg->num_ssids; ++i) {
326 WSM_PUT32(buf, arg->ssids[i].length);
327 WSM_PUT(buf, &arg->ssids[i].ssid[0],
328 sizeof(arg->ssids[i].ssid));
329 }
330
331 ret = wsm_cmd_send(priv, buf, NULL,
332 WSM_START_SCAN_REQ_ID, WSM_CMD_TIMEOUT);
333 wsm_cmd_unlock(priv);
334 return ret;
335
336nomem:
337 wsm_cmd_unlock(priv);
338 return -ENOMEM;
339}
340
341/* ******************************************************************** */
342
343int wsm_stop_scan(struct cw1200_common *priv)
344{
345 int ret;
346 struct wsm_buf *buf = &priv->wsm_cmd_buf;
347 wsm_cmd_lock(priv);
348 ret = wsm_cmd_send(priv, buf, NULL,
349 WSM_STOP_SCAN_REQ_ID, WSM_CMD_TIMEOUT);
350 wsm_cmd_unlock(priv);
351 return ret;
352}
353
354
355static int wsm_tx_confirm(struct cw1200_common *priv,
356 struct wsm_buf *buf,
357 int link_id)
358{
359 struct wsm_tx_confirm tx_confirm;
360
361 tx_confirm.packet_id = WSM_GET32(buf);
362 tx_confirm.status = WSM_GET32(buf);
363 tx_confirm.tx_rate = WSM_GET8(buf);
364 tx_confirm.ack_failures = WSM_GET8(buf);
365 tx_confirm.flags = WSM_GET16(buf);
366 tx_confirm.media_delay = WSM_GET32(buf);
367 tx_confirm.tx_queue_delay = WSM_GET32(buf);
368
369 cw1200_tx_confirm_cb(priv, link_id, &tx_confirm);
370 return 0;
371
372underflow:
373 WARN_ON(1);
374 return -EINVAL;
375}
376
377static int wsm_multi_tx_confirm(struct cw1200_common *priv,
378 struct wsm_buf *buf, int link_id)
379{
380 int ret;
381 int count;
382 int i;
383
384 count = WSM_GET32(buf);
385 if (WARN_ON(count <= 0))
386 return -EINVAL;
387
388 if (count > 1) {
389 /* We already released one buffer, now for the rest */
390 ret = wsm_release_tx_buffer(priv, count - 1);
391 if (ret < 0)
392 return ret;
393 else if (ret > 0)
394 cw1200_bh_wakeup(priv);
395 }
396
397 cw1200_debug_txed_multi(priv, count);
398 for (i = 0; i < count; ++i) {
399 ret = wsm_tx_confirm(priv, buf, link_id);
400 if (ret)
401 return ret;
402 }
403 return ret;
404
405underflow:
406 WARN_ON(1);
407 return -EINVAL;
408}
409
410/* ******************************************************************** */
411
412static int wsm_join_confirm(struct cw1200_common *priv,
413 struct wsm_join_cnf *arg,
414 struct wsm_buf *buf)
415{
416 arg->status = WSM_GET32(buf);
417 if (WARN_ON(arg->status) != WSM_STATUS_SUCCESS)
418 return -EINVAL;
419
420 arg->min_power_level = WSM_GET32(buf);
421 arg->max_power_level = WSM_GET32(buf);
422
423 return 0;
424
425underflow:
426 WARN_ON(1);
427 return -EINVAL;
428}
429
430int wsm_join(struct cw1200_common *priv, struct wsm_join *arg)
431{
432 int ret;
433 struct wsm_buf *buf = &priv->wsm_cmd_buf;
434 struct wsm_join_cnf resp;
435 wsm_cmd_lock(priv);
436
437 WSM_PUT8(buf, arg->mode);
438 WSM_PUT8(buf, arg->band);
439 WSM_PUT16(buf, arg->channel_number);
440 WSM_PUT(buf, &arg->bssid[0], sizeof(arg->bssid));
441 WSM_PUT16(buf, arg->atim_window);
442 WSM_PUT8(buf, arg->preamble_type);
443 WSM_PUT8(buf, arg->probe_for_join);
444 WSM_PUT8(buf, arg->dtim_period);
445 WSM_PUT8(buf, arg->flags);
446 WSM_PUT32(buf, arg->ssid_len);
447 WSM_PUT(buf, &arg->ssid[0], sizeof(arg->ssid));
448 WSM_PUT32(buf, arg->beacon_interval);
449 WSM_PUT32(buf, arg->basic_rate_set);
450
451 priv->tx_burst_idx = -1;
452 ret = wsm_cmd_send(priv, buf, &resp,
453 WSM_JOIN_REQ_ID, WSM_CMD_TIMEOUT);
454 /* TODO: Update state based on resp.min|max_power_level */
455
456 priv->join_complete_status = resp.status;
457
458 wsm_cmd_unlock(priv);
459 return ret;
460
461nomem:
462 wsm_cmd_unlock(priv);
463 return -ENOMEM;
464}
465
466/* ******************************************************************** */
467
468int wsm_set_bss_params(struct cw1200_common *priv,
469 const struct wsm_set_bss_params *arg)
470{
471 int ret;
472 struct wsm_buf *buf = &priv->wsm_cmd_buf;
473
474 wsm_cmd_lock(priv);
475
476 WSM_PUT8(buf, (arg->reset_beacon_loss ? 0x1 : 0));
477 WSM_PUT8(buf, arg->beacon_lost_count);
478 WSM_PUT16(buf, arg->aid);
479 WSM_PUT32(buf, arg->operational_rate_set);
480
481 ret = wsm_cmd_send(priv, buf, NULL,
482 WSM_SET_BSS_PARAMS_REQ_ID, WSM_CMD_TIMEOUT);
483
484 wsm_cmd_unlock(priv);
485 return ret;
486
487nomem:
488 wsm_cmd_unlock(priv);
489 return -ENOMEM;
490}
491
492/* ******************************************************************** */
493
494int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg)
495{
496 int ret;
497 struct wsm_buf *buf = &priv->wsm_cmd_buf;
498
499 wsm_cmd_lock(priv);
500
501 WSM_PUT(buf, arg, sizeof(*arg));
502
503 ret = wsm_cmd_send(priv, buf, NULL,
504 WSM_ADD_KEY_REQ_ID, WSM_CMD_TIMEOUT);
505
506 wsm_cmd_unlock(priv);
507 return ret;
508
509nomem:
510 wsm_cmd_unlock(priv);
511 return -ENOMEM;
512}
513
514/* ******************************************************************** */
515
516int wsm_remove_key(struct cw1200_common *priv, const struct wsm_remove_key *arg)
517{
518 int ret;
519 struct wsm_buf *buf = &priv->wsm_cmd_buf;
520
521 wsm_cmd_lock(priv);
522
523 WSM_PUT8(buf, arg->index);
524 WSM_PUT8(buf, 0);
525 WSM_PUT16(buf, 0);
526
527 ret = wsm_cmd_send(priv, buf, NULL,
528 WSM_REMOVE_KEY_REQ_ID, WSM_CMD_TIMEOUT);
529
530 wsm_cmd_unlock(priv);
531 return ret;
532
533nomem:
534 wsm_cmd_unlock(priv);
535 return -ENOMEM;
536}
537
538/* ******************************************************************** */
539
540int wsm_set_tx_queue_params(struct cw1200_common *priv,
541 const struct wsm_set_tx_queue_params *arg, u8 id)
542{
543 int ret;
544 struct wsm_buf *buf = &priv->wsm_cmd_buf;
545 u8 queue_id_to_wmm_aci[] = {3, 2, 0, 1};
546
547 wsm_cmd_lock(priv);
548
549 WSM_PUT8(buf, queue_id_to_wmm_aci[id]);
550 WSM_PUT8(buf, 0);
551 WSM_PUT8(buf, arg->ackPolicy);
552 WSM_PUT8(buf, 0);
553 WSM_PUT32(buf, arg->maxTransmitLifetime);
554 WSM_PUT16(buf, arg->allowedMediumTime);
555 WSM_PUT16(buf, 0);
556
557 ret = wsm_cmd_send(priv, buf, NULL, 0x0012, WSM_CMD_TIMEOUT);
558
559 wsm_cmd_unlock(priv);
560 return ret;
561
562nomem:
563 wsm_cmd_unlock(priv);
564 return -ENOMEM;
565}
566
567/* ******************************************************************** */
568
569int wsm_set_edca_params(struct cw1200_common *priv,
570 const struct wsm_edca_params *arg)
571{
572 int ret;
573 struct wsm_buf *buf = &priv->wsm_cmd_buf;
574
575 wsm_cmd_lock(priv);
576
577 /* Implemented according to specification. */
578
579 WSM_PUT16(buf, arg->params[3].cwmin);
580 WSM_PUT16(buf, arg->params[2].cwmin);
581 WSM_PUT16(buf, arg->params[1].cwmin);
582 WSM_PUT16(buf, arg->params[0].cwmin);
583
584 WSM_PUT16(buf, arg->params[3].cwmax);
585 WSM_PUT16(buf, arg->params[2].cwmax);
586 WSM_PUT16(buf, arg->params[1].cwmax);
587 WSM_PUT16(buf, arg->params[0].cwmax);
588
589 WSM_PUT8(buf, arg->params[3].aifns);
590 WSM_PUT8(buf, arg->params[2].aifns);
591 WSM_PUT8(buf, arg->params[1].aifns);
592 WSM_PUT8(buf, arg->params[0].aifns);
593
594 WSM_PUT16(buf, arg->params[3].txop_limit);
595 WSM_PUT16(buf, arg->params[2].txop_limit);
596 WSM_PUT16(buf, arg->params[1].txop_limit);
597 WSM_PUT16(buf, arg->params[0].txop_limit);
598
599 WSM_PUT32(buf, arg->params[3].max_rx_lifetime);
600 WSM_PUT32(buf, arg->params[2].max_rx_lifetime);
601 WSM_PUT32(buf, arg->params[1].max_rx_lifetime);
602 WSM_PUT32(buf, arg->params[0].max_rx_lifetime);
603
604 ret = wsm_cmd_send(priv, buf, NULL,
605 WSM_EDCA_PARAMS_REQ_ID, WSM_CMD_TIMEOUT);
606 wsm_cmd_unlock(priv);
607 return ret;
608
609nomem:
610 wsm_cmd_unlock(priv);
611 return -ENOMEM;
612}
613
614/* ******************************************************************** */
615
616int wsm_switch_channel(struct cw1200_common *priv,
617 const struct wsm_switch_channel *arg)
618{
619 int ret;
620 struct wsm_buf *buf = &priv->wsm_cmd_buf;
621
622 wsm_cmd_lock(priv);
623
624 WSM_PUT8(buf, arg->mode);
625 WSM_PUT8(buf, arg->switch_count);
626 WSM_PUT16(buf, arg->channel_number);
627
628 priv->channel_switch_in_progress = 1;
629
630 ret = wsm_cmd_send(priv, buf, NULL,
631 WSM_SWITCH_CHANNEL_REQ_ID, WSM_CMD_TIMEOUT);
632 if (ret)
633 priv->channel_switch_in_progress = 0;
634
635 wsm_cmd_unlock(priv);
636 return ret;
637
638nomem:
639 wsm_cmd_unlock(priv);
640 return -ENOMEM;
641}
642
643/* ******************************************************************** */
644
/* WSM_SET_PM_REQ: request a new power-management mode.  Completion is
 * signalled asynchronously by wsm_set_pm_indication(), which clears
 * ps_mode_switch_in_progress and wakes ps_mode_switch_done.
 */
int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg)
{
	int ret;
	struct wsm_buf *buf = &priv->wsm_cmd_buf;
	/* Raised before the request goes out; cleared by the matching
	 * indication.  NOTE(review): the flag stays set if the send
	 * fails below (nomem/timeout) — confirm that is intended. */
	priv->ps_mode_switch_in_progress = 1;

	wsm_cmd_lock(priv);

	WSM_PUT8(buf, arg->mode);
	WSM_PUT8(buf, arg->fast_psm_idle_period);
	WSM_PUT8(buf, arg->ap_psm_change_period);
	WSM_PUT8(buf, arg->min_auto_pspoll_period);

	ret = wsm_cmd_send(priv, buf, NULL,
			   WSM_SET_PM_REQ_ID, WSM_CMD_TIMEOUT);

	wsm_cmd_unlock(priv);
	return ret;

nomem:
	wsm_cmd_unlock(priv);
	return -ENOMEM;
}
668
669/* ******************************************************************** */
670
671int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg)
672{
673 int ret;
674 struct wsm_buf *buf = &priv->wsm_cmd_buf;
675
676 wsm_cmd_lock(priv);
677
678 WSM_PUT8(buf, arg->mode);
679 WSM_PUT8(buf, arg->band);
680 WSM_PUT16(buf, arg->channel_number);
681 WSM_PUT32(buf, arg->ct_window);
682 WSM_PUT32(buf, arg->beacon_interval);
683 WSM_PUT8(buf, arg->dtim_period);
684 WSM_PUT8(buf, arg->preamble);
685 WSM_PUT8(buf, arg->probe_delay);
686 WSM_PUT8(buf, arg->ssid_len);
687 WSM_PUT(buf, arg->ssid, sizeof(arg->ssid));
688 WSM_PUT32(buf, arg->basic_rate_set);
689
690 priv->tx_burst_idx = -1;
691 ret = wsm_cmd_send(priv, buf, NULL,
692 WSM_START_REQ_ID, WSM_CMD_START_TIMEOUT);
693
694 wsm_cmd_unlock(priv);
695 return ret;
696
697nomem:
698 wsm_cmd_unlock(priv);
699 return -ENOMEM;
700}
701
702/* ******************************************************************** */
703
704int wsm_beacon_transmit(struct cw1200_common *priv,
705 const struct wsm_beacon_transmit *arg)
706{
707 int ret;
708 struct wsm_buf *buf = &priv->wsm_cmd_buf;
709
710 wsm_cmd_lock(priv);
711
712 WSM_PUT32(buf, arg->enable_beaconing ? 1 : 0);
713
714 ret = wsm_cmd_send(priv, buf, NULL,
715 WSM_BEACON_TRANSMIT_REQ_ID, WSM_CMD_TIMEOUT);
716
717 wsm_cmd_unlock(priv);
718 return ret;
719
720nomem:
721 wsm_cmd_unlock(priv);
722 return -ENOMEM;
723}
724
725/* ******************************************************************** */
726
727int wsm_start_find(struct cw1200_common *priv)
728{
729 int ret;
730 struct wsm_buf *buf = &priv->wsm_cmd_buf;
731
732 wsm_cmd_lock(priv);
733 ret = wsm_cmd_send(priv, buf, NULL, 0x0019, WSM_CMD_TIMEOUT);
734 wsm_cmd_unlock(priv);
735 return ret;
736}
737
738/* ******************************************************************** */
739
740int wsm_stop_find(struct cw1200_common *priv)
741{
742 int ret;
743 struct wsm_buf *buf = &priv->wsm_cmd_buf;
744
745 wsm_cmd_lock(priv);
746 ret = wsm_cmd_send(priv, buf, NULL, 0x001A, WSM_CMD_TIMEOUT);
747 wsm_cmd_unlock(priv);
748 return ret;
749}
750
751/* ******************************************************************** */
752
753int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg)
754{
755 int ret;
756 struct wsm_buf *buf = &priv->wsm_cmd_buf;
757 u16 cmd = 0x001C | WSM_TX_LINK_ID(arg->link_id);
758
759 wsm_cmd_lock(priv);
760
761 WSM_PUT(buf, &arg->mac_addr[0], sizeof(arg->mac_addr));
762 WSM_PUT16(buf, 0);
763
764 ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_TIMEOUT);
765
766 wsm_cmd_unlock(priv);
767 return ret;
768
769nomem:
770 wsm_cmd_unlock(priv);
771 return -ENOMEM;
772}
773
774/* ******************************************************************** */
775
776int wsm_update_ie(struct cw1200_common *priv,
777 const struct wsm_update_ie *arg)
778{
779 int ret;
780 struct wsm_buf *buf = &priv->wsm_cmd_buf;
781
782 wsm_cmd_lock(priv);
783
784 WSM_PUT16(buf, arg->what);
785 WSM_PUT16(buf, arg->count);
786 WSM_PUT(buf, arg->ies, arg->length);
787
788 ret = wsm_cmd_send(priv, buf, NULL, 0x001B, WSM_CMD_TIMEOUT);
789
790 wsm_cmd_unlock(priv);
791 return ret;
792
793nomem:
794 wsm_cmd_unlock(priv);
795 return -ENOMEM;
796}
797
798/* ******************************************************************** */
799int wsm_set_probe_responder(struct cw1200_common *priv, bool enable)
800{
801 priv->rx_filter.probeResponder = enable;
802 return wsm_set_rx_filter(priv, &priv->rx_filter);
803}
804
805/* ******************************************************************** */
806/* WSM indication events implementation */
/* Human-readable names for wsm_caps.fw_type, indexed 0..4; the bound
 * is enforced in wsm_startup_indication() (fw_type > 4 is rejected). */
const char * const cw1200_fw_types[] = {
	"ETF",
	"WFM",
	"WSM",
	"HI test",
	"Platform test"
};
814
/* STARTUP indication: the first message a freshly booted firmware
 * sends.  Parses the capability block into priv->wsm_caps, validates
 * it, logs a summary, removes bands the firmware does not support and
 * wakes waiters on wsm_startup_done.  Returns 0 or -EINVAL.
 */
static int wsm_startup_indication(struct cw1200_common *priv,
					struct wsm_buf *buf)
{
	priv->wsm_caps.input_buffers = WSM_GET16(buf);
	priv->wsm_caps.input_buffer_size = WSM_GET16(buf);
	priv->wsm_caps.hw_id = WSM_GET16(buf);
	priv->wsm_caps.hw_subid = WSM_GET16(buf);
	priv->wsm_caps.status = WSM_GET16(buf);
	priv->wsm_caps.fw_cap = WSM_GET16(buf);
	priv->wsm_caps.fw_type = WSM_GET16(buf);
	priv->wsm_caps.fw_api = WSM_GET16(buf);
	priv->wsm_caps.fw_build = WSM_GET16(buf);
	priv->wsm_caps.fw_ver = WSM_GET16(buf);
	WSM_GET(buf, priv->wsm_caps.fw_label, sizeof(priv->wsm_caps.fw_label));
	priv->wsm_caps.fw_label[sizeof(priv->wsm_caps.fw_label) - 1] = 0; /* Do not trust FW too much... */

	if (WARN_ON(priv->wsm_caps.status))
		return -EINVAL;

	/* fw_type indexes cw1200_fw_types[] (5 entries). */
	if (WARN_ON(priv->wsm_caps.fw_type > 4))
		return -EINVAL;

	pr_info("CW1200 WSM init done.\n"
		" Input buffers: %d x %d bytes\n"
		" Hardware: %d.%d\n"
		" %s firmware [%s], ver: %d, build: %d,"
		" api: %d, cap: 0x%.4X\n",
		priv->wsm_caps.input_buffers,
		priv->wsm_caps.input_buffer_size,
		priv->wsm_caps.hw_id, priv->wsm_caps.hw_subid,
		cw1200_fw_types[priv->wsm_caps.fw_type],
		priv->wsm_caps.fw_label, priv->wsm_caps.fw_ver,
		priv->wsm_caps.fw_build,
		priv->wsm_caps.fw_api, priv->wsm_caps.fw_cap);

	/* Disable unsupported frequency bands (fw_cap bit0 = 2.4 GHz,
	 * bit1 = 5 GHz). */
	if (!(priv->wsm_caps.fw_cap & 0x1))
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
	if (!(priv->wsm_caps.fw_cap & 0x2))
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;

	priv->firmware_ready = 1;
	wake_up(&priv->wsm_startup_done);
	return 0;

underflow:
	WARN_ON(1);
	return -EINVAL;
}
864
/* RX indication: parse the wsm_rx descriptor preceding the frame,
 * apply firmware workarounds, strip the WSM header from the skb and
 * hand the frame to cw1200_rx_cb().  The header is pushed back
 * afterwards if the callee left *skb_p alive.
 */
static int wsm_receive_indication(struct cw1200_common *priv,
				  int link_id,
				  struct wsm_buf *buf,
				  struct sk_buff **skb_p)
{
	struct wsm_rx rx;
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	__le16 fctl;

	rx.status = WSM_GET32(buf);
	rx.channel_number = WSM_GET16(buf);
	rx.rx_rate = WSM_GET8(buf);
	rx.rcpi_rssi = WSM_GET8(buf);
	rx.flags = WSM_GET32(buf);

	/* FW Workaround: Drop probe resp or
	   beacon when RSSI is 0
	 */
	hdr = (struct ieee80211_hdr *)(*skb_p)->data;

	if (!rx.rcpi_rssi &&
	    (ieee80211_is_probe_resp(hdr->frame_control) ||
	     ieee80211_is_beacon(hdr->frame_control)))
		return 0;

	/* If no RSSI subscription has been made,
	 * convert RCPI to RSSI here
	 */
	if (!priv->cqm_use_rssi)
		rx.rcpi_rssi = rx.rcpi_rssi / 2 - 110;

	/* buf->data now points at the 802.11 frame; everything before
	 * it is the WSM header being stripped from the skb. */
	fctl = *(__le16 *)buf->data;
	hdr_len = buf->data - buf->begin;
	skb_pull(*skb_p, hdr_len);
	if (!rx.status && ieee80211_is_deauth(fctl)) {
		if (priv->join_status == CW1200_JOIN_STATUS_STA) {
			/* Schedule unjoin work */
			pr_debug("[WSM] Issue unjoin command (RX).\n");
			wsm_lock_tx_async(priv);
			if (queue_work(priv->workqueue,
				       &priv->unjoin_work) <= 0)
				wsm_unlock_tx(priv);
		}
	}
	cw1200_rx_cb(priv, &rx, link_id, skb_p);
	/* The callback may consume the skb (clears *skb_p). */
	if (*skb_p)
		skb_push(*skb_p, hdr_len);

	return 0;

underflow:
	return -EINVAL;
}
919
920static int wsm_event_indication(struct cw1200_common *priv, struct wsm_buf *buf)
921{
922 int first;
923 struct cw1200_wsm_event *event;
924
925 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
926 /* STA is stopped. */
927 return 0;
928 }
929
930 event = kzalloc(sizeof(struct cw1200_wsm_event), GFP_KERNEL);
931 if (!event)
932 return -ENOMEM;
933
934 event->evt.id = WSM_GET32(buf);
935 event->evt.data = WSM_GET32(buf);
936
937 pr_debug("[WSM] Event: %d(%d)\n",
938 event->evt.id, event->evt.data);
939
940 spin_lock(&priv->event_queue_lock);
941 first = list_empty(&priv->event_queue);
942 list_add_tail(&event->link, &priv->event_queue);
943 spin_unlock(&priv->event_queue_lock);
944
945 if (first)
946 queue_work(priv->workqueue, &priv->event_handler);
947
948 return 0;
949
950underflow:
951 kfree(event);
952 return -EINVAL;
953}
954
955static int wsm_channel_switch_indication(struct cw1200_common *priv,
956 struct wsm_buf *buf)
957{
958 WARN_ON(WSM_GET32(buf));
959
960 priv->channel_switch_in_progress = 0;
961 wake_up(&priv->channel_switch_done);
962
963 wsm_unlock_tx(priv);
964
965 return 0;
966
967underflow:
968 return -EINVAL;
969}
970
971static int wsm_set_pm_indication(struct cw1200_common *priv,
972 struct wsm_buf *buf)
973{
974 /* TODO: Check buf (struct wsm_set_pm_complete) for validity */
975 if (priv->ps_mode_switch_in_progress) {
976 priv->ps_mode_switch_in_progress = 0;
977 wake_up(&priv->ps_mode_switch_done);
978 }
979 return 0;
980}
981
982static int wsm_scan_started(struct cw1200_common *priv, void *arg,
983 struct wsm_buf *buf)
984{
985 u32 status = WSM_GET32(buf);
986 if (status != WSM_STATUS_SUCCESS) {
987 cw1200_scan_failed_cb(priv);
988 return -EINVAL;
989 }
990 return 0;
991
992underflow:
993 WARN_ON(1);
994 return -EINVAL;
995}
996
997static int wsm_scan_complete_indication(struct cw1200_common *priv,
998 struct wsm_buf *buf)
999{
1000 struct wsm_scan_complete arg;
1001 arg.status = WSM_GET32(buf);
1002 arg.psm = WSM_GET8(buf);
1003 arg.num_channels = WSM_GET8(buf);
1004 cw1200_scan_complete_cb(priv, &arg);
1005
1006 return 0;
1007
1008underflow:
1009 return -EINVAL;
1010}
1011
1012static int wsm_join_complete_indication(struct cw1200_common *priv,
1013 struct wsm_buf *buf)
1014{
1015 struct wsm_join_complete arg;
1016 arg.status = WSM_GET32(buf);
1017 pr_debug("[WSM] Join complete indication, status: %d\n", arg.status);
1018 cw1200_join_complete_cb(priv, &arg);
1019
1020 return 0;
1021
1022underflow:
1023 return -EINVAL;
1024}
1025
/* Find-complete indication: stub, the payload is not parsed yet. */
static int wsm_find_complete_indication(struct cw1200_common *priv,
					struct wsm_buf *buf)
{
	pr_warn("Implement find_complete_indication\n");
	return 0;
}
1032
1033static int wsm_ba_timeout_indication(struct cw1200_common *priv,
1034 struct wsm_buf *buf)
1035{
1036 u32 dummy;
1037 u8 tid;
1038 u8 dummy2;
1039 u8 addr[ETH_ALEN];
1040
1041 dummy = WSM_GET32(buf);
1042 tid = WSM_GET8(buf);
1043 dummy2 = WSM_GET8(buf);
1044 WSM_GET(buf, addr, ETH_ALEN);
1045
1046 pr_info("BlockACK timeout, tid %d, addr %pM\n",
1047 tid, addr);
1048
1049 return 0;
1050
1051underflow:
1052 return -EINVAL;
1053}
1054
1055static int wsm_suspend_resume_indication(struct cw1200_common *priv,
1056 int link_id, struct wsm_buf *buf)
1057{
1058 u32 flags;
1059 struct wsm_suspend_resume arg;
1060
1061 flags = WSM_GET32(buf);
1062 arg.link_id = link_id;
1063 arg.stop = !(flags & 1);
1064 arg.multicast = !!(flags & 8);
1065 arg.queue = (flags >> 1) & 3;
1066
1067 cw1200_suspend_resume(priv, &arg);
1068
1069 return 0;
1070
1071underflow:
1072 return -EINVAL;
1073}
1074
1075
1076/* ******************************************************************** */
1077/* WSM TX */
1078
/* Serialize one WSM command: write the HI header into the prepared
 * command buffer, publish it for the bottom-half thread to transmit
 * and block up to @tmo jiffies for the matching confirm.
 *
 * Returns the confirm's result code, -ETIMEDOUT if the firmware never
 * answered (in which case the BH thread is terminated), or 0 when the
 * driver is already in bh_error state (to ease teardown).
 * Caller holds wsm_cmd_mux; the buffer is reset before returning.
 */
static int wsm_cmd_send(struct cw1200_common *priv,
			struct wsm_buf *buf,
			void *arg, u16 cmd, long tmo)
{
	size_t buf_len = buf->data - buf->begin;
	int ret;

	/* Don't bother if we're dead. */
	if (priv->bh_error) {
		ret = 0;
		goto done;
	}

	/* Block until the cmd buffer is completed. Tortuous.
	 * NOTE(review): this is a busy-wait that repeatedly drops and
	 * re-takes the spinlock until another context sets .done —
	 * confirm it cannot spin unboundedly on a UP/non-preempt build. */
	spin_lock(&priv->wsm_cmd.lock);
	while (!priv->wsm_cmd.done) {
		spin_unlock(&priv->wsm_cmd.lock);
		spin_lock(&priv->wsm_cmd.lock);
	}
	priv->wsm_cmd.done = 0;
	spin_unlock(&priv->wsm_cmd.lock);

	if (cmd == WSM_WRITE_MIB_REQ_ID ||
	    cmd == WSM_READ_MIB_REQ_ID)
		pr_debug("[WSM] >>> 0x%.4X [MIB: 0x%.4X] (%zu)\n",
			 cmd, __le16_to_cpu(((__le16 *)buf->begin)[2]),
			 buf_len);
	else
		pr_debug("[WSM] >>> 0x%.4X (%zu)\n", cmd, buf_len);

	/* Due to buggy SPI on CW1200, we need to
	 * pad the message by a few bytes to ensure
	 * that it's completely received.
	 */
	buf_len += 4;

	/* Fill HI message header */
	/* BH will add sequence number */
	((__le16 *)buf->begin)[0] = __cpu_to_le16(buf_len);
	((__le16 *)buf->begin)[1] = __cpu_to_le16(cmd);

	/* Publish the request for the BH thread to pick up. */
	spin_lock(&priv->wsm_cmd.lock);
	BUG_ON(priv->wsm_cmd.ptr);
	priv->wsm_cmd.ptr = buf->begin;
	priv->wsm_cmd.len = buf_len;
	priv->wsm_cmd.arg = arg;
	priv->wsm_cmd.cmd = cmd;
	spin_unlock(&priv->wsm_cmd.lock);

	cw1200_bh_wakeup(priv);

	/* Wait for command completion */
	ret = wait_event_timeout(priv->wsm_cmd_wq,
				 priv->wsm_cmd.done, tmo);

	if (!ret && !priv->wsm_cmd.done) {
		/* Timed out: reclaim the command slot ourselves. */
		spin_lock(&priv->wsm_cmd.lock);
		priv->wsm_cmd.done = 1;
		priv->wsm_cmd.ptr = NULL;
		spin_unlock(&priv->wsm_cmd.lock);
		if (priv->bh_error) {
			/* Return ok to help system cleanup */
			ret = 0;
		} else {
			pr_err("CMD req (0x%04x) stuck in firmware, killing BH\n", priv->wsm_cmd.cmd);
			print_hex_dump_bytes("REQDUMP: ", DUMP_PREFIX_NONE,
					     buf->begin, buf_len);
			pr_err("Outstanding outgoing frames: %d\n", priv->hw_bufs_used);

			/* Kill BH thread to report the error to the top layer. */
			atomic_add(1, &priv->bh_term);
			wake_up(&priv->bh_wq);
			ret = -ETIMEDOUT;
		}
	} else {
		/* Completed: the confirm handler stored its result. */
		spin_lock(&priv->wsm_cmd.lock);
		BUG_ON(!priv->wsm_cmd.done);
		ret = priv->wsm_cmd.ret;
		spin_unlock(&priv->wsm_cmd.lock);
	}
done:
	wsm_buf_reset(buf);
	return ret;
}
1163
1164/* ******************************************************************** */
1165/* WSM TX port control */
1166
1167void wsm_lock_tx(struct cw1200_common *priv)
1168{
1169 wsm_cmd_lock(priv);
1170 if (atomic_add_return(1, &priv->tx_lock) == 1) {
1171 if (wsm_flush_tx(priv))
1172 pr_debug("[WSM] TX is locked.\n");
1173 }
1174 wsm_cmd_unlock(priv);
1175}
1176
1177void wsm_lock_tx_async(struct cw1200_common *priv)
1178{
1179 if (atomic_add_return(1, &priv->tx_lock) == 1)
1180 pr_debug("[WSM] TX is locked (async).\n");
1181}
1182
/* Wait until the firmware has drained all queued TX frames.  Must be
 * called with the TX lock held.  Returns true when nothing remains
 * queued, false on a fatal BH error or when frames are stuck in
 * firmware (which also raises bh_error and kills the BH thread).
 */
bool wsm_flush_tx(struct cw1200_common *priv)
{
	unsigned long timestamp = jiffies;
	bool pending = false;
	long timeout;
	int i;

	/* Flush must be called with TX lock held. */
	BUG_ON(!atomic_read(&priv->tx_lock));

	/* First check if we really need to do something.
	 * It is safe to use unprotected access, as hw_bufs_used
	 * can only decrements.
	 */
	if (!priv->hw_bufs_used)
		return true;

	if (priv->bh_error) {
		/* In case of failure do not wait for magic. */
		pr_err("[WSM] Fatal error occured, will not flush TX.\n");
		return false;
	} else {
		/* Get a timestamp of "oldest" frame across the four
		 * TX queues. */
		for (i = 0; i < 4; ++i)
			pending |= cw1200_queue_get_xmit_timestamp(
					&priv->tx_queue[i],
					&timestamp, 0xffffffff);
		/* If there's nothing pending, we're good */
		if (!pending)
			return true;

		/* Give the oldest frame its last-chance window before
		 * declaring it stuck. */
		timeout = timestamp + WSM_CMD_LAST_CHANCE_TIMEOUT - jiffies;
		if (timeout < 0 || wait_event_timeout(priv->bh_evt_wq,
						      !priv->hw_bufs_used,
						      timeout) <= 0) {
			/* Hmmm... Not good. Frame had stuck in firmware. */
			priv->bh_error = 1;
			wiphy_err(priv->hw->wiphy, "[WSM] TX Frames (%d) stuck in firmware, killing BH\n", priv->hw_bufs_used);
			wake_up(&priv->bh_wq);
			return false;
		}

		/* Ok, everything is flushed. */
		return true;
	}
}
1229
1230void wsm_unlock_tx(struct cw1200_common *priv)
1231{
1232 int tx_lock;
1233 tx_lock = atomic_sub_return(1, &priv->tx_lock);
1234 BUG_ON(tx_lock < 0);
1235
1236 if (tx_lock == 0) {
1237 if (!priv->bh_error)
1238 cw1200_bh_wakeup(priv);
1239 pr_debug("[WSM] TX is unlocked.\n");
1240 }
1241}
1242
1243/* ******************************************************************** */
1244/* WSM RX */
1245
/* Decode and log a firmware exception indication.
 *
 * The payload carries a 32-bit reason code, 18 saved CPU registers
 * and a 48-byte file-name/assert string.  Returns 0 after logging,
 * or -EINVAL when the payload is too short (the WSM_GET* accessor
 * macros jump to the 'underflow' label on short input).
 */
int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len)
{
	struct wsm_buf buf;
	u32 reason;
	u32 reg[18];	/* R0-R12, SP, LR, PC, CPSR, SPSR — per the dump below */
	char fname[48];	/* firmware source file of the assert */
	unsigned int i;

	static const char * const reason_str[] = {
		"undefined instruction",
		"prefetch abort",
		"data abort",
		"unknown error",
	};

	/* Wrap the raw payload in a wsm_buf so WSM_GET* can bounds-check. */
	buf.begin = buf.data = data;
	buf.end = &buf.begin[len];

	reason = WSM_GET32(&buf);
	for (i = 0; i < ARRAY_SIZE(reg); ++i)
		reg[i] = WSM_GET32(&buf);
	WSM_GET(&buf, fname, sizeof(fname));

	/* Reasons 0-3 map to reason_str; anything else is a firmware
	 * assert where fname/reg[1] carry the file and line.
	 */
	if (reason < 4)
		wiphy_err(priv->hw->wiphy,
			  "Firmware exception: %s.\n",
			  reason_str[reason]);
	else
		wiphy_err(priv->hw->wiphy,
			  "Firmware assert at %.*s, line %d\n",
			  (int) sizeof(fname), fname, reg[1]);

	/* Dump R0-R11 four per line, then R12/SP/LR/PC, then CPSR/SPSR. */
	for (i = 0; i < 12; i += 4)
		wiphy_err(priv->hw->wiphy,
			  "R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X,\n",
			  i + 0, reg[i + 0], i + 1, reg[i + 1],
			  i + 2, reg[i + 2], i + 3, reg[i + 3]);
	wiphy_err(priv->hw->wiphy,
		  "R12: 0x%.8X, SP: 0x%.8X, LR: 0x%.8X, PC: 0x%.8X,\n",
		  reg[i + 0], reg[i + 1], reg[i + 2], reg[i + 3]);
	i += 4;
	wiphy_err(priv->hw->wiphy,
		  "CPSR: 0x%.8X, SPSR: 0x%.8X\n",
		  reg[i + 0], reg[i + 1]);

	print_hex_dump_bytes("R1: ", DUMP_PREFIX_NONE,
			     fname, sizeof(fname));
	return 0;

underflow:
	/* Payload shorter than expected — dump it raw for diagnosis. */
	wiphy_err(priv->hw->wiphy, "Firmware exception.\n");
	print_hex_dump_bytes("Exception: ", DUMP_PREFIX_NONE,
			     data, len);
	return -EINVAL;
}
1301
/* Dispatch one received WSM message.
 *
 * @id:    message id with the link id encoded in bits 6-9.
 * @wsm:   header of the received message; payload follows it.
 * @skb_p: skb holding the message; a receive indication handler may
 *         consume/replace it.
 *
 * Confirmations (id & 0x0400) complete the single in-flight command
 * tracked in priv->wsm_cmd; indications (id & 0x0800) are dispatched
 * to their handlers.  Returns 0 on success; any non-zero return is
 * treated as fatal by the caller (BH).
 */
int wsm_handle_rx(struct cw1200_common *priv, u16 id,
		  struct wsm_hdr *wsm, struct sk_buff **skb_p)
{
	int ret = 0;
	struct wsm_buf wsm_buf;
	int link_id = (id >> 6) & 0x0F;

	/* Strip link id. */
	id &= ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX);

	/* Buffer spans the whole message; data starts after the header. */
	wsm_buf.begin = (u8 *)&wsm[0];
	wsm_buf.data = (u8 *)&wsm[1];
	wsm_buf.end = &wsm_buf.begin[__le16_to_cpu(wsm->len)];

	pr_debug("[WSM] <<< 0x%.4X (%td)\n", id,
		 wsm_buf.end - wsm_buf.begin);

	if (id == WSM_TX_CONFIRM_IND_ID) {
		ret = wsm_tx_confirm(priv, &wsm_buf, link_id);
	} else if (id == WSM_MULTI_TX_CONFIRM_ID) {
		ret = wsm_multi_tx_confirm(priv, &wsm_buf, link_id);
	} else if (id & 0x0400) {
		void *wsm_arg;
		u16 wsm_cmd;

		/* Do not trust FW too much. Protection against repeated
		 * response and race condition removal (see above).
		 */
		spin_lock(&priv->wsm_cmd.lock);
		wsm_arg = priv->wsm_cmd.arg;
		wsm_cmd = priv->wsm_cmd.cmd &
			~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX);
		/* 0xFFFF marks "no command pending", so a duplicated
		 * confirmation cannot match again.
		 */
		priv->wsm_cmd.cmd = 0xFFFF;
		spin_unlock(&priv->wsm_cmd.lock);

		if (WARN_ON((id & ~0x0400) != wsm_cmd)) {
			/* Note that any non-zero is a fatal retcode. */
			ret = -EINVAL;
			goto out;
		}

		/* Note that wsm_arg can be NULL in case of timeout in
		 * wsm_cmd_send().
		 */

		switch (id) {
		case WSM_READ_MIB_RESP_ID:
			if (wsm_arg)
				ret = wsm_read_mib_confirm(priv, wsm_arg,
								&wsm_buf);
			break;
		case WSM_WRITE_MIB_RESP_ID:
			if (wsm_arg)
				ret = wsm_write_mib_confirm(priv, wsm_arg,
							    &wsm_buf);
			break;
		case WSM_START_SCAN_RESP_ID:
			if (wsm_arg)
				ret = wsm_scan_started(priv, wsm_arg, &wsm_buf);
			break;
		case WSM_CONFIGURATION_RESP_ID:
			if (wsm_arg)
				ret = wsm_configuration_confirm(priv, wsm_arg,
								&wsm_buf);
			break;
		case WSM_JOIN_RESP_ID:
			if (wsm_arg)
				ret = wsm_join_confirm(priv, wsm_arg, &wsm_buf);
			break;
		case WSM_STOP_SCAN_RESP_ID:
		case WSM_RESET_RESP_ID:
		case WSM_ADD_KEY_RESP_ID:
		case WSM_REMOVE_KEY_RESP_ID:
		case WSM_SET_PM_RESP_ID:
		case WSM_SET_BSS_PARAMS_RESP_ID:
		case 0x0412: /* set_tx_queue_params */
		case WSM_EDCA_PARAMS_RESP_ID:
		case WSM_SWITCH_CHANNEL_RESP_ID:
		case WSM_START_RESP_ID:
		case WSM_BEACON_TRANSMIT_RESP_ID:
		case 0x0419: /* start_find */
		case 0x041A: /* stop_find */
		case 0x041B: /* update_ie */
		case 0x041C: /* map_link */
			WARN_ON(wsm_arg != NULL);
			ret = wsm_generic_confirm(priv, wsm_arg, &wsm_buf);
			if (ret) {
				wiphy_warn(priv->hw->wiphy,
					   "wsm_generic_confirm failed for request 0x%04x.\n",
					   id & ~0x0400);

				/* often 0x407 and 0x410 occur, this means we're dead.. */
				if (priv->join_status >= CW1200_JOIN_STATUS_JOINING) {
					wsm_lock_tx(priv);
					if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
						wsm_unlock_tx(priv);
				}
			}
			break;
		default:
			wiphy_warn(priv->hw->wiphy,
				   "Unrecognized confirmation 0x%04x\n",
				   id & ~0x0400);
		}

		/* Publish the result and wake the waiter in wsm_cmd_send(). */
		spin_lock(&priv->wsm_cmd.lock);
		priv->wsm_cmd.ret = ret;
		priv->wsm_cmd.done = 1;
		spin_unlock(&priv->wsm_cmd.lock);

		ret = 0; /* Error response from device should not stop BH. */

		wake_up(&priv->wsm_cmd_wq);
	} else if (id & 0x0800) {
		/* Unsolicited indications from the firmware. */
		switch (id) {
		case WSM_STARTUP_IND_ID:
			ret = wsm_startup_indication(priv, &wsm_buf);
			break;
		case WSM_RECEIVE_IND_ID:
			ret = wsm_receive_indication(priv, link_id,
						     &wsm_buf, skb_p);
			break;
		case 0x0805:
			ret = wsm_event_indication(priv, &wsm_buf);
			break;
		case WSM_SCAN_COMPLETE_IND_ID:
			ret = wsm_scan_complete_indication(priv, &wsm_buf);
			break;
		case 0x0808:
			ret = wsm_ba_timeout_indication(priv, &wsm_buf);
			break;
		case 0x0809:
			ret = wsm_set_pm_indication(priv, &wsm_buf);
			break;
		case 0x080A:
			ret = wsm_channel_switch_indication(priv, &wsm_buf);
			break;
		case 0x080B:
			ret = wsm_find_complete_indication(priv, &wsm_buf);
			break;
		case 0x080C:
			ret = wsm_suspend_resume_indication(priv,
							    link_id, &wsm_buf);
			break;
		case 0x080F:
			ret = wsm_join_complete_indication(priv, &wsm_buf);
			break;
		default:
			pr_warn("Unrecognised WSM ID %04x\n", id);
		}
	} else {
		/* Neither a confirmation nor an indication — protocol error. */
		WARN_ON(1);
		ret = -EINVAL;
	}
out:
	return ret;
}
1459
/* Inspect a frame about to be handed to the firmware and decide whether
 * the WSM layer consumes it (probe-request-to-scan conversion, drop,
 * WEP key switch) or lets it go out as a normal transmission.
 *
 * Returns true when the frame was handled here (the caller must not
 * transmit it), false when the caller should proceed with TX.
 */
static bool wsm_handle_tx_data(struct cw1200_common *priv,
			       struct wsm_tx *wsm,
			       const struct ieee80211_tx_info *tx_info,
			       const struct cw1200_txpriv *txpriv,
			       struct cw1200_queue *queue)
{
	bool handled = false;
	/* The 802.11 header lives at txpriv->offset inside the WSM frame. */
	const struct ieee80211_hdr *frame =
		(struct ieee80211_hdr *)&((u8 *)wsm)[txpriv->offset];
	__le16 fctl = frame->frame_control;
	enum {
		do_probe,
		do_drop,
		do_wep,
		do_tx,
	} action = do_tx;

	/* First pass: mode/join-state checks that may force a drop. */
	switch (priv->mode) {
	case NL80211_IFTYPE_STATION:
		if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
			action = do_tx;
		else if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA)
			action = do_drop;
		break;
	case NL80211_IFTYPE_AP:
		if (!priv->join_status) {
			action = do_drop;
		} else if (!(BIT(txpriv->raw_link_id) &
			     (BIT(0) | priv->link_id_map))) {
			wiphy_warn(priv->hw->wiphy,
				   "A frame with expired link id is dropped.\n");
			action = do_drop;
		}
		if (cw1200_queue_get_generation(wsm->packet_id) >
		    CW1200_MAX_REQUEUE_ATTEMPTS) {
			/* HACK!!! WSM324 firmware has tendency to requeue
			 * multicast frames in a loop, causing performance
			 * drop and high power consumption of the driver.
			 * In this situation it is better just to drop
			 * the problematic frame.
			 */
			wiphy_warn(priv->hw->wiphy,
				   "Too many attempts to requeue a frame; dropped.\n");
			action = do_drop;
		}
		break;
	case NL80211_IFTYPE_ADHOC:
		if (priv->join_status != CW1200_JOIN_STATUS_IBSS)
			action = do_drop;
		break;
	case NL80211_IFTYPE_MESH_POINT:
		action = do_tx; /* TODO: Test me! */
		break;
	case NL80211_IFTYPE_MONITOR:
	default:
		action = do_drop;
		break;
	}

	/* Second pass: frame-type specific handling for frames still
	 * destined for normal TX.
	 */
	if (action == do_tx) {
		if (ieee80211_is_nullfunc(fctl)) {
			/* During BSS-loss probing, track the nullfunc's id
			 * and push it through the voice queue.
			 */
			spin_lock(&priv->bss_loss_lock);
			if (priv->bss_loss_state) {
				priv->bss_loss_confirm_id = wsm->packet_id;
				wsm->queue_id = WSM_QUEUE_VOICE;
			}
			spin_unlock(&priv->bss_loss_lock);
		} else if (ieee80211_is_probe_req(fctl)) {
			action = do_probe;
		} else if (ieee80211_is_deauth(fctl) &&
			   priv->mode != NL80211_IFTYPE_AP) {
			pr_debug("[WSM] Issue unjoin command due to tx deauth.\n");
			wsm_lock_tx_async(priv);
			if (queue_work(priv->workqueue,
				       &priv->unjoin_work) <= 0)
				wsm_unlock_tx(priv);
		} else if (ieee80211_has_protected(fctl) &&
			   tx_info->control.hw_key &&
			   tx_info->control.hw_key->keyidx != priv->wep_default_key_id &&
			   (tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
			    tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
			/* WEP frame using a different key index: the default
			 * key must be switched before transmission.
			 */
			action = do_wep;
		}
	}

	switch (action) {
	case do_probe:
		/* An interesting FW "feature". Device filters probe responses.
		 * The easiest way to get it back is to convert
		 * probe request into WSM start_scan command.
		 */
		pr_debug("[WSM] Convert probe request to scan.\n");
		wsm_lock_tx_async(priv);
		priv->pending_frame_id = wsm->packet_id;
		if (queue_delayed_work(priv->workqueue,
				       &priv->scan.probe_work, 0) <= 0)
			wsm_unlock_tx(priv);
		handled = true;
		break;
	case do_drop:
		pr_debug("[WSM] Drop frame (0x%.4X).\n", fctl);
		BUG_ON(cw1200_queue_remove(queue, wsm->packet_id));
		handled = true;
		break;
	case do_wep:
		pr_debug("[WSM] Issue set_default_wep_key.\n");
		wsm_lock_tx_async(priv);
		priv->wep_default_key_id = tx_info->control.hw_key->keyidx;
		priv->pending_frame_id = wsm->packet_id;
		if (queue_work(priv->workqueue, &priv->wep_key_work) <= 0)
			wsm_unlock_tx(priv);
		handled = true;
		break;
	case do_tx:
		pr_debug("[WSM] Transmit frame.\n");
		break;
	default:
		/* Do nothing */
		break;
	}
	return handled;
}
1582
1583static int cw1200_get_prio_queue(struct cw1200_common *priv,
1584 u32 link_id_map, int *total)
1585{
1586 static const int urgent = BIT(CW1200_LINK_ID_AFTER_DTIM) |
1587 BIT(CW1200_LINK_ID_UAPSD);
1588 struct wsm_edca_queue_params *edca;
1589 unsigned score, best = -1;
1590 int winner = -1;
1591 int queued;
1592 int i;
1593
1594 /* search for a winner using edca params */
1595 for (i = 0; i < 4; ++i) {
1596 queued = cw1200_queue_get_num_queued(&priv->tx_queue[i],
1597 link_id_map);
1598 if (!queued)
1599 continue;
1600 *total += queued;
1601 edca = &priv->edca.params[i];
1602 score = ((edca->aifns + edca->cwmin) << 16) +
1603 ((edca->cwmax - edca->cwmin) *
1604 (get_random_int() & 0xFFFF));
1605 if (score < best && (winner < 0 || i != 3)) {
1606 best = score;
1607 winner = i;
1608 }
1609 }
1610
1611 /* override winner if bursting */
1612 if (winner >= 0 && priv->tx_burst_idx >= 0 &&
1613 winner != priv->tx_burst_idx &&
1614 !cw1200_queue_get_num_queued(
1615 &priv->tx_queue[winner],
1616 link_id_map & urgent) &&
1617 cw1200_queue_get_num_queued(
1618 &priv->tx_queue[priv->tx_burst_idx],
1619 link_id_map))
1620 winner = priv->tx_burst_idx;
1621
1622 return winner;
1623}
1624
1625static int wsm_get_tx_queue_and_mask(struct cw1200_common *priv,
1626 struct cw1200_queue **queue_p,
1627 u32 *tx_allowed_mask_p,
1628 bool *more)
1629{
1630 int idx;
1631 u32 tx_allowed_mask;
1632 int total = 0;
1633
1634 /* Search for a queue with multicast frames buffered */
1635 if (priv->tx_multicast) {
1636 tx_allowed_mask = BIT(CW1200_LINK_ID_AFTER_DTIM);
1637 idx = cw1200_get_prio_queue(priv,
1638 tx_allowed_mask, &total);
1639 if (idx >= 0) {
1640 *more = total > 1;
1641 goto found;
1642 }
1643 }
1644
1645 /* Search for unicast traffic */
1646 tx_allowed_mask = ~priv->sta_asleep_mask;
1647 tx_allowed_mask |= BIT(CW1200_LINK_ID_UAPSD);
1648 if (priv->sta_asleep_mask) {
1649 tx_allowed_mask |= priv->pspoll_mask;
1650 tx_allowed_mask &= ~BIT(CW1200_LINK_ID_AFTER_DTIM);
1651 } else {
1652 tx_allowed_mask |= BIT(CW1200_LINK_ID_AFTER_DTIM);
1653 }
1654 idx = cw1200_get_prio_queue(priv,
1655 tx_allowed_mask, &total);
1656 if (idx < 0)
1657 return -ENOENT;
1658
1659found:
1660 *queue_p = &priv->tx_queue[idx];
1661 *tx_allowed_mask_p = tx_allowed_mask;
1662 return 0;
1663}
1664
/* Fetch the next buffer to send to the device: a pending WSM command
 * takes priority over data frames picked from the TX queues.
 *
 * On success *data/*tx_len describe the buffer and *burst is set to
 * the allowed burst length for the selected queue.  Returns the number
 * of buffers produced (0 or 1).
 */
int wsm_get_tx(struct cw1200_common *priv, u8 **data,
	       size_t *tx_len, int *burst)
{
	struct wsm_tx *wsm = NULL;
	struct ieee80211_tx_info *tx_info;
	struct cw1200_queue *queue = NULL;
	int queue_num;
	u32 tx_allowed_mask = 0;
	const struct cw1200_txpriv *txpriv = NULL;
	int count = 0;

	/* More is used only for broadcasts. */
	bool more = false;

	if (priv->wsm_cmd.ptr) { /* CMD request */
		++count;
		spin_lock(&priv->wsm_cmd.lock);
		BUG_ON(!priv->wsm_cmd.ptr);
		*data = priv->wsm_cmd.ptr;
		*tx_len = priv->wsm_cmd.len;
		*burst = 1;
		spin_unlock(&priv->wsm_cmd.lock);
	} else {
		for (;;) {
			int ret;

			/* atomic_add_return(0, ...) is an atomic read;
			 * a held TX lock means no data may go out.
			 */
			if (atomic_add_return(0, &priv->tx_lock))
				break;

			spin_lock_bh(&priv->ps_state_lock);

			ret = wsm_get_tx_queue_and_mask(priv, &queue,
							&tx_allowed_mask, &more);
			queue_num = queue - priv->tx_queue;

			/* No (more) multicasts to deliver: stop the
			 * after-DTIM multicast delivery window.
			 */
			if (priv->buffered_multicasts &&
			    (ret || !more) &&
			    (priv->tx_multicast || !priv->sta_asleep_mask)) {
				priv->buffered_multicasts = false;
				if (priv->tx_multicast) {
					priv->tx_multicast = false;
					queue_work(priv->workqueue,
						   &priv->multicast_stop_work);
				}
			}

			spin_unlock_bh(&priv->ps_state_lock);

			if (ret)
				break;

			if (cw1200_queue_get(queue,
					     tx_allowed_mask,
					     &wsm, &tx_info, &txpriv))
				continue;

			if (wsm_handle_tx_data(priv, wsm,
					       tx_info, txpriv, queue))
				continue; /* Handled by WSM */

			/* Re-encode the frame's link id into the WSM header. */
			wsm->hdr.id &= __cpu_to_le16(
				~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX));
			wsm->hdr.id |= cpu_to_le16(
				WSM_TX_LINK_ID(txpriv->raw_link_id));
			priv->pspoll_mask &= ~BIT(txpriv->raw_link_id);

			*data = (u8 *)wsm;
			*tx_len = __le16_to_cpu(wsm->hdr.len);

			/* allow bursting if txop is set */
			if (priv->edca.params[queue_num].txop_limit)
				*burst = min(*burst,
					     (int)cw1200_queue_get_num_queued(queue, tx_allowed_mask) + 1);
			else
				*burst = 1;

			/* store index of bursting queue */
			if (*burst > 1)
				priv->tx_burst_idx = queue_num;
			else
				priv->tx_burst_idx = -1;

			if (more) {
				struct ieee80211_hdr *hdr =
					(struct ieee80211_hdr *)
					&((u8 *)wsm)[txpriv->offset];
				/* more buffered multicast/broadcast frames
				 * ==> set MoreData flag in IEEE 802.11 header
				 * to inform PS STAs
				 */
				hdr->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
			}

			pr_debug("[WSM] >>> 0x%.4X (%zu) %p %c\n",
				 0x0004, *tx_len, *data,
				 wsm->more ? 'M' : ' ');
			++count;
			break;
		}
	}

	return count;
}
1769
1770void wsm_txed(struct cw1200_common *priv, u8 *data)
1771{
1772 if (data == priv->wsm_cmd.ptr) {
1773 spin_lock(&priv->wsm_cmd.lock);
1774 priv->wsm_cmd.ptr = NULL;
1775 spin_unlock(&priv->wsm_cmd.lock);
1776 }
1777}
1778
1779/* ******************************************************************** */
1780/* WSM buffer */
1781
1782void wsm_buf_init(struct wsm_buf *buf)
1783{
1784 BUG_ON(buf->begin);
1785 buf->begin = kmalloc(FWLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA);
1786 buf->end = buf->begin ? &buf->begin[FWLOAD_BLOCK_SIZE] : buf->begin;
1787 wsm_buf_reset(buf);
1788}
1789
1790void wsm_buf_deinit(struct wsm_buf *buf)
1791{
1792 kfree(buf->begin);
1793 buf->begin = buf->data = buf->end = NULL;
1794}
1795
1796static void wsm_buf_reset(struct wsm_buf *buf)
1797{
1798 if (buf->begin) {
1799 buf->data = &buf->begin[4];
1800 *(u32 *)buf->begin = 0;
1801 } else {
1802 buf->data = buf->begin;
1803 }
1804}
1805
1806static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size)
1807{
1808 size_t pos = buf->data - buf->begin;
1809 size_t size = pos + extra_size;
1810
1811 size = round_up(size, FWLOAD_BLOCK_SIZE);
1812
1813 buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
1814 if (buf->begin) {
1815 buf->data = &buf->begin[pos];
1816 buf->end = &buf->begin[size];
1817 return 0;
1818 } else {
1819 buf->end = buf->data = buf->begin;
1820 return -ENOMEM;
1821 }
1822}
diff --git a/drivers/net/wireless/cw1200/wsm.h b/drivers/net/wireless/cw1200/wsm.h
new file mode 100644
index 000000000000..7afc613c3706
--- /dev/null
+++ b/drivers/net/wireless/cw1200/wsm.h
@@ -0,0 +1,1870 @@
1/*
2 * WSM host interface (HI) interface for ST-Ericsson CW1200 mac80211 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * Based on CW1200 UMAC WSM API, which is
8 * Copyright (C) ST-Ericsson SA 2010
9 * Author: Stewart Mathers <stewart.mathers@stericsson.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#ifndef CW1200_WSM_H_INCLUDED
17#define CW1200_WSM_H_INCLUDED
18
19#include <linux/spinlock.h>
20
21struct cw1200_common;
22
23/* Bands */
24/* Radio band 2.412 -2.484 GHz. */
25#define WSM_PHY_BAND_2_4G (0)
26
27/* Radio band 4.9375-5.8250 GHz. */
28#define WSM_PHY_BAND_5G (1)
29
30/* Transmit rates */
31/* 1 Mbps ERP-DSSS */
32#define WSM_TRANSMIT_RATE_1 (0)
33
34/* 2 Mbps ERP-DSSS */
35#define WSM_TRANSMIT_RATE_2 (1)
36
37/* 5.5 Mbps ERP-CCK */
38#define WSM_TRANSMIT_RATE_5 (2)
39
40/* 11 Mbps ERP-CCK */
41#define WSM_TRANSMIT_RATE_11 (3)
42
43/* 22 Mbps ERP-PBCC (Not supported) */
44/* #define WSM_TRANSMIT_RATE_22 (4) */
45
46/* 33 Mbps ERP-PBCC (Not supported) */
47/* #define WSM_TRANSMIT_RATE_33 (5) */
48
49/* 6 Mbps (3 Mbps) ERP-OFDM, BPSK coding rate 1/2 */
50#define WSM_TRANSMIT_RATE_6 (6)
51
52/* 9 Mbps (4.5 Mbps) ERP-OFDM, BPSK coding rate 3/4 */
53#define WSM_TRANSMIT_RATE_9 (7)
54
55/* 12 Mbps (6 Mbps) ERP-OFDM, QPSK coding rate 1/2 */
56#define WSM_TRANSMIT_RATE_12 (8)
57
58/* 18 Mbps (9 Mbps) ERP-OFDM, QPSK coding rate 3/4 */
59#define WSM_TRANSMIT_RATE_18 (9)
60
61/* 24 Mbps (12 Mbps) ERP-OFDM, 16QAM coding rate 1/2 */
62#define WSM_TRANSMIT_RATE_24 (10)
63
64/* 36 Mbps (18 Mbps) ERP-OFDM, 16QAM coding rate 3/4 */
65#define WSM_TRANSMIT_RATE_36 (11)
66
67/* 48 Mbps (24 Mbps) ERP-OFDM, 64QAM coding rate 1/2 */
68#define WSM_TRANSMIT_RATE_48 (12)
69
70/* 54 Mbps (27 Mbps) ERP-OFDM, 64QAM coding rate 3/4 */
71#define WSM_TRANSMIT_RATE_54 (13)
72
73/* 6.5 Mbps HT-OFDM, BPSK coding rate 1/2 */
74#define WSM_TRANSMIT_RATE_HT_6 (14)
75
76/* 13 Mbps HT-OFDM, QPSK coding rate 1/2 */
77#define WSM_TRANSMIT_RATE_HT_13 (15)
78
79/* 19.5 Mbps HT-OFDM, QPSK coding rate 3/4 */
80#define WSM_TRANSMIT_RATE_HT_19 (16)
81
82/* 26 Mbps HT-OFDM, 16QAM coding rate 1/2 */
83#define WSM_TRANSMIT_RATE_HT_26 (17)
84
85/* 39 Mbps HT-OFDM, 16QAM coding rate 3/4 */
86#define WSM_TRANSMIT_RATE_HT_39 (18)
87
88/* 52 Mbps HT-OFDM, 64QAM coding rate 2/3 */
89#define WSM_TRANSMIT_RATE_HT_52 (19)
90
91/* 58.5 Mbps HT-OFDM, 64QAM coding rate 3/4 */
92#define WSM_TRANSMIT_RATE_HT_58 (20)
93
94/* 65 Mbps HT-OFDM, 64QAM coding rate 5/6 */
95#define WSM_TRANSMIT_RATE_HT_65 (21)
96
97/* Scan types */
98/* Foreground scan */
99#define WSM_SCAN_TYPE_FOREGROUND (0)
100
101/* Background scan */
102#define WSM_SCAN_TYPE_BACKGROUND (1)
103
104/* Auto scan */
105#define WSM_SCAN_TYPE_AUTO (2)
106
107/* Scan flags */
108/* Forced background scan means if the station cannot */
109/* enter the power-save mode, it shall force to perform a */
110/* background scan. Only valid when ScanType is */
111/* background scan. */
112#define WSM_SCAN_FLAG_FORCE_BACKGROUND (BIT(0))
113
114/* The WLAN device scans one channel at a time so */
115/* that disturbance to the data traffic is minimized. */
116#define WSM_SCAN_FLAG_SPLIT_METHOD (BIT(1))
117
118/* Preamble Type. Long if not set. */
119#define WSM_SCAN_FLAG_SHORT_PREAMBLE (BIT(2))
120
121/* 11n Tx Mode. Mixed if not set. */
122#define WSM_SCAN_FLAG_11N_GREENFIELD (BIT(3))
123
124/* Scan constraints */
125/* Maximum number of channels to be scanned. */
126#define WSM_SCAN_MAX_NUM_OF_CHANNELS (48)
127
128/* The maximum number of SSIDs that the device can scan for. */
129#define WSM_SCAN_MAX_NUM_OF_SSIDS (2)
130
131/* Power management modes */
132/* 802.11 Active mode */
133#define WSM_PSM_ACTIVE (0)
134
135/* 802.11 PS mode */
136#define WSM_PSM_PS BIT(0)
137
138/* Fast Power Save bit */
139#define WSM_PSM_FAST_PS_FLAG BIT(7)
140
141/* Dynamic aka Fast power save */
142#define WSM_PSM_FAST_PS (BIT(0) | BIT(7))
143
144/* Undetermined */
145/* Note : Undetermined status is reported when the */
146/* NULL data frame used to advertise the PM mode to */
147/* the AP at Pre or Post Background Scan is not Acknowledged */
148#define WSM_PSM_UNKNOWN BIT(1)
149
150/* Queue IDs */
151/* best effort/legacy */
152#define WSM_QUEUE_BEST_EFFORT (0)
153
154/* background */
155#define WSM_QUEUE_BACKGROUND (1)
156
157/* video */
158#define WSM_QUEUE_VIDEO (2)
159
160/* voice */
161#define WSM_QUEUE_VOICE (3)
162
163/* HT TX parameters */
164/* Non-HT */
165#define WSM_HT_TX_NON_HT (0)
166
167/* Mixed format */
168#define WSM_HT_TX_MIXED (1)
169
170/* Greenfield format */
171#define WSM_HT_TX_GREENFIELD (2)
172
173/* STBC allowed */
174#define WSM_HT_TX_STBC (BIT(7))
175
 176/* EPTA priority flags for BT Coex */
177/* default epta priority */
178#define WSM_EPTA_PRIORITY_DEFAULT 4
179/* use for normal data */
180#define WSM_EPTA_PRIORITY_DATA 4
181/* use for connect/disconnect/roaming*/
182#define WSM_EPTA_PRIORITY_MGT 5
183/* use for action frames */
184#define WSM_EPTA_PRIORITY_ACTION 5
185/* use for AC_VI data */
186#define WSM_EPTA_PRIORITY_VIDEO 5
187/* use for AC_VO data */
188#define WSM_EPTA_PRIORITY_VOICE 6
189/* use for EAPOL exchange */
190#define WSM_EPTA_PRIORITY_EAPOL 7
191
192/* TX status */
193/* Frame was sent aggregated */
194/* Only valid for WSM_SUCCESS status. */
195#define WSM_TX_STATUS_AGGREGATION (BIT(0))
196
197/* Host should requeue this frame later. */
198/* Valid only when status is WSM_REQUEUE. */
199#define WSM_TX_STATUS_REQUEUE (BIT(1))
200
201/* Normal Ack */
202#define WSM_TX_STATUS_NORMAL_ACK (0<<2)
203
204/* No Ack */
205#define WSM_TX_STATUS_NO_ACK (1<<2)
206
207/* No explicit acknowledgement */
208#define WSM_TX_STATUS_NO_EXPLICIT_ACK (2<<2)
209
210/* Block Ack */
211/* Only valid for WSM_SUCCESS status. */
212#define WSM_TX_STATUS_BLOCK_ACK (3<<2)
213
214/* RX status */
215/* Unencrypted */
216#define WSM_RX_STATUS_UNENCRYPTED (0<<0)
217
218/* WEP */
219#define WSM_RX_STATUS_WEP (1<<0)
220
221/* TKIP */
222#define WSM_RX_STATUS_TKIP (2<<0)
223
224/* AES */
225#define WSM_RX_STATUS_AES (3<<0)
226
227/* WAPI */
228#define WSM_RX_STATUS_WAPI (4<<0)
229
230/* Macro to fetch encryption subfield. */
231#define WSM_RX_STATUS_ENCRYPTION(status) ((status) & 0x07)
232
233/* Frame was part of an aggregation */
234#define WSM_RX_STATUS_AGGREGATE (BIT(3))
235
236/* Frame was first in the aggregation */
237#define WSM_RX_STATUS_AGGREGATE_FIRST (BIT(4))
238
239/* Frame was last in the aggregation */
240#define WSM_RX_STATUS_AGGREGATE_LAST (BIT(5))
241
242/* Indicates a defragmented frame */
243#define WSM_RX_STATUS_DEFRAGMENTED (BIT(6))
244
245/* Indicates a Beacon frame */
246#define WSM_RX_STATUS_BEACON (BIT(7))
247
248/* Indicates STA bit beacon TIM field */
249#define WSM_RX_STATUS_TIM (BIT(8))
250
251/* Indicates Beacon frame's virtual bitmap contains multicast bit */
252#define WSM_RX_STATUS_MULTICAST (BIT(9))
253
254/* Indicates frame contains a matching SSID */
255#define WSM_RX_STATUS_MATCHING_SSID (BIT(10))
256
257/* Indicates frame contains a matching BSSI */
258#define WSM_RX_STATUS_MATCHING_BSSI (BIT(11))
259
260/* Indicates More bit set in Framectl field */
261#define WSM_RX_STATUS_MORE_DATA (BIT(12))
262
263/* Indicates frame received during a measurement process */
264#define WSM_RX_STATUS_MEASUREMENT (BIT(13))
265
266/* Indicates frame received as an HT packet */
267#define WSM_RX_STATUS_HT (BIT(14))
268
269/* Indicates frame received with STBC */
270#define WSM_RX_STATUS_STBC (BIT(15))
271
272/* Indicates Address 1 field matches dot11StationId */
273#define WSM_RX_STATUS_ADDRESS1 (BIT(16))
274
275/* Indicates Group address present in the Address 1 field */
276#define WSM_RX_STATUS_GROUP (BIT(17))
277
278/* Indicates Broadcast address present in the Address 1 field */
279#define WSM_RX_STATUS_BROADCAST (BIT(18))
280
281/* Indicates group key used with encrypted frames */
282#define WSM_RX_STATUS_GROUP_KEY (BIT(19))
283
284/* Macro to fetch encryption key index. */
285#define WSM_RX_STATUS_KEY_IDX(status) (((status >> 20)) & 0x0F)
286
287/* Indicates TSF inclusion after 802.11 frame body */
288#define WSM_RX_STATUS_TSF_INCLUDED (BIT(24))
289
290/* Frame Control field starts at Frame offset + 2 */
291#define WSM_TX_2BYTES_SHIFT (BIT(7))
292
293/* Join mode */
294/* IBSS */
295#define WSM_JOIN_MODE_IBSS (0)
296
297/* BSS */
298#define WSM_JOIN_MODE_BSS (1)
299
300/* PLCP preamble type */
301/* For long preamble */
302#define WSM_JOIN_PREAMBLE_LONG (0)
303
304/* For short preamble (Long for 1Mbps) */
305#define WSM_JOIN_PREAMBLE_SHORT (1)
306
307/* For short preamble (Long for 1 and 2Mbps) */
308#define WSM_JOIN_PREAMBLE_SHORT_2 (2)
309
310/* Join flags */
311/* Unsynchronized */
312#define WSM_JOIN_FLAGS_UNSYNCRONIZED BIT(0)
313/* The BSS owner is a P2P GO */
314#define WSM_JOIN_FLAGS_P2P_GO BIT(1)
315/* Force to join BSS with the BSSID and the
316 * SSID specified without waiting for beacons. The
317 * ProbeForJoin parameter is ignored.
318 */
319#define WSM_JOIN_FLAGS_FORCE BIT(2)
320/* Give probe request/response higher
321 * priority over the BT traffic
322 */
323#define WSM_JOIN_FLAGS_PRIO BIT(3)
324/* Issue immediate join confirmation and use
325 * join complete to notify about completion
326 */
327#define WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND BIT(5)
328
329/* Key types */
330#define WSM_KEY_TYPE_WEP_DEFAULT (0)
331#define WSM_KEY_TYPE_WEP_PAIRWISE (1)
332#define WSM_KEY_TYPE_TKIP_GROUP (2)
333#define WSM_KEY_TYPE_TKIP_PAIRWISE (3)
334#define WSM_KEY_TYPE_AES_GROUP (4)
335#define WSM_KEY_TYPE_AES_PAIRWISE (5)
336#define WSM_KEY_TYPE_WAPI_GROUP (6)
337#define WSM_KEY_TYPE_WAPI_PAIRWISE (7)
338
339/* Key indexes */
340#define WSM_KEY_MAX_INDEX (10)
341
342/* ACK policy */
343#define WSM_ACK_POLICY_NORMAL (0)
344#define WSM_ACK_POLICY_NO_ACK (1)
345
346/* Start modes */
347#define WSM_START_MODE_AP (0) /* Mini AP */
348#define WSM_START_MODE_P2P_GO (1) /* P2P GO */
349#define WSM_START_MODE_P2P_DEV (2) /* P2P device */
350
351/* SetAssociationMode MIB flags */
352#define WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE (BIT(0))
353#define WSM_ASSOCIATION_MODE_USE_HT_MODE (BIT(1))
354#define WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET (BIT(2))
355#define WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING (BIT(3))
356#define WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES (BIT(4))
357
358/* RcpiRssiThreshold MIB flags */
359#define WSM_RCPI_RSSI_THRESHOLD_ENABLE (BIT(0))
360#define WSM_RCPI_RSSI_USE_RSSI (BIT(1))
361#define WSM_RCPI_RSSI_DONT_USE_UPPER (BIT(2))
362#define WSM_RCPI_RSSI_DONT_USE_LOWER (BIT(3))
363
364/* Update-ie constants */
365#define WSM_UPDATE_IE_BEACON (BIT(0))
366#define WSM_UPDATE_IE_PROBE_RESP (BIT(1))
367#define WSM_UPDATE_IE_PROBE_REQ (BIT(2))
368
369/* WSM events */
370/* Error */
371#define WSM_EVENT_ERROR (0)
372
373/* BSS lost */
374#define WSM_EVENT_BSS_LOST (1)
375
376/* BSS regained */
377#define WSM_EVENT_BSS_REGAINED (2)
378
379/* Radar detected */
380#define WSM_EVENT_RADAR_DETECTED (3)
381
382/* RCPI or RSSI threshold triggered */
383#define WSM_EVENT_RCPI_RSSI (4)
384
385/* BT inactive */
386#define WSM_EVENT_BT_INACTIVE (5)
387
388/* BT active */
389#define WSM_EVENT_BT_ACTIVE (6)
390
391/* MIB IDs */
392/* 4.1 dot11StationId */
393#define WSM_MIB_ID_DOT11_STATION_ID 0x0000
394
395/* 4.2 dot11MaxtransmitMsduLifeTime */
396#define WSM_MIB_ID_DOT11_MAX_TRANSMIT_LIFTIME 0x0001
397
398/* 4.3 dot11MaxReceiveLifeTime */
399#define WSM_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME 0x0002
400
401/* 4.4 dot11SlotTime */
402#define WSM_MIB_ID_DOT11_SLOT_TIME 0x0003
403
404/* 4.5 dot11GroupAddressesTable */
405#define WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE 0x0004
406#define WSM_MAX_GRP_ADDRTABLE_ENTRIES 8
407
408/* 4.6 dot11WepDefaultKeyId */
409#define WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID 0x0005
410
411/* 4.7 dot11CurrentTxPowerLevel */
412#define WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL 0x0006
413
414/* 4.8 dot11RTSThreshold */
415#define WSM_MIB_ID_DOT11_RTS_THRESHOLD 0x0007
416
417/* 4.9 NonErpProtection */
418#define WSM_MIB_ID_NON_ERP_PROTECTION 0x1000
419
420/* 4.10 ArpIpAddressesTable */
421#define WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE 0x1001
422#define WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES 1
423
424/* 4.11 TemplateFrame */
425#define WSM_MIB_ID_TEMPLATE_FRAME 0x1002
426
427/* 4.12 RxFilter */
428#define WSM_MIB_ID_RX_FILTER 0x1003
429
430/* 4.13 BeaconFilterTable */
431#define WSM_MIB_ID_BEACON_FILTER_TABLE 0x1004
432
433/* 4.14 BeaconFilterEnable */
434#define WSM_MIB_ID_BEACON_FILTER_ENABLE 0x1005
435
436/* 4.15 OperationalPowerMode */
437#define WSM_MIB_ID_OPERATIONAL_POWER_MODE 0x1006
438
439/* 4.16 BeaconWakeUpPeriod */
440#define WSM_MIB_ID_BEACON_WAKEUP_PERIOD 0x1007
441
442/* 4.17 RcpiRssiThreshold */
443#define WSM_MIB_ID_RCPI_RSSI_THRESHOLD 0x1009
444
445/* 4.18 StatisticsTable */
446#define WSM_MIB_ID_STATISTICS_TABLE 0x100A
447
448/* 4.19 IbssPsConfig */
449#define WSM_MIB_ID_IBSS_PS_CONFIG 0x100B
450
451/* 4.20 CountersTable */
452#define WSM_MIB_ID_COUNTERS_TABLE 0x100C
453
454/* 4.21 BlockAckPolicy */
455#define WSM_MIB_ID_BLOCK_ACK_POLICY 0x100E
456
457/* 4.22 OverrideInternalTxRate */
458#define WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE 0x100F
459
460/* 4.23 SetAssociationMode */
461#define WSM_MIB_ID_SET_ASSOCIATION_MODE 0x1010
462
463/* 4.24 UpdateEptaConfigData */
464#define WSM_MIB_ID_UPDATE_EPTA_CONFIG_DATA 0x1011
465
466/* 4.25 SelectCcaMethod */
467#define WSM_MIB_ID_SELECT_CCA_METHOD 0x1012
468
 469/* 4.26 SetUapsdInformation */
470#define WSM_MIB_ID_SET_UAPSD_INFORMATION 0x1013
471
472/* 4.27 SetAutoCalibrationMode WBF00004073 */
473#define WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE 0x1015
474
475/* 4.28 SetTxRateRetryPolicy */
476#define WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY 0x1016
477
478/* 4.29 SetHostMessageTypeFilter */
479#define WSM_MIB_ID_SET_HOST_MSG_TYPE_FILTER 0x1017
480
481/* 4.30 P2PFindInfo */
482#define WSM_MIB_ID_P2P_FIND_INFO 0x1018
483
484/* 4.31 P2PPsModeInfo */
485#define WSM_MIB_ID_P2P_PS_MODE_INFO 0x1019
486
487/* 4.32 SetEtherTypeDataFrameFilter */
488#define WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER 0x101A
489
490/* 4.33 SetUDPPortDataFrameFilter */
491#define WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER 0x101B
492
493/* 4.34 SetMagicDataFrameFilter */
494#define WSM_MIB_ID_SET_MAGIC_DATAFRAME_FILTER 0x101C
495
496/* 4.35 P2PDeviceInfo */
497#define WSM_MIB_ID_P2P_DEVICE_INFO 0x101D
498
499/* 4.36 SetWCDMABand */
500#define WSM_MIB_ID_SET_WCDMA_BAND 0x101E
501
502/* 4.37 GroupTxSequenceCounter */
503#define WSM_MIB_ID_GRP_SEQ_COUNTER 0x101F
504
505/* 4.38 ProtectedMgmtPolicy */
506#define WSM_MIB_ID_PROTECTED_MGMT_POLICY 0x1020
507
508/* 4.39 SetHtProtection */
509#define WSM_MIB_ID_SET_HT_PROTECTION 0x1021
510
511/* 4.40 GPIO Command */
512#define WSM_MIB_ID_GPIO_COMMAND 0x1022
513
514/* 4.41 TSF Counter Value */
515#define WSM_MIB_ID_TSF_COUNTER 0x1023
516
517/* Test Purposes Only */
518#define WSM_MIB_ID_BLOCK_ACK_INFO 0x100D
519
520/* 4.42 UseMultiTxConfMessage */
521#define WSM_MIB_USE_MULTI_TX_CONF 0x1024
522
523/* 4.43 Keep-alive period */
524#define WSM_MIB_ID_KEEP_ALIVE_PERIOD 0x1025
525
526/* 4.44 Disable BSSID filter */
527#define WSM_MIB_ID_DISABLE_BSSID_FILTER 0x1026
528
529/* Frame template types */
530#define WSM_FRAME_TYPE_PROBE_REQUEST (0)
531#define WSM_FRAME_TYPE_BEACON (1)
532#define WSM_FRAME_TYPE_NULL (2)
533#define WSM_FRAME_TYPE_QOS_NULL (3)
534#define WSM_FRAME_TYPE_PS_POLL (4)
535#define WSM_FRAME_TYPE_PROBE_RESPONSE (5)
536
537#define WSM_FRAME_GREENFIELD (0x80) /* See 4.11 */
538
539/* Status */
540/* The WSM firmware has completed a request */
541/* successfully. */
542#define WSM_STATUS_SUCCESS (0)
543
544/* This is a generic failure code if other error codes do */
545/* not apply. */
546#define WSM_STATUS_FAILURE (1)
547
548/* A request contains one or more invalid parameters. */
549#define WSM_INVALID_PARAMETER (2)
550
/* The request cannot be performed because the device is in */
552/* an inappropriate mode. */
553#define WSM_ACCESS_DENIED (3)
554
555/* The frame received includes a decryption error. */
556#define WSM_STATUS_DECRYPTFAILURE (4)
557
558/* A MIC failure is detected in the received packets. */
559#define WSM_STATUS_MICFAILURE (5)
560
561/* The transmit request failed due to retry limit being */
562/* exceeded. */
563#define WSM_STATUS_RETRY_EXCEEDED (6)
564
565/* The transmit request failed due to MSDU life time */
566/* being exceeded. */
567#define WSM_STATUS_TX_LIFETIME_EXCEEDED (7)
568
569/* The link to the AP is lost. */
570#define WSM_STATUS_LINK_LOST (8)
571
572/* No key was found for the encrypted frame */
573#define WSM_STATUS_NO_KEY_FOUND (9)
574
575/* Jammer was detected when transmitting this frame */
576#define WSM_STATUS_JAMMER_DETECTED (10)
577
578/* The message should be requeued later. */
579/* This is applicable only to Transmit */
580#define WSM_REQUEUE (11)
581
582/* Advanced filtering options */
583#define WSM_MAX_FILTER_ELEMENTS (4)
584
585#define WSM_FILTER_ACTION_IGNORE (0)
586#define WSM_FILTER_ACTION_FILTER_IN (1)
587#define WSM_FILTER_ACTION_FILTER_OUT (2)
588
589#define WSM_FILTER_PORT_TYPE_DST (0)
590#define WSM_FILTER_PORT_TYPE_SRC (1)
591
592/* Actual header of WSM messages */
593struct wsm_hdr {
594 __le16 len;
595 __le16 id;
596};
597
#define WSM_TX_SEQ_MAX			(7)
/* Tx sequence number occupies bits 15:13 of the WSM Tx descriptor.
 * The argument is fully parenthesized so expressions with operators of
 * lower precedence than '&' (e.g. '|', '^') are masked correctly.
 */
#define WSM_TX_SEQ(seq)			\
		(((seq) & WSM_TX_SEQ_MAX) << 13)
#define WSM_TX_LINK_ID_MAX		(0x0F)
/* Link ID occupies bits 9:6 of the WSM Tx descriptor. */
#define WSM_TX_LINK_ID(link_id)		\
		(((link_id) & WSM_TX_LINK_ID_MAX) << 6)
604
605#define MAX_BEACON_SKIP_TIME_MS 1000
606
607#define WSM_CMD_LAST_CHANCE_TIMEOUT (HZ * 3 / 2)
608
609/* ******************************************************************** */
610/* WSM capability */
611
612#define WSM_STARTUP_IND_ID 0x0801
613
614struct wsm_startup_ind {
615 u16 input_buffers;
616 u16 input_buffer_size;
617 u16 status;
618 u16 hw_id;
619 u16 hw_subid;
620 u16 fw_cap;
621 u16 fw_type;
622 u16 fw_api;
623 u16 fw_build;
624 u16 fw_ver;
625 char fw_label[128];
626 u32 config[4];
627};
628
629/* ******************************************************************** */
630/* WSM commands */
631
632/* 3.1 */
633#define WSM_CONFIGURATION_REQ_ID 0x0009
634#define WSM_CONFIGURATION_RESP_ID 0x0409
635
636struct wsm_tx_power_range {
637 int min_power_level;
638 int max_power_level;
639 u32 stepping;
640};
641
642struct wsm_configuration {
643 /* [in] */ u32 dot11MaxTransmitMsduLifeTime;
644 /* [in] */ u32 dot11MaxReceiveLifeTime;
645 /* [in] */ u32 dot11RtsThreshold;
646 /* [in, out] */ u8 *dot11StationId;
647 /* [in] */ const void *dpdData;
648 /* [in] */ size_t dpdData_size;
649 /* [out] */ u8 dot11FrequencyBandsSupported;
650 /* [out] */ u32 supportedRateMask;
651 /* [out] */ struct wsm_tx_power_range txPowerRange[2];
652};
653
654int wsm_configuration(struct cw1200_common *priv,
655 struct wsm_configuration *arg);
656
657/* 3.3 */
658#define WSM_RESET_REQ_ID 0x000A
659#define WSM_RESET_RESP_ID 0x040A
660struct wsm_reset {
661 /* [in] */ int link_id;
662 /* [in] */ bool reset_statistics;
663};
664
665int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg);
666
667/* 3.5 */
668#define WSM_READ_MIB_REQ_ID 0x0005
669#define WSM_READ_MIB_RESP_ID 0x0405
670int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *buf,
671 size_t buf_size);
672
673/* 3.7 */
674#define WSM_WRITE_MIB_REQ_ID 0x0006
675#define WSM_WRITE_MIB_RESP_ID 0x0406
676int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *buf,
677 size_t buf_size);
678
679/* 3.9 */
680#define WSM_START_SCAN_REQ_ID 0x0007
681#define WSM_START_SCAN_RESP_ID 0x0407
682
683struct wsm_ssid {
684 u8 ssid[32];
685 u32 length;
686};
687
688struct wsm_scan_ch {
689 u16 number;
690 u32 min_chan_time;
691 u32 max_chan_time;
692 u32 tx_power_level;
693};
694
695struct wsm_scan {
696 /* WSM_PHY_BAND_... */
697 u8 band;
698
699 /* WSM_SCAN_TYPE_... */
700 u8 type;
701
702 /* WSM_SCAN_FLAG_... */
703 u8 flags;
704
705 /* WSM_TRANSMIT_RATE_... */
706 u8 max_tx_rate;
707
	/* Interval period in TUs after which the device shall re-execute */
	/* the requested scan. Max value supported by the device */
	/* is 256s. */
711 u32 auto_scan_interval;
712
713 /* Number of probe requests (per SSID) sent to one (1) */
	/* channel. Zero (0) means that none is sent, which */
715 /* means that a passive scan is to be done. Value */
716 /* greater than zero (0) means that an active scan is to */
717 /* be done. */
718 u32 num_probes;
719
720 /* Number of channels to be scanned. */
721 /* Maximum value is WSM_SCAN_MAX_NUM_OF_CHANNELS. */
722 u8 num_channels;
723
724 /* Number of SSID provided in the scan command (this */
725 /* is zero (0) in broadcast scan) */
726 /* The maximum number of SSIDs is WSM_SCAN_MAX_NUM_OF_SSIDS. */
727 u8 num_ssids;
728
729 /* The delay time (in microseconds) period */
730 /* before sending a probe-request. */
731 u8 probe_delay;
732
733 /* SSIDs to be scanned [numOfSSIDs]; */
734 struct wsm_ssid *ssids;
735
736 /* Channels to be scanned [numOfChannels]; */
737 struct wsm_scan_ch *ch;
738};
739
740int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg);
741
742/* 3.11 */
743#define WSM_STOP_SCAN_REQ_ID 0x0008
744#define WSM_STOP_SCAN_RESP_ID 0x0408
745int wsm_stop_scan(struct cw1200_common *priv);
746
747/* 3.13 */
748#define WSM_SCAN_COMPLETE_IND_ID 0x0806
749struct wsm_scan_complete {
750 /* WSM_STATUS_... */
751 u32 status;
752
753 /* WSM_PSM_... */
754 u8 psm;
755
756 /* Number of channels that the scan operation completed. */
757 u8 num_channels;
758};
759
760/* 3.14 */
761#define WSM_TX_CONFIRM_IND_ID 0x0404
762#define WSM_MULTI_TX_CONFIRM_ID 0x041E
763
764struct wsm_tx_confirm {
765 /* Packet identifier used in wsm_tx. */
766 u32 packet_id;
767
768 /* WSM_STATUS_... */
769 u32 status;
770
771 /* WSM_TRANSMIT_RATE_... */
772 u8 tx_rate;
773
774 /* The number of times the frame was transmitted */
775 /* without receiving an acknowledgement. */
776 u8 ack_failures;
777
778 /* WSM_TX_STATUS_... */
779 u16 flags;
780
781 /* The total time in microseconds that the frame spent in */
	/* the WLAN device before transmission was completed. */
783 u32 media_delay;
784
785 /* The total time in microseconds that the frame spent in */
786 /* the WLAN device before transmission was started. */
787 u32 tx_queue_delay;
788};
789
790/* 3.15 */
791typedef void (*wsm_tx_confirm_cb) (struct cw1200_common *priv,
792 struct wsm_tx_confirm *arg);
793
/* Note that the design of the wsm_tx struct is different from the rest of
 * the WSM API. wsm_hdr is /not/ a caller-adapted struct to be used as an input
 * argument for a WSM call, but a prepared bytestream to be sent to firmware.
 * It is filled partly in cw1200_tx and partly in low-level WSM code.
 * Please pay attention once again: the design is different.
799 *
800 * Legend:
801 * - [in]: cw1200_tx must fill this field.
802 * - [wsm]: the field is filled by low-level WSM.
803 */
804struct wsm_tx {
805 /* common WSM header */
806 struct wsm_hdr hdr;
807
808 /* Packet identifier that meant to be used in completion. */
809 u32 packet_id; /* Note this is actually a cookie */
810
811 /* WSM_TRANSMIT_RATE_... */
812 u8 max_tx_rate;
813
814 /* WSM_QUEUE_... */
815 u8 queue_id;
816
817 /* True: another packet is pending on the host for transmission. */
818 u8 more;
819
820 /* Bit 0 = 0 - Start expiry time from first Tx attempt (default) */
821 /* Bit 0 = 1 - Start expiry time from receipt of Tx Request */
822 /* Bits 3:1 - PTA Priority */
823 /* Bits 6:4 - Tx Rate Retry Policy */
824 /* Bit 7 - Reserved */
825 u8 flags;
826
827 /* Should be 0. */
828 u32 reserved;
829
830 /* The elapsed time in TUs, after the initial transmission */
831 /* of an MSDU, after which further attempts to transmit */
832 /* the MSDU shall be terminated. Overrides the global */
833 /* dot11MaxTransmitMsduLifeTime setting [optional] */
834 /* Device will set the default value if this is 0. */
835 u32 expire_time;
836
837 /* WSM_HT_TX_... */
838 __le32 ht_tx_parameters;
839} __packed;
840
841/* = sizeof(generic hi hdr) + sizeof(wsm hdr) + sizeof(alignment) */
842#define WSM_TX_EXTRA_HEADROOM (28)
843
844/* 3.16 */
845#define WSM_RECEIVE_IND_ID 0x0804
846
847struct wsm_rx {
848 /* WSM_STATUS_... */
849 u32 status;
850
851 /* Specifies the channel of the received packet. */
852 u16 channel_number;
853
854 /* WSM_TRANSMIT_RATE_... */
855 u8 rx_rate;
856
857 /* This value is expressed in signed Q8.0 format for */
858 /* RSSI and unsigned Q7.1 format for RCPI. */
859 u8 rcpi_rssi;
860
861 /* WSM_RX_STATUS_... */
862 u32 flags;
863};
864
865/* = sizeof(generic hi hdr) + sizeof(wsm hdr) */
866#define WSM_RX_EXTRA_HEADROOM (16)
867
868typedef void (*wsm_rx_cb) (struct cw1200_common *priv, struct wsm_rx *arg,
869 struct sk_buff **skb_p);
870
871/* 3.17 */
872struct wsm_event {
873 /* WSM_STATUS_... */
874 /* [out] */ u32 id;
875
876 /* Indication parameters. */
877 /* For error indication, this shall be a 32-bit WSM status. */
878 /* For RCPI or RSSI indication, this should be an 8-bit */
879 /* RCPI or RSSI value. */
880 /* [out] */ u32 data;
881};
882
883struct cw1200_wsm_event {
884 struct list_head link;
885 struct wsm_event evt;
886};
887
888/* 3.18 - 3.22 */
/* Measurement. Skipped for now. Irrelevant. */
890
891typedef void (*wsm_event_cb) (struct cw1200_common *priv,
892 struct wsm_event *arg);
893
894/* 3.23 */
895#define WSM_JOIN_REQ_ID 0x000B
896#define WSM_JOIN_RESP_ID 0x040B
897
898struct wsm_join {
899 /* WSM_JOIN_MODE_... */
900 u8 mode;
901
902 /* WSM_PHY_BAND_... */
903 u8 band;
904
905 /* Specifies the channel number to join. The channel */
906 /* number will be mapped to an actual frequency */
907 /* according to the band */
908 u16 channel_number;
909
910 /* Specifies the BSSID of the BSS or IBSS to be joined */
911 /* or the IBSS to be started. */
912 u8 bssid[6];
913
914 /* ATIM window of IBSS */
915 /* When ATIM window is zero the initiated IBSS does */
916 /* not support power saving. */
917 u16 atim_window;
918
919 /* WSM_JOIN_PREAMBLE_... */
920 u8 preamble_type;
921
	/* Specifies if a probe request should be sent with the */
923 /* specified SSID when joining to the network. */
924 u8 probe_for_join;
925
926 /* DTIM Period (In multiples of beacon interval) */
927 u8 dtim_period;
928
929 /* WSM_JOIN_FLAGS_... */
930 u8 flags;
931
932 /* Length of the SSID */
933 u32 ssid_len;
934
935 /* Specifies the SSID of the IBSS to join or start */
936 u8 ssid[32];
937
938 /* Specifies the time between TBTTs in TUs */
939 u32 beacon_interval;
940
941 /* A bit mask that defines the BSS basic rate set. */
942 u32 basic_rate_set;
943};
944
945struct wsm_join_cnf {
946 u32 status;
947
948 /* Minimum transmission power level in units of 0.1dBm */
949 u32 min_power_level;
950
951 /* Maximum transmission power level in units of 0.1dBm */
952 u32 max_power_level;
953};
954
955int wsm_join(struct cw1200_common *priv, struct wsm_join *arg);
956
957/* 3.24 */
958struct wsm_join_complete {
959 /* WSM_STATUS_... */
960 u32 status;
961};
962
963/* 3.25 */
964#define WSM_SET_PM_REQ_ID 0x0010
965#define WSM_SET_PM_RESP_ID 0x0410
966struct wsm_set_pm {
967 /* WSM_PSM_... */
968 u8 mode;
969
970 /* in unit of 500us; 0 to use default */
971 u8 fast_psm_idle_period;
972
973 /* in unit of 500us; 0 to use default */
974 u8 ap_psm_change_period;
975
976 /* in unit of 500us; 0 to disable auto-pspoll */
977 u8 min_auto_pspoll_period;
978};
979
980int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg);
981
982/* 3.27 */
983struct wsm_set_pm_complete {
984 u8 psm; /* WSM_PSM_... */
985};
986
987/* 3.28 */
988#define WSM_SET_BSS_PARAMS_REQ_ID 0x0011
989#define WSM_SET_BSS_PARAMS_RESP_ID 0x0411
990struct wsm_set_bss_params {
991 /* This resets the beacon loss counters only */
992 u8 reset_beacon_loss;
993
994 /* The number of lost consecutive beacons after which */
995 /* the WLAN device should indicate the BSS-Lost event */
996 /* to the WLAN host driver. */
997 u8 beacon_lost_count;
998
999 /* The AID received during the association process. */
1000 u16 aid;
1001
1002 /* The operational rate set mask */
1003 u32 operational_rate_set;
1004};
1005
1006int wsm_set_bss_params(struct cw1200_common *priv,
1007 const struct wsm_set_bss_params *arg);
1008
1009/* 3.30 */
1010#define WSM_ADD_KEY_REQ_ID 0x000C
1011#define WSM_ADD_KEY_RESP_ID 0x040C
1012struct wsm_add_key {
1013 u8 type; /* WSM_KEY_TYPE_... */
1014 u8 index; /* Key entry index: 0 -- WSM_KEY_MAX_INDEX */
1015 u16 reserved;
1016 union {
1017 struct {
1018 u8 peer[6]; /* MAC address of the peer station */
1019 u8 reserved;
1020 u8 keylen; /* Key length in bytes */
1021 u8 keydata[16]; /* Key data */
1022 } __packed wep_pairwise;
1023 struct {
1024 u8 keyid; /* Unique per key identifier (0..3) */
1025 u8 keylen; /* Key length in bytes */
1026 u16 reserved;
1027 u8 keydata[16]; /* Key data */
1028 } __packed wep_group;
1029 struct {
1030 u8 peer[6]; /* MAC address of the peer station */
1031 u16 reserved;
1032 u8 keydata[16]; /* TKIP key data */
1033 u8 rx_mic_key[8]; /* Rx MIC key */
1034 u8 tx_mic_key[8]; /* Tx MIC key */
1035 } __packed tkip_pairwise;
1036 struct {
1037 u8 keydata[16]; /* TKIP key data */
1038 u8 rx_mic_key[8]; /* Rx MIC key */
1039 u8 keyid; /* Key ID */
1040 u8 reserved[3];
1041 u8 rx_seqnum[8]; /* Receive Sequence Counter */
1042 } __packed tkip_group;
1043 struct {
1044 u8 peer[6]; /* MAC address of the peer station */
1045 u16 reserved;
1046 u8 keydata[16]; /* AES key data */
1047 } __packed aes_pairwise;
1048 struct {
1049 u8 keydata[16]; /* AES key data */
1050 u8 keyid; /* Key ID */
1051 u8 reserved[3];
1052 u8 rx_seqnum[8]; /* Receive Sequence Counter */
1053 } __packed aes_group;
1054 struct {
1055 u8 peer[6]; /* MAC address of the peer station */
1056 u8 keyid; /* Key ID */
1057 u8 reserved;
1058 u8 keydata[16]; /* WAPI key data */
1059 u8 mic_key[16]; /* MIC key data */
1060 } __packed wapi_pairwise;
1061 struct {
1062 u8 keydata[16]; /* WAPI key data */
1063 u8 mic_key[16]; /* MIC key data */
1064 u8 keyid; /* Key ID */
1065 u8 reserved[3];
1066 } __packed wapi_group;
1067 } __packed;
1068} __packed;
1069
1070int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg);
1071
1072/* 3.32 */
1073#define WSM_REMOVE_KEY_REQ_ID 0x000D
1074#define WSM_REMOVE_KEY_RESP_ID 0x040D
1075struct wsm_remove_key {
1076 u8 index; /* Key entry index : 0-10 */
1077};
1078
1079int wsm_remove_key(struct cw1200_common *priv,
1080 const struct wsm_remove_key *arg);
1081
1082/* 3.34 */
1083struct wsm_set_tx_queue_params {
1084 /* WSM_ACK_POLICY_... */
1085 u8 ackPolicy;
1086
1087 /* Medium Time of TSPEC (in 32us units) allowed per */
1088 /* One Second Averaging Period for this queue. */
1089 u16 allowedMediumTime;
1090
1091 /* dot11MaxTransmitMsduLifetime to be used for the */
1092 /* specified queue. */
1093 u32 maxTransmitLifetime;
1094};
1095
1096struct wsm_tx_queue_params {
1097 /* NOTE: index is a linux queue id. */
1098 struct wsm_set_tx_queue_params params[4];
1099};
1100
1101
1102#define WSM_TX_QUEUE_SET(queue_params, queue, ack_policy, allowed_time,\
1103 max_life_time) \
1104do { \
1105 struct wsm_set_tx_queue_params *p = &(queue_params)->params[queue]; \
1106 p->ackPolicy = (ack_policy); \
1107 p->allowedMediumTime = (allowed_time); \
1108 p->maxTransmitLifetime = (max_life_time); \
1109} while (0)
1110
1111int wsm_set_tx_queue_params(struct cw1200_common *priv,
1112 const struct wsm_set_tx_queue_params *arg, u8 id);
1113
1114/* 3.36 */
1115#define WSM_EDCA_PARAMS_REQ_ID 0x0013
1116#define WSM_EDCA_PARAMS_RESP_ID 0x0413
1117struct wsm_edca_queue_params {
1118 /* CWmin (in slots) for the access class. */
1119 u16 cwmin;
1120
1121 /* CWmax (in slots) for the access class. */
1122 u16 cwmax;
1123
1124 /* AIFS (in slots) for the access class. */
1125 u16 aifns;
1126
1127 /* TX OP Limit (in microseconds) for the access class. */
1128 u16 txop_limit;
1129
1130 /* dot11MaxReceiveLifetime to be used for the specified */
1131 /* the access class. Overrides the global */
1132 /* dot11MaxReceiveLifetime value */
1133 u32 max_rx_lifetime;
1134};
1135
1136struct wsm_edca_params {
1137 /* NOTE: index is a linux queue id. */
1138 struct wsm_edca_queue_params params[4];
1139 bool uapsd_enable[4];
1140};
1141
1142#define TXOP_UNIT 32
1143#define WSM_EDCA_SET(__edca, __queue, __aifs, __cw_min, __cw_max, __txop, __lifetime,\
1144 __uapsd) \
1145 do { \
1146 struct wsm_edca_queue_params *p = &(__edca)->params[__queue]; \
1147 p->cwmin = __cw_min; \
1148 p->cwmax = __cw_max; \
1149 p->aifns = __aifs; \
1150 p->txop_limit = ((__txop) * TXOP_UNIT); \
1151 p->max_rx_lifetime = __lifetime; \
1152 (__edca)->uapsd_enable[__queue] = (__uapsd); \
1153 } while (0)
1154
1155int wsm_set_edca_params(struct cw1200_common *priv,
1156 const struct wsm_edca_params *arg);
1157
1158int wsm_set_uapsd_param(struct cw1200_common *priv,
1159 const struct wsm_edca_params *arg);
1160
1161/* 3.38 */
/* Set-System info. Skipped for now. Irrelevant. */
1163
1164/* 3.40 */
1165#define WSM_SWITCH_CHANNEL_REQ_ID 0x0016
1166#define WSM_SWITCH_CHANNEL_RESP_ID 0x0416
1167
1168struct wsm_switch_channel {
1169 /* 1 - means the STA shall not transmit any further */
1170 /* frames until the channel switch has completed */
1171 u8 mode;
1172
1173 /* Number of TBTTs until channel switch occurs. */
1174 /* 0 - indicates switch shall occur at any time */
1175 /* 1 - occurs immediately before the next TBTT */
1176 u8 switch_count;
1177
1178 /* The new channel number to switch to. */
1179 /* Note this is defined as per section 2.7. */
1180 u16 channel_number;
1181};
1182
1183int wsm_switch_channel(struct cw1200_common *priv,
1184 const struct wsm_switch_channel *arg);
1185
1186typedef void (*wsm_channel_switch_cb) (struct cw1200_common *priv);
1187
1188#define WSM_START_REQ_ID 0x0017
1189#define WSM_START_RESP_ID 0x0417
1190
1191struct wsm_start {
1192 /* WSM_START_MODE_... */
1193 /* [in] */ u8 mode;
1194
1195 /* WSM_PHY_BAND_... */
1196 /* [in] */ u8 band;
1197
1198 /* Channel number */
1199 /* [in] */ u16 channel_number;
1200
1201 /* Client Traffic window in units of TU */
1202 /* Valid only when mode == ..._P2P */
1203 /* [in] */ u32 ct_window;
1204
1205 /* Interval between two consecutive */
1206 /* beacon transmissions in TU. */
1207 /* [in] */ u32 beacon_interval;
1208
1209 /* DTIM period in terms of beacon intervals */
1210 /* [in] */ u8 dtim_period;
1211
1212 /* WSM_JOIN_PREAMBLE_... */
1213 /* [in] */ u8 preamble;
1214
1215 /* The delay time (in microseconds) period */
1216 /* before sending a probe-request. */
1217 /* [in] */ u8 probe_delay;
1218
1219 /* Length of the SSID */
1220 /* [in] */ u8 ssid_len;
1221
1222 /* SSID of the BSS or P2P_GO to be started now. */
1223 /* [in] */ u8 ssid[32];
1224
1225 /* The basic supported rates for the MiniAP. */
1226 /* [in] */ u32 basic_rate_set;
1227};
1228
1229int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg);
1230
1231#define WSM_BEACON_TRANSMIT_REQ_ID 0x0018
1232#define WSM_BEACON_TRANSMIT_RESP_ID 0x0418
1233
1234struct wsm_beacon_transmit {
1235 /* 1: enable; 0: disable */
1236 /* [in] */ u8 enable_beaconing;
1237};
1238
1239int wsm_beacon_transmit(struct cw1200_common *priv,
1240 const struct wsm_beacon_transmit *arg);
1241
1242int wsm_start_find(struct cw1200_common *priv);
1243
1244int wsm_stop_find(struct cw1200_common *priv);
1245
1246typedef void (*wsm_find_complete_cb) (struct cw1200_common *priv, u32 status);
1247
1248struct wsm_suspend_resume {
1249 /* See 3.52 */
1250 /* Link ID */
1251 /* [out] */ int link_id;
1252 /* Stop sending further Tx requests down to device for this link */
1253 /* [out] */ bool stop;
1254 /* Transmit multicast Frames */
1255 /* [out] */ bool multicast;
1256 /* The AC on which Tx to be suspended /resumed. */
1257 /* This is applicable only for U-APSD */
1258 /* WSM_QUEUE_... */
1259 /* [out] */ int queue;
1260};
1261
1262typedef void (*wsm_suspend_resume_cb) (struct cw1200_common *priv,
1263 struct wsm_suspend_resume *arg);
1264
1265/* 3.54 Update-IE request. */
1266struct wsm_update_ie {
1267 /* WSM_UPDATE_IE_... */
1268 /* [in] */ u16 what;
1269 /* [in] */ u16 count;
1270 /* [in] */ u8 *ies;
1271 /* [in] */ size_t length;
1272};
1273
1274int wsm_update_ie(struct cw1200_common *priv,
1275 const struct wsm_update_ie *arg);
1276
1277/* 3.56 */
1278struct wsm_map_link {
1279 /* MAC address of the remote device */
1280 /* [in] */ u8 mac_addr[6];
1281 /* [in] */ u8 link_id;
1282};
1283
1284int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg);
1285
1286/* ******************************************************************** */
/* MIB shortcuts */
1288
1289static inline int wsm_set_output_power(struct cw1200_common *priv,
1290 int power_level)
1291{
1292 __le32 val = __cpu_to_le32(power_level);
1293 return wsm_write_mib(priv, WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL,
1294 &val, sizeof(val));
1295}
1296
1297static inline int wsm_set_beacon_wakeup_period(struct cw1200_common *priv,
1298 unsigned dtim_interval,
1299 unsigned listen_interval)
1300{
1301 struct {
1302 u8 numBeaconPeriods;
1303 u8 reserved;
1304 __le16 listenInterval;
1305 } val = {
1306 dtim_interval, 0, __cpu_to_le16(listen_interval)
1307 };
1308
1309 if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
1310 return -EINVAL;
1311 else
1312 return wsm_write_mib(priv, WSM_MIB_ID_BEACON_WAKEUP_PERIOD,
1313 &val, sizeof(val));
1314}
1315
1316struct wsm_rcpi_rssi_threshold {
1317 u8 rssiRcpiMode; /* WSM_RCPI_RSSI_... */
1318 u8 lowerThreshold;
1319 u8 upperThreshold;
1320 u8 rollingAverageCount;
1321};
1322
1323static inline int wsm_set_rcpi_rssi_threshold(struct cw1200_common *priv,
1324 struct wsm_rcpi_rssi_threshold *arg)
1325{
1326 return wsm_write_mib(priv, WSM_MIB_ID_RCPI_RSSI_THRESHOLD, arg,
1327 sizeof(*arg));
1328}
1329
1330struct wsm_mib_counters_table {
1331 __le32 plcp_errors;
1332 __le32 fcs_errors;
1333 __le32 tx_packets;
1334 __le32 rx_packets;
1335 __le32 rx_packet_errors;
1336 __le32 rx_decryption_failures;
1337 __le32 rx_mic_failures;
1338 __le32 rx_no_key_failures;
1339 __le32 tx_multicast_frames;
1340 __le32 tx_frames_success;
1341 __le32 tx_frame_failures;
1342 __le32 tx_frames_retried;
1343 __le32 tx_frames_multi_retried;
1344 __le32 rx_frame_duplicates;
1345 __le32 rts_success;
1346 __le32 rts_failures;
1347 __le32 ack_failures;
1348 __le32 rx_multicast_frames;
1349 __le32 rx_frames_success;
1350 __le32 rx_cmac_icv_errors;
1351 __le32 rx_cmac_replays;
1352 __le32 rx_mgmt_ccmp_replays;
1353} __packed;
1354
1355static inline int wsm_get_counters_table(struct cw1200_common *priv,
1356 struct wsm_mib_counters_table *arg)
1357{
1358 return wsm_read_mib(priv, WSM_MIB_ID_COUNTERS_TABLE,
1359 arg, sizeof(*arg));
1360}
1361
1362static inline int wsm_get_station_id(struct cw1200_common *priv, u8 *mac)
1363{
1364 return wsm_read_mib(priv, WSM_MIB_ID_DOT11_STATION_ID, mac, ETH_ALEN);
1365}
1366
1367struct wsm_rx_filter {
1368 bool promiscuous;
1369 bool bssid;
1370 bool fcs;
1371 bool probeResponder;
1372};
1373
1374static inline int wsm_set_rx_filter(struct cw1200_common *priv,
1375 const struct wsm_rx_filter *arg)
1376{
1377 __le32 val = 0;
1378 if (arg->promiscuous)
1379 val |= __cpu_to_le32(BIT(0));
1380 if (arg->bssid)
1381 val |= __cpu_to_le32(BIT(1));
1382 if (arg->fcs)
1383 val |= __cpu_to_le32(BIT(2));
1384 if (arg->probeResponder)
1385 val |= __cpu_to_le32(BIT(3));
1386 return wsm_write_mib(priv, WSM_MIB_ID_RX_FILTER, &val, sizeof(val));
1387}
1388
1389int wsm_set_probe_responder(struct cw1200_common *priv, bool enable);
1390
1391#define WSM_BEACON_FILTER_IE_HAS_CHANGED BIT(0)
1392#define WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT BIT(1)
1393#define WSM_BEACON_FILTER_IE_HAS_APPEARED BIT(2)
1394
1395struct wsm_beacon_filter_table_entry {
1396 u8 ie_id;
1397 u8 flags;
1398 u8 oui[3];
1399 u8 match_data[3];
1400} __packed;
1401
1402struct wsm_mib_beacon_filter_table {
1403 __le32 num;
1404 struct wsm_beacon_filter_table_entry entry[10];
1405} __packed;
1406
1407static inline int wsm_set_beacon_filter_table(struct cw1200_common *priv,
1408 struct wsm_mib_beacon_filter_table *ft)
1409{
1410 size_t size = __le32_to_cpu(ft->num) *
1411 sizeof(struct wsm_beacon_filter_table_entry) +
1412 sizeof(__le32);
1413
1414 return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_TABLE, ft, size);
1415}
1416
1417#define WSM_BEACON_FILTER_ENABLE BIT(0) /* Enable/disable beacon filtering */
1418#define WSM_BEACON_FILTER_AUTO_ERP BIT(1) /* If 1 FW will handle ERP IE changes internally */
1419
1420struct wsm_beacon_filter_control {
1421 int enabled;
1422 int bcn_count;
1423};
1424
1425static inline int wsm_beacon_filter_control(struct cw1200_common *priv,
1426 struct wsm_beacon_filter_control *arg)
1427{
1428 struct {
1429 __le32 enabled;
1430 __le32 bcn_count;
1431 } val;
1432 val.enabled = __cpu_to_le32(arg->enabled);
1433 val.bcn_count = __cpu_to_le32(arg->bcn_count);
1434 return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_ENABLE, &val,
1435 sizeof(val));
1436}
1437
1438enum wsm_power_mode {
1439 wsm_power_mode_active = 0,
1440 wsm_power_mode_doze = 1,
1441 wsm_power_mode_quiescent = 2,
1442};
1443
1444struct wsm_operational_mode {
1445 enum wsm_power_mode power_mode;
1446 int disable_more_flag_usage;
1447 int perform_ant_diversity;
1448};
1449
1450static inline int wsm_set_operational_mode(struct cw1200_common *priv,
1451 const struct wsm_operational_mode *arg)
1452{
1453 u8 val = arg->power_mode;
1454 if (arg->disable_more_flag_usage)
1455 val |= BIT(4);
1456 if (arg->perform_ant_diversity)
1457 val |= BIT(5);
1458 return wsm_write_mib(priv, WSM_MIB_ID_OPERATIONAL_POWER_MODE, &val,
1459 sizeof(val));
1460}
1461
1462struct wsm_template_frame {
1463 u8 frame_type;
1464 u8 rate;
1465 struct sk_buff *skb;
1466};
1467
/* Upload a frame template (type per WSM_FRAME_TYPE_...) to the device.
 * A 4-byte MIB header (frame type, rate, 16-bit frame length) is
 * temporarily pushed in front of the skb data and stripped again before
 * returning, so the caller's skb is left in its original state.
 */
static inline int wsm_set_template_frame(struct cw1200_common *priv,
					 struct wsm_template_frame *arg)
{
	int ret;
	/* Prepend room for the 4-byte template header. */
	u8 *p = skb_push(arg->skb, 4);
	p[0] = arg->frame_type;
	p[1] = arg->rate;
	/* Bytes 2-3: little-endian length of the original frame payload.
	 * NOTE(review): the 16-bit store assumes p is 2-byte aligned here;
	 * confirm against how callers build the skb.
	 */
	((__le16 *)p)[1] = __cpu_to_le16(arg->skb->len - 4);
	ret = wsm_write_mib(priv, WSM_MIB_ID_TEMPLATE_FRAME, p, arg->skb->len);
	/* Remove the header again to restore the caller's skb. */
	skb_pull(arg->skb, 4);
	return ret;
}
1480
1481
1482struct wsm_protected_mgmt_policy {
1483 bool protectedMgmtEnable;
1484 bool unprotectedMgmtFramesAllowed;
1485 bool encryptionForAuthFrame;
1486};
1487
1488static inline int wsm_set_protected_mgmt_policy(struct cw1200_common *priv,
1489 struct wsm_protected_mgmt_policy *arg)
1490{
1491 __le32 val = 0;
1492 int ret;
1493 if (arg->protectedMgmtEnable)
1494 val |= __cpu_to_le32(BIT(0));
1495 if (arg->unprotectedMgmtFramesAllowed)
1496 val |= __cpu_to_le32(BIT(1));
1497 if (arg->encryptionForAuthFrame)
1498 val |= __cpu_to_le32(BIT(2));
1499 ret = wsm_write_mib(priv, WSM_MIB_ID_PROTECTED_MGMT_POLICY,
1500 &val, sizeof(val));
1501 return ret;
1502}
1503
1504struct wsm_mib_block_ack_policy {
1505 u8 tx_tid;
1506 u8 reserved1;
1507 u8 rx_tid;
1508 u8 reserved2;
1509} __packed;
1510
1511static inline int wsm_set_block_ack_policy(struct cw1200_common *priv,
1512 u8 tx_tid_policy,
1513 u8 rx_tid_policy)
1514{
1515 struct wsm_mib_block_ack_policy val = {
1516 .tx_tid = tx_tid_policy,
1517 .rx_tid = rx_tid_policy,
1518 };
1519 return wsm_write_mib(priv, WSM_MIB_ID_BLOCK_ACK_POLICY, &val,
1520 sizeof(val));
1521}
1522
1523struct wsm_mib_association_mode {
1524 u8 flags; /* WSM_ASSOCIATION_MODE_... */
1525 u8 preamble; /* WSM_JOIN_PREAMBLE_... */
1526 u8 greenfield; /* 1 for greenfield */
1527 u8 mpdu_start_spacing;
1528 __le32 basic_rate_set;
1529} __packed;
1530
1531static inline int wsm_set_association_mode(struct cw1200_common *priv,
1532 struct wsm_mib_association_mode *arg)
1533{
1534 return wsm_write_mib(priv, WSM_MIB_ID_SET_ASSOCIATION_MODE, arg,
1535 sizeof(*arg));
1536}
1537
1538#define WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED BIT(2)
1539#define WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT BIT(3)
1540struct wsm_tx_rate_retry_policy {
1541 u8 index;
1542 u8 short_retries;
1543 u8 long_retries;
1544 /* BIT(2) - Terminate retries when Tx rate retry policy
1545 * finishes.
1546 * BIT(3) - Count initial frame transmission as part of
1547 * rate retry counting but not as a retry
1548 * attempt
1549 */
1550 u8 flags;
1551 u8 rate_recoveries;
1552 u8 reserved[3];
1553 __le32 rate_count_indices[3];
1554} __packed;
1555
1556struct wsm_set_tx_rate_retry_policy {
1557 u8 num;
1558 u8 reserved[3];
1559 struct wsm_tx_rate_retry_policy tbl[8];
1560} __packed;
1561
/* Upload the Tx rate retry policy table.  Only the first arg->num
 * entries of tbl[] are sent to the device.
 */
static inline int wsm_set_tx_rate_retry_policy(struct cw1200_common *priv,
					       struct wsm_set_tx_rate_retry_policy *arg)
{
	/* The leading 4 bytes are the table header: u8 num + u8 reserved[3]
	 * of struct wsm_set_tx_rate_retry_policy.
	 */
	size_t size = 4 + arg->num * sizeof(struct wsm_tx_rate_retry_policy);
	return wsm_write_mib(priv, WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg,
			     size);
}
1569
1570/* 4.32 SetEtherTypeDataFrameFilter */
1571struct wsm_ether_type_filter_hdr {
1572 u8 num; /* Up to WSM_MAX_FILTER_ELEMENTS */
1573 u8 reserved[3];
1574} __packed;
1575
1576struct wsm_ether_type_filter {
1577 u8 action; /* WSM_FILTER_ACTION_XXX */
1578 u8 reserved;
1579 __le16 type; /* Type of ethernet frame */
1580} __packed;
1581
1582static inline int wsm_set_ether_type_filter(struct cw1200_common *priv,
1583 struct wsm_ether_type_filter_hdr *arg)
1584{
1585 size_t size = sizeof(struct wsm_ether_type_filter_hdr) +
1586 arg->num * sizeof(struct wsm_ether_type_filter);
1587 return wsm_write_mib(priv, WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER,
1588 arg, size);
1589}
1590
1591/* 4.33 SetUDPPortDataFrameFilter */
1592struct wsm_udp_port_filter_hdr {
1593 u8 num; /* Up to WSM_MAX_FILTER_ELEMENTS */
1594 u8 reserved[3];
1595} __packed;
1596
1597struct wsm_udp_port_filter {
1598 u8 action; /* WSM_FILTER_ACTION_XXX */
1599 u8 type; /* WSM_FILTER_PORT_TYPE_XXX */
1600 __le16 port; /* Port number */
1601} __packed;
1602
1603static inline int wsm_set_udp_port_filter(struct cw1200_common *priv,
1604 struct wsm_udp_port_filter_hdr *arg)
1605{
1606 size_t size = sizeof(struct wsm_udp_port_filter_hdr) +
1607 arg->num * sizeof(struct wsm_udp_port_filter);
1608 return wsm_write_mib(priv, WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER,
1609 arg, size);
1610}
1611
1612/* Undocumented MIBs: */
1613/* 4.35 P2PDeviceInfo */
1614#define D11_MAX_SSID_LEN (32)
1615
1616struct wsm_p2p_device_type {
1617 __le16 category_id;
1618 u8 oui[4];
1619 __le16 subcategory_id;
1620} __packed;
1621
1622struct wsm_p2p_device_info {
1623 struct wsm_p2p_device_type primaryDevice;
1624 u8 reserved1[3];
1625 u8 devname_size;
1626 u8 local_devname[D11_MAX_SSID_LEN];
1627 u8 reserved2[3];
1628 u8 num_secdev_supported;
1629 struct wsm_p2p_device_type secdevs[0];
1630} __packed;
1631
1632/* 4.36 SetWCDMABand - WO */
/* WCDMA band selector (write-only MIB). */
struct wsm_cdma_band {
	u8 wcdma_band;
	u8 reserved[3];
} __packed;
1637
1638/* 4.37 GroupTxSequenceCounter - RO */
/* 48-bit group TX sequence counter split across two little-endian
 * fields: bits_47_16 carries the high 32 bits, bits_15_00 the low 16.
 */
struct wsm_group_tx_seq {
	__le32 bits_47_16;
	__le16 bits_15_00;
	__le16 reserved;
} __packed;
1644
1645/* 4.39 SetHtProtection - WO */
1646#define WSM_DUAL_CTS_PROT_ENB (1 << 0)
1647#define WSM_NON_GREENFIELD_STA_PRESENT (1 << 1)
1648#define WSM_HT_PROT_MODE__NO_PROT (0 << 2)
1649#define WSM_HT_PROT_MODE__NON_MEMBER (1 << 2)
1650#define WSM_HT_PROT_MODE__20_MHZ (2 << 2)
1651#define WSM_HT_PROT_MODE__NON_HT_MIXED (3 << 2)
1652#define WSM_LSIG_TXOP_PROT_FULL (1 << 4)
1653#define WSM_LARGE_L_LENGTH_PROT (1 << 5)
1654
/* Bitmask of the WSM_DUAL_CTS_PROT_ENB / WSM_HT_PROT_MODE__* /
 * WSM_LSIG_TXOP_PROT_FULL / WSM_LARGE_L_LENGTH_PROT flags above.
 */
struct wsm_ht_protection {
	__le32 flags;
} __packed;
1658
1659/* 4.40 GPIO Command - R/W */
1660#define WSM_GPIO_COMMAND_SETUP 0
1661#define WSM_GPIO_COMMAND_READ 1
1662#define WSM_GPIO_COMMAND_WRITE 2
1663#define WSM_GPIO_COMMAND_RESET 3
1664#define WSM_GPIO_ALL_PINS 0xFF
1665
/* GPIO command payload. */
struct wsm_gpio_command {
	u8 command;		/* WSM_GPIO_COMMAND_XXX */
	u8 pin;			/* pin number, or WSM_GPIO_ALL_PINS */
	__le16 config;
} __packed;
1671
1672/* 4.41 TSFCounter - RO */
/* 64-bit little-endian TSF timer value (read-only MIB). */
struct wsm_tsf_counter {
	__le64 tsf_counter;
} __packed;
1676
1677/* 4.43 Keep alive period */
/* Keep-alive period MIB payload (see wsm_keep_alive_period()). */
struct wsm_keep_alive_period {
	__le16 period;
	u8 reserved[2];
} __packed;
1682
1683static inline int wsm_keep_alive_period(struct cw1200_common *priv,
1684 int period)
1685{
1686 struct wsm_keep_alive_period arg = {
1687 .period = __cpu_to_le16(period),
1688 };
1689 return wsm_write_mib(priv, WSM_MIB_ID_KEEP_ALIVE_PERIOD,
1690 &arg, sizeof(arg));
1691};
1692
1693/* BSSID filtering */
/* Payload for WSM_MIB_ID_DISABLE_BSSID_FILTER: non-zero 'filter'
 * DISABLES BSSID filtering (see wsm_set_bssid_filtering()).
 */
struct wsm_set_bssid_filtering {
	u8 filter;
	u8 reserved[3];
} __packed;
1698
/* Enable or disable BSSID filtering.
 *
 * The underlying MIB is DISABLE_BSSID_FILTER, so the value written to
 * the firmware is the logical inverse of @enabled.
 */
static inline int wsm_set_bssid_filtering(struct cw1200_common *priv,
					  bool enabled)
{
	struct wsm_set_bssid_filtering arg = {
		.filter = !enabled,	/* 1 = filtering disabled */
	};
	return wsm_write_mib(priv, WSM_MIB_ID_DISABLE_BSSID_FILTER,
			     &arg, sizeof(arg));
}
1708
1709/* Multicast filtering - 4.5 */
/* Multicast (group address) filter table. */
struct wsm_mib_multicast_filter {
	__le32 enable;		/* non-zero enables filtering */
	__le32 num_addrs;	/* valid entries in macaddrs[] */
	u8 macaddrs[WSM_MAX_GRP_ADDRTABLE_ENTRIES][ETH_ALEN];
} __packed;
1715
1716static inline int wsm_set_multicast_filter(struct cw1200_common *priv,
1717 struct wsm_mib_multicast_filter *fp)
1718{
1719 return wsm_write_mib(priv, WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE,
1720 fp, sizeof(*fp));
1721}
1722
1723/* ARP IPv4 filtering - 4.10 */
/* ARP IPv4 filter table.
 * Note: the addresses are big-endian (__be32, i.e. network byte
 * order), unlike the little-endian control fields used elsewhere.
 */
struct wsm_mib_arp_ipv4_filter {
	__le32 enable;
	__be32 ipv4addrs[WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES];
} __packed;
1728
1729static inline int wsm_set_arp_ipv4_filter(struct cw1200_common *priv,
1730 struct wsm_mib_arp_ipv4_filter *fp)
1731{
1732 return wsm_write_mib(priv, WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE,
1733 fp, sizeof(*fp));
1734}
1735
1736/* P2P Power Save Mode Info - 4.31 */
/* P2P power-save schedule (opportunistic PS / NoA parameters).
 * NOTE(review): field semantics presumably follow the P2P NoA
 * attribute (duration/interval/start_time in TU or us) — confirm
 * against the WSM spec before relying on units.
 */
struct wsm_p2p_ps_modeinfo {
	u8 opp_ps_ct_window;
	u8 count;
	u8 reserved;
	u8 dtim_count;
	__le32 duration;
	__le32 interval;
	__le32 start_time;
} __packed;
1746
1747static inline int wsm_set_p2p_ps_modeinfo(struct cw1200_common *priv,
1748 struct wsm_p2p_ps_modeinfo *mi)
1749{
1750 return wsm_write_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO,
1751 mi, sizeof(*mi));
1752}
1753
1754static inline int wsm_get_p2p_ps_modeinfo(struct cw1200_common *priv,
1755 struct wsm_p2p_ps_modeinfo *mi)
1756{
1757 return wsm_read_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO,
1758 mi, sizeof(*mi));
1759}
1760
1761/* UseMultiTxConfMessage */
1762
1763static inline int wsm_use_multi_tx_conf(struct cw1200_common *priv,
1764 bool enabled)
1765{
1766 __le32 arg = enabled ? __cpu_to_le32(1) : 0;
1767
1768 return wsm_write_mib(priv, WSM_MIB_USE_MULTI_TX_CONF,
1769 &arg, sizeof(arg));
1770}
1771
1772
1773/* 4.26 SetUpasdInformation */
1774struct wsm_uapsd_info {
1775 __le16 uapsd_flags;
1776 __le16 min_auto_trigger_interval;
1777 __le16 max_auto_trigger_interval;
1778 __le16 auto_trigger_step;
1779};
1780
1781static inline int wsm_set_uapsd_info(struct cw1200_common *priv,
1782 struct wsm_uapsd_info *arg)
1783{
1784 return wsm_write_mib(priv, WSM_MIB_ID_SET_UAPSD_INFORMATION,
1785 arg, sizeof(*arg));
1786}
1787
1788/* 4.22 OverrideInternalTxRate */
/* Override the rates the firmware uses for internally generated
 * frames (4.22; see wsm_set_override_internal_txrate()).
 */
struct wsm_override_internal_txrate {
	u8 internalTxRate;
	u8 nonErpInternalTxRate;
	u8 reserved[2];
} __packed;
1794
1795static inline int wsm_set_override_internal_txrate(struct cw1200_common *priv,
1796 struct wsm_override_internal_txrate *arg)
1797{
1798 return wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE,
1799 arg, sizeof(*arg));
1800}
1801
1802/* ******************************************************************** */
1803/* WSM TX port control */
1804
1805void wsm_lock_tx(struct cw1200_common *priv);
1806void wsm_lock_tx_async(struct cw1200_common *priv);
1807bool wsm_flush_tx(struct cw1200_common *priv);
1808void wsm_unlock_tx(struct cw1200_common *priv);
1809
1810/* ******************************************************************** */
1811/* WSM / BH API */
1812
1813int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len);
1814int wsm_handle_rx(struct cw1200_common *priv, u16 id, struct wsm_hdr *wsm,
1815 struct sk_buff **skb_p);
1816
1817/* ******************************************************************** */
1818/* wsm_buf API */
1819
/* WSM message assembly buffer.
 * begin/end presumably delimit the allocated storage with data as the
 * current fill position — confirm against wsm_buf_init()/wsm.c.
 */
struct wsm_buf {
	u8 *begin;
	u8 *data;
	u8 *end;
};
1825
1826void wsm_buf_init(struct wsm_buf *buf);
1827void wsm_buf_deinit(struct wsm_buf *buf);
1828
1829/* ******************************************************************** */
1830/* wsm_cmd API */
1831
/* Bookkeeping for an in-flight WSM command. */
struct wsm_cmd {
	spinlock_t lock; /* Protect structure from multiple access */
	int done;	 /* completion flag */
	u8 *ptr;	 /* command payload buffer */
	size_t len;	 /* payload length */
	void *arg;	 /* per-command response argument; semantics depend on cmd */
	int ret;	 /* command result code */
	u16 cmd;	 /* WSM command id */
};
1841
1842/* ******************************************************************** */
1843/* WSM TX buffer access */
1844
1845int wsm_get_tx(struct cw1200_common *priv, u8 **data,
1846 size_t *tx_len, int *burst);
1847void wsm_txed(struct cw1200_common *priv, u8 *data);
1848
1849/* ******************************************************************** */
1850/* Queue mapping: WSM <---> linux */
1851/* Linux: VO VI BE BK */
1852/* WSM: BE BK VI VO */
1853
1854static inline u8 wsm_queue_id_to_linux(u8 queue_id)
1855{
1856 static const u8 queue_mapping[] = {
1857 2, 3, 1, 0
1858 };
1859 return queue_mapping[queue_id];
1860}
1861
1862static inline u8 wsm_queue_id_to_wsm(u8 queue_id)
1863{
1864 static const u8 queue_mapping[] = {
1865 3, 2, 0, 1
1866 };
1867 return queue_mapping[queue_id];
1868}
1869
1870#endif /* CW1200_HWIO_H_INCLUDED */
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 15920aaa5dd6..f8ab193009cd 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6242,8 +6242,6 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6242 if ((val & 0x0000ff00) != 0) 6242 if ((val & 0x0000ff00) != 0)
6243 pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff); 6243 pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff);
6244 6244
6245 pci_set_power_state(pci_dev, PCI_D0);
6246
6247 if (!ipw2100_hw_is_adapter_in_system(dev)) { 6245 if (!ipw2100_hw_is_adapter_in_system(dev)) {
6248 printk(KERN_WARNING DRV_NAME 6246 printk(KERN_WARNING DRV_NAME
6249 "Device not found via register read.\n"); 6247 "Device not found via register read.\n");
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index d96257b79a84..6b823a1ab789 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -3548,6 +3548,7 @@ static int ipw_load(struct ipw_priv *priv)
3548 ipw_rx_queue_reset(priv, priv->rxq); 3548 ipw_rx_queue_reset(priv, priv->rxq);
3549 if (!priv->rxq) { 3549 if (!priv->rxq) {
3550 IPW_ERROR("Unable to initialize Rx queue\n"); 3550 IPW_ERROR("Unable to initialize Rx queue\n");
3551 rc = -ENOMEM;
3551 goto error; 3552 goto error;
3552 } 3553 }
3553 3554
@@ -8256,7 +8257,7 @@ static int is_duplicate_packet(struct ipw_priv *priv,
8256 u8 *mac = header->addr2; 8257 u8 *mac = header->addr2;
8257 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE; 8258 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8258 8259
8259 __list_for_each(p, &priv->ibss_mac_hash[index]) { 8260 list_for_each(p, &priv->ibss_mac_hash[index]) {
8260 entry = 8261 entry =
8261 list_entry(p, struct ipw_ibss_seq, list); 8262 list_entry(p, struct ipw_ibss_seq, list);
8262 if (!memcmp(entry->mac, mac, ETH_ALEN)) 8263 if (!memcmp(entry->mac, mac, ETH_ALEN))
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 95a1ca1e895c..9ffe65931b29 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -1195,7 +1195,7 @@ static int libipw_parse_info_param(struct libipw_info_element
1195#ifdef CONFIG_LIBIPW_DEBUG 1195#ifdef CONFIG_LIBIPW_DEBUG
1196 p += snprintf(p, sizeof(rates_str) - 1196 p += snprintf(p, sizeof(rates_str) -
1197 (p - rates_str), "%02X ", 1197 (p - rates_str), "%02X ",
1198 network->rates[i]); 1198 network->rates_ex[i]);
1199#endif 1199#endif
1200 if (libipw_is_ofdm_rate 1200 if (libipw_is_ofdm_rate
1201 (info_element->data[i])) { 1201 (info_element->data[i])) {
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index b37a582ccbe7..9581d07a4242 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3119,7 +3119,7 @@ il3945_store_debug_level(struct device *d, struct device_attribute *attr,
3119 unsigned long val; 3119 unsigned long val;
3120 int ret; 3120 int ret;
3121 3121
3122 ret = strict_strtoul(buf, 0, &val); 3122 ret = kstrtoul(buf, 0, &val);
3123 if (ret) 3123 if (ret)
3124 IL_INFO("%s is not in hex or decimal form.\n", buf); 3124 IL_INFO("%s is not in hex or decimal form.\n", buf);
3125 else 3125 else
@@ -3727,7 +3727,8 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3727 * 5. Setup HW Constants 3727 * 5. Setup HW Constants
3728 * ********************/ 3728 * ********************/
3729 /* Device-specific setup */ 3729 /* Device-specific setup */
3730 if (il3945_hw_set_hw_params(il)) { 3730 err = il3945_hw_set_hw_params(il);
3731 if (err) {
3731 IL_ERR("failed to set hw settings\n"); 3732 IL_ERR("failed to set hw settings\n");
3732 goto out_eeprom_free; 3733 goto out_eeprom_free;
3733 } 3734 }
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index dc1e6da9976a..c092033945cc 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -331,6 +331,19 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
331 return; 331 return;
332 } 332 }
333 333
334 /*
335 * Firmware will not transmit frame on passive channel, if it not yet
336 * received some valid frame on that channel. When this error happen
337 * we have to wait until firmware will unblock itself i.e. when we
338 * note received beacon or other frame. We unblock queues in
339 * il3945_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
340 */
341 if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
342 il->iw_mode == NL80211_IFTYPE_STATION) {
343 il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
344 D_INFO("Stopped queues - RX waiting on passive channel\n");
345 }
346
334 txq->time_stamp = jiffies; 347 txq->time_stamp = jiffies;
335 info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]); 348 info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
336 ieee80211_tx_info_clear_status(info); 349 ieee80211_tx_info_clear_status(info);
@@ -488,6 +501,11 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
488 return; 501 return;
489 } 502 }
490 503
504 if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
505 il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
506 D_INFO("Woke queues - frame received on passive channel\n");
507 }
508
491 skb = dev_alloc_skb(128); 509 skb = dev_alloc_skb(128);
492 if (!skb) { 510 if (!skb) {
493 IL_ERR("dev_alloc_skb failed\n"); 511 IL_ERR("dev_alloc_skb failed\n");
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 9a95045c97b6..b9b2bb51e605 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -588,6 +588,11 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
588 return; 588 return;
589 } 589 }
590 590
591 if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
592 il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
593 D_INFO("Woke queues - frame received on passive channel\n");
594 }
595
591 /* In case of HW accelerated crypto and bad decryption, drop */ 596 /* In case of HW accelerated crypto and bad decryption, drop */
592 if (!il->cfg->mod_params->sw_crypto && 597 if (!il->cfg->mod_params->sw_crypto &&
593 il_set_decrypted_flag(il, hdr, ampdu_status, stats)) 598 il_set_decrypted_flag(il, hdr, ampdu_status, stats))
@@ -2806,6 +2811,19 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
2806 return; 2811 return;
2807 } 2812 }
2808 2813
2814 /*
2815 * Firmware will not transmit frame on passive channel, if it not yet
2816 * received some valid frame on that channel. When this error happen
2817 * we have to wait until firmware will unblock itself i.e. when we
2818 * note received beacon or other frame. We unblock queues in
2819 * il4965_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
2820 */
2821 if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
2822 il->iw_mode == NL80211_IFTYPE_STATION) {
2823 il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
2824 D_INFO("Stopped queues - RX waiting on passive channel\n");
2825 }
2826
2809 spin_lock_irqsave(&il->sta_lock, flags); 2827 spin_lock_irqsave(&il->sta_lock, flags);
2810 if (txq->sched_retry) { 2828 if (txq->sched_retry) {
2811 const u32 scd_ssn = il4965_get_scd_ssn(tx_resp); 2829 const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
@@ -4567,7 +4585,7 @@ il4965_store_debug_level(struct device *d, struct device_attribute *attr,
4567 unsigned long val; 4585 unsigned long val;
4568 int ret; 4586 int ret;
4569 4587
4570 ret = strict_strtoul(buf, 0, &val); 4588 ret = kstrtoul(buf, 0, &val);
4571 if (ret) 4589 if (ret)
4572 IL_ERR("%s is not in hex or decimal form.\n", buf); 4590 IL_ERR("%s is not in hex or decimal form.\n", buf);
4573 else 4591 else
@@ -4614,7 +4632,7 @@ il4965_store_tx_power(struct device *d, struct device_attribute *attr,
4614 unsigned long val; 4632 unsigned long val;
4615 int ret; 4633 int ret;
4616 4634
4617 ret = strict_strtoul(buf, 10, &val); 4635 ret = kstrtoul(buf, 10, &val);
4618 if (ret) 4636 if (ret)
4619 IL_INFO("%s is not in decimal form.\n", buf); 4637 IL_INFO("%s is not in decimal form.\n", buf);
4620 else { 4638 else {
@@ -5741,7 +5759,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
5741 hw->flags = 5759 hw->flags =
5742 IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | 5760 IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
5743 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | 5761 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
5744 IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 5762 IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
5763 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
5745 if (il->cfg->sku & IL_SKU_N) 5764 if (il->cfg->sku & IL_SKU_N)
5746 hw->flags |= 5765 hw->flags |=
5747 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | 5766 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 3b6c99400892..048421511988 100644
--- a/drivers/net/wireless/iwlegacy/commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -1348,14 +1348,6 @@ struct il_rx_mpdu_res_start {
1348#define TX_CMD_SEC_KEY128 0x08 1348#define TX_CMD_SEC_KEY128 0x08
1349 1349
1350/* 1350/*
1351 * security overhead sizes
1352 */
1353#define WEP_IV_LEN 4
1354#define WEP_ICV_LEN 4
1355#define CCMP_MIC_LEN 8
1356#define TKIP_ICV_LEN 4
1357
1358/*
1359 * C_TX = 0x1c (command) 1351 * C_TX = 0x1c (command)
1360 */ 1352 */
1361 1353
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index e9a3cbc409ae..3195aad440dd 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -5307,6 +5307,17 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5307 D_MAC80211("BSSID %pM\n", bss_conf->bssid); 5307 D_MAC80211("BSSID %pM\n", bss_conf->bssid);
5308 5308
5309 /* 5309 /*
5310 * On passive channel we wait with blocked queues to see if
5311 * there is traffic on that channel. If no frame will be
5312 * received (what is very unlikely since scan detects AP on
5313 * that channel, but theoretically possible), mac80211 associate
5314 * procedure will time out and mac80211 will call us with NULL
5315 * bssid. We have to unblock queues on such condition.
5316 */
5317 if (is_zero_ether_addr(bss_conf->bssid))
5318 il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
5319
5320 /*
5310 * If there is currently a HW scan going on in the background, 5321 * If there is currently a HW scan going on in the background,
5311 * then we need to cancel it, otherwise sometimes we are not 5322 * then we need to cancel it, otherwise sometimes we are not
5312 * able to authenticate (FIXME: why ?) 5323 * able to authenticate (FIXME: why ?)
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 4caaf52986a4..83f8ed8a5528 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1299,6 +1299,8 @@ struct il_priv {
1299 /* queue refcounts */ 1299 /* queue refcounts */
1300#define IL_MAX_HW_QUEUES 32 1300#define IL_MAX_HW_QUEUES 32
1301 unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)]; 1301 unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
1302#define IL_STOP_REASON_PASSIVE 0
1303 unsigned long stop_reason;
1302 /* for each AC */ 1304 /* for each AC */
1303 atomic_t queue_stop_count[4]; 1305 atomic_t queue_stop_count[4];
1304 1306
@@ -2257,6 +2259,19 @@ il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
2257} 2259}
2258 2260
2259static inline void 2261static inline void
2262_il_wake_queue(struct il_priv *il, u8 ac)
2263{
2264 if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
2265 ieee80211_wake_queue(il->hw, ac);
2266}
2267
2268static inline void
2269_il_stop_queue(struct il_priv *il, u8 ac)
2270{
2271 if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
2272 ieee80211_stop_queue(il->hw, ac);
2273}
2274static inline void
2260il_wake_queue(struct il_priv *il, struct il_tx_queue *txq) 2275il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
2261{ 2276{
2262 u8 queue = txq->swq_id; 2277 u8 queue = txq->swq_id;
@@ -2264,8 +2279,7 @@ il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
2264 u8 hwq = (queue >> 2) & 0x1f; 2279 u8 hwq = (queue >> 2) & 0x1f;
2265 2280
2266 if (test_and_clear_bit(hwq, il->queue_stopped)) 2281 if (test_and_clear_bit(hwq, il->queue_stopped))
2267 if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0) 2282 _il_wake_queue(il, ac);
2268 ieee80211_wake_queue(il->hw, ac);
2269} 2283}
2270 2284
2271static inline void 2285static inline void
@@ -2276,8 +2290,27 @@ il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
2276 u8 hwq = (queue >> 2) & 0x1f; 2290 u8 hwq = (queue >> 2) & 0x1f;
2277 2291
2278 if (!test_and_set_bit(hwq, il->queue_stopped)) 2292 if (!test_and_set_bit(hwq, il->queue_stopped))
2279 if (atomic_inc_return(&il->queue_stop_count[ac]) > 0) 2293 _il_stop_queue(il, ac);
2280 ieee80211_stop_queue(il->hw, ac); 2294}
2295
2296static inline void
2297il_wake_queues_by_reason(struct il_priv *il, int reason)
2298{
2299 u8 ac;
2300
2301 if (test_and_clear_bit(reason, &il->stop_reason))
2302 for (ac = 0; ac < 4; ac++)
2303 _il_wake_queue(il, ac);
2304}
2305
2306static inline void
2307il_stop_queues_by_reason(struct il_priv *il, int reason)
2308{
2309 u8 ac;
2310
2311 if (!test_and_set_bit(reason, &il->stop_reason))
2312 for (ac = 0; ac < 4; ac++)
2313 _il_stop_queue(il, ac);
2281} 2314}
2282 2315
2283#ifdef ieee80211_stop_queue 2316#ifdef ieee80211_stop_queue
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 56c2040a955b..cbaa5c2c410f 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -128,16 +128,6 @@ config IWLWIFI_DEVICE_TRACING
128 occur. 128 occur.
129endmenu 129endmenu
130 130
131config IWLWIFI_DEVICE_TESTMODE
132 def_bool y
133 depends on IWLWIFI
134 depends on NL80211_TESTMODE
135 help
136 This option enables the testmode support for iwlwifi device through
137 NL80211_TESTMODE. This provide the capabilities of enable user space
138 validation applications to interacts with the device through the
139 generic netlink message via NL80211_TESTMODE channel.
140
141config IWLWIFI_P2P 131config IWLWIFI_P2P
142 def_bool y 132 def_bool y
143 bool "iwlwifi experimental P2P support" 133 bool "iwlwifi experimental P2P support"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 3b5613ea458b..1fa64429bcc2 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -7,14 +7,15 @@ iwlwifi-objs += iwl-notif-wait.o
7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o 7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
8iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o 8iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
9iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o 9iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
10iwlwifi-objs += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwl-7000.o 10iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
11iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
12
13iwlwifi-objs += $(iwlwifi-m)
11 14
12iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 15iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
13iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
14 16
15ccflags-y += -D__CHECK_ENDIAN__ -I$(src) 17ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
16 18
17
18obj-$(CONFIG_IWLDVM) += dvm/ 19obj-$(CONFIG_IWLDVM) += dvm/
19obj-$(CONFIG_IWLMVM) += mvm/ 20obj-$(CONFIG_IWLMVM) += mvm/
20 21
diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile
index 5ff76b204141..dce7ab2e0c4b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/dvm/Makefile
@@ -8,6 +8,5 @@ iwldvm-objs += scan.o led.o
8iwldvm-objs += rxon.o devices.o 8iwldvm-objs += rxon.o devices.o
9 9
10iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o 10iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
11iwldvm-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += testmode.o
12 11
13ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ 12ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 48545ab00311..18355110deff 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -76,13 +76,16 @@
76#define IWL_INVALID_STATION 255 76#define IWL_INVALID_STATION 255
77 77
78/* device operations */ 78/* device operations */
79extern struct iwl_lib_ops iwl1000_lib; 79extern const struct iwl_dvm_cfg iwl_dvm_1000_cfg;
80extern struct iwl_lib_ops iwl2000_lib; 80extern const struct iwl_dvm_cfg iwl_dvm_2000_cfg;
81extern struct iwl_lib_ops iwl2030_lib; 81extern const struct iwl_dvm_cfg iwl_dvm_105_cfg;
82extern struct iwl_lib_ops iwl5000_lib; 82extern const struct iwl_dvm_cfg iwl_dvm_2030_cfg;
83extern struct iwl_lib_ops iwl5150_lib; 83extern const struct iwl_dvm_cfg iwl_dvm_5000_cfg;
84extern struct iwl_lib_ops iwl6000_lib; 84extern const struct iwl_dvm_cfg iwl_dvm_5150_cfg;
85extern struct iwl_lib_ops iwl6030_lib; 85extern const struct iwl_dvm_cfg iwl_dvm_6000_cfg;
86extern const struct iwl_dvm_cfg iwl_dvm_6005_cfg;
87extern const struct iwl_dvm_cfg iwl_dvm_6050_cfg;
88extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
86 89
87 90
88#define TIME_UNIT 1024 91#define TIME_UNIT 1024
@@ -291,8 +294,8 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena);
291 294
292static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv) 295static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
293{ 296{
294 return priv->cfg->bt_params && 297 return priv->lib->bt_params &&
295 priv->cfg->bt_params->advanced_bt_coexist; 298 priv->lib->bt_params->advanced_bt_coexist;
296} 299}
297 300
298#ifdef CONFIG_IWLWIFI_DEBUG 301#ifdef CONFIG_IWLWIFI_DEBUG
@@ -402,43 +405,6 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
402 405
403extern int iwl_alive_start(struct iwl_priv *priv); 406extern int iwl_alive_start(struct iwl_priv *priv);
404 407
405/* testmode support */
406#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
407
408extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
409 int len);
410extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
411 struct sk_buff *skb,
412 struct netlink_callback *cb,
413 void *data, int len);
414extern void iwl_testmode_init(struct iwl_priv *priv);
415extern void iwl_testmode_free(struct iwl_priv *priv);
416
417#else
418
419static inline
420int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
421{
422 return -ENOSYS;
423}
424
425static inline
426int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
427 struct netlink_callback *cb,
428 void *data, int len)
429{
430 return -ENOSYS;
431}
432
433static inline void iwl_testmode_init(struct iwl_priv *priv)
434{
435}
436
437static inline void iwl_testmode_free(struct iwl_priv *priv)
438{
439}
440#endif
441
442#ifdef CONFIG_IWLWIFI_DEBUG 408#ifdef CONFIG_IWLWIFI_DEBUG
443void iwl_print_rx_config_cmd(struct iwl_priv *priv, 409void iwl_print_rx_config_cmd(struct iwl_priv *priv,
444 enum iwl_rxon_context_id ctxid); 410 enum iwl_rxon_context_id ctxid);
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index d6c4cf2ad7c5..1b0f0d502568 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -521,7 +521,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
521 521
522 iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]); 522 iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
523 523
524 if (priv->cfg->base_params->hd_v2) { 524 if (priv->lib->hd_v2) {
525 cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = 525 cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
526 HD_INA_NON_SQUARE_DET_OFDM_DATA_V2; 526 HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
527 cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = 527 cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
@@ -895,7 +895,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
895 continue; 895 continue;
896 } 896 }
897 897
898 delta_g = (priv->cfg->base_params->chain_noise_scale * 898 delta_g = (priv->lib->chain_noise_scale *
899 ((s32)average_noise[default_chain] - 899 ((s32)average_noise[default_chain] -
900 (s32)average_noise[i])) / 1500; 900 (s32)average_noise[i])) / 1500;
901 901
@@ -1051,8 +1051,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1051 return; 1051 return;
1052 1052
1053 /* Analyze signal for disconnected antenna */ 1053 /* Analyze signal for disconnected antenna */
1054 if (priv->cfg->bt_params && 1054 if (priv->lib->bt_params &&
1055 priv->cfg->bt_params->advanced_bt_coexist) { 1055 priv->lib->bt_params->advanced_bt_coexist) {
1056 /* Disable disconnected antenna algorithm for advanced 1056 /* Disable disconnected antenna algorithm for advanced
1057 bt coex, assuming valid antennas are connected */ 1057 bt coex, assuming valid antennas are connected */
1058 data->active_chains = priv->nvm_data->valid_rx_ant; 1058 data->active_chains = priv->nvm_data->valid_rx_ant;
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 95ca026ecc9d..ebdac909f0cd 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -838,10 +838,6 @@ struct iwl_qosparam_cmd {
838#define STA_MODIFY_DELBA_TID_MSK 0x10 838#define STA_MODIFY_DELBA_TID_MSK 0x10
839#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 839#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
840 840
841/* Receiver address (actually, Rx station's index into station table),
842 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
843#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
844
845/* agn */ 841/* agn */
846struct iwl_keyinfo { 842struct iwl_keyinfo {
847 __le16 key_flags; 843 __le16 key_flags;
@@ -1225,14 +1221,6 @@ struct iwl_rx_mpdu_res_start {
1225#define TX_CMD_SEC_KEY128 0x08 1221#define TX_CMD_SEC_KEY128 0x08
1226 1222
1227/* 1223/*
1228 * security overhead sizes
1229 */
1230#define WEP_IV_LEN 4
1231#define WEP_ICV_LEN 4
1232#define CCMP_MIC_LEN 8
1233#define TKIP_ICV_LEN 4
1234
1235/*
1236 * REPLY_TX = 0x1c (command) 1224 * REPLY_TX = 0x1c (command)
1237 */ 1225 */
1238 1226
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 71ea77576d22..60a4e0d15715 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -52,8 +52,6 @@
52#include "rs.h" 52#include "rs.h"
53#include "tt.h" 53#include "tt.h"
54 54
55#include "iwl-test.h"
56
57/* CT-KILL constants */ 55/* CT-KILL constants */
58#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ 56#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
59#define CT_KILL_THRESHOLD 114 /* in Celsius */ 57#define CT_KILL_THRESHOLD 114 /* in Celsius */
@@ -568,16 +566,61 @@ struct iwl_hw_params {
568 const struct iwl_sensitivity_ranges *sens; 566 const struct iwl_sensitivity_ranges *sens;
569}; 567};
570 568
571struct iwl_lib_ops { 569/**
572 /* set hw dependent parameters */ 570 * struct iwl_dvm_bt_params - DVM specific BT (coex) parameters
571 * @advanced_bt_coexist: support advanced bt coexist
572 * @bt_init_traffic_load: specify initial bt traffic load
573 * @bt_prio_boost: default bt priority boost value
574 * @agg_time_limit: maximum number of uSec in aggregation
575 * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode
576 */
577struct iwl_dvm_bt_params {
578 bool advanced_bt_coexist;
579 u8 bt_init_traffic_load;
580 u32 bt_prio_boost;
581 u16 agg_time_limit;
582 bool bt_sco_disable;
583 bool bt_session_2;
584};
585
586/**
587 * struct iwl_dvm_cfg - DVM firmware specific device configuration
588 * @set_hw_params: set hardware parameters
589 * @set_channel_switch: send channel switch command
590 * @nic_config: apply device specific configuration
591 * @temperature: read temperature
592 * @adv_thermal_throttle: support advance thermal throttle
593 * @support_ct_kill_exit: support ct kill exit condition
594 * @plcp_delta_threshold: plcp error rate threshold used to trigger
595 * radio tuning when there is a high receiving plcp error rate
596 * @chain_noise_scale: default chain noise scale used for gain computation
597 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
598 * @no_idle_support: do not support idle mode
599 * @bt_params: pointer to BT parameters
600 * @need_temp_offset_calib: need to perform temperature offset calibration
601 * @no_xtal_calib: some devices do not need crystal calibration data,
602 * don't send it to those
603 * @temp_offset_v2: support v2 of temperature offset calibration
604 * @adv_pm: advanced power management
605 */
606struct iwl_dvm_cfg {
573 void (*set_hw_params)(struct iwl_priv *priv); 607 void (*set_hw_params)(struct iwl_priv *priv);
574 int (*set_channel_switch)(struct iwl_priv *priv, 608 int (*set_channel_switch)(struct iwl_priv *priv,
575 struct ieee80211_channel_switch *ch_switch); 609 struct ieee80211_channel_switch *ch_switch);
576 /* device specific configuration */
577 void (*nic_config)(struct iwl_priv *priv); 610 void (*nic_config)(struct iwl_priv *priv);
578
579 /* temperature */
580 void (*temperature)(struct iwl_priv *priv); 611 void (*temperature)(struct iwl_priv *priv);
612
613 const struct iwl_dvm_bt_params *bt_params;
614 s32 chain_noise_scale;
615 u8 plcp_delta_threshold;
616 bool adv_thermal_throttle;
617 bool support_ct_kill_exit;
618 bool hd_v2;
619 bool no_idle_support;
620 bool need_temp_offset_calib;
621 bool no_xtal_calib;
622 bool temp_offset_v2;
623 bool adv_pm;
581}; 624};
582 625
583struct iwl_wipan_noa_data { 626struct iwl_wipan_noa_data {
@@ -610,7 +653,7 @@ struct iwl_priv {
610 struct device *dev; /* for debug prints only */ 653 struct device *dev; /* for debug prints only */
611 const struct iwl_cfg *cfg; 654 const struct iwl_cfg *cfg;
612 const struct iwl_fw *fw; 655 const struct iwl_fw *fw;
613 const struct iwl_lib_ops *lib; 656 const struct iwl_dvm_cfg *lib;
614 unsigned long status; 657 unsigned long status;
615 658
616 spinlock_t sta_lock; 659 spinlock_t sta_lock;
@@ -646,10 +689,6 @@ struct iwl_priv {
646 struct iwl_spectrum_notification measure_report; 689 struct iwl_spectrum_notification measure_report;
647 u8 measurement_status; 690 u8 measurement_status;
648 691
649#define IWL_OWNERSHIP_DRIVER 0
650#define IWL_OWNERSHIP_TM 1
651 u8 ucode_owner;
652
653 /* ucode beacon time */ 692 /* ucode beacon time */
654 u32 ucode_beacon_time; 693 u32 ucode_beacon_time;
655 int missed_beacon_threshold; 694 int missed_beacon_threshold;
@@ -844,7 +883,7 @@ struct iwl_priv {
844#endif /* CONFIG_IWLWIFI_DEBUGFS */ 883#endif /* CONFIG_IWLWIFI_DEBUGFS */
845 884
846 struct iwl_nvm_data *nvm_data; 885 struct iwl_nvm_data *nvm_data;
847 /* eeprom blob for debugfs/testmode */ 886 /* eeprom blob for debugfs */
848 u8 *eeprom_blob; 887 u8 *eeprom_blob;
849 size_t eeprom_blob_size; 888 size_t eeprom_blob_size;
850 889
@@ -860,16 +899,14 @@ struct iwl_priv {
860 unsigned long blink_on, blink_off; 899 unsigned long blink_on, blink_off;
861 bool led_registered; 900 bool led_registered;
862 901
863#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
864 struct iwl_test tst;
865 u32 tm_fixed_rate;
866#endif
867
868 /* WoWLAN GTK rekey data */ 902 /* WoWLAN GTK rekey data */
869 u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN]; 903 u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
870 __le64 replay_ctr; 904 __le64 replay_ctr;
871 __le16 last_seq_ctl; 905 __le16 last_seq_ctl;
872 bool have_rekey_data; 906 bool have_rekey_data;
907#ifdef CONFIG_PM_SLEEP
908 struct wiphy_wowlan_support wowlan_support;
909#endif
873 910
874 /* device_pointers: pointers to ucode event tables */ 911 /* device_pointers: pointers to ucode event tables */
875 struct { 912 struct {
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index c48907c8ab43..352c6cb7b4f1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -174,10 +174,13 @@ static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
174 priv->hw_params.sens = &iwl1000_sensitivity; 174 priv->hw_params.sens = &iwl1000_sensitivity;
175} 175}
176 176
177struct iwl_lib_ops iwl1000_lib = { 177const struct iwl_dvm_cfg iwl_dvm_1000_cfg = {
178 .set_hw_params = iwl1000_hw_set_hw_params, 178 .set_hw_params = iwl1000_hw_set_hw_params,
179 .nic_config = iwl1000_nic_config, 179 .nic_config = iwl1000_nic_config,
180 .temperature = iwlagn_temperature, 180 .temperature = iwlagn_temperature,
181 .support_ct_kill_exit = true,
182 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
183 .chain_noise_scale = 1000,
181}; 184};
182 185
183 186
@@ -232,16 +235,56 @@ static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
232 priv->hw_params.sens = &iwl2000_sensitivity; 235 priv->hw_params.sens = &iwl2000_sensitivity;
233} 236}
234 237
235struct iwl_lib_ops iwl2000_lib = { 238const struct iwl_dvm_cfg iwl_dvm_2000_cfg = {
236 .set_hw_params = iwl2000_hw_set_hw_params, 239 .set_hw_params = iwl2000_hw_set_hw_params,
237 .nic_config = iwl2000_nic_config, 240 .nic_config = iwl2000_nic_config,
238 .temperature = iwlagn_temperature, 241 .temperature = iwlagn_temperature,
242 .adv_thermal_throttle = true,
243 .support_ct_kill_exit = true,
244 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
245 .chain_noise_scale = 1000,
246 .hd_v2 = true,
247 .need_temp_offset_calib = true,
248 .temp_offset_v2 = true,
239}; 249};
240 250
241struct iwl_lib_ops iwl2030_lib = { 251const struct iwl_dvm_cfg iwl_dvm_105_cfg = {
242 .set_hw_params = iwl2000_hw_set_hw_params, 252 .set_hw_params = iwl2000_hw_set_hw_params,
243 .nic_config = iwl2000_nic_config, 253 .nic_config = iwl2000_nic_config,
244 .temperature = iwlagn_temperature, 254 .temperature = iwlagn_temperature,
255 .adv_thermal_throttle = true,
256 .support_ct_kill_exit = true,
257 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
258 .chain_noise_scale = 1000,
259 .hd_v2 = true,
260 .need_temp_offset_calib = true,
261 .temp_offset_v2 = true,
262 .adv_pm = true,
263};
264
265static const struct iwl_dvm_bt_params iwl2030_bt_params = {
266 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
267 .advanced_bt_coexist = true,
268 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
269 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
270 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
271 .bt_sco_disable = true,
272 .bt_session_2 = true,
273};
274
275const struct iwl_dvm_cfg iwl_dvm_2030_cfg = {
276 .set_hw_params = iwl2000_hw_set_hw_params,
277 .nic_config = iwl2000_nic_config,
278 .temperature = iwlagn_temperature,
279 .adv_thermal_throttle = true,
280 .support_ct_kill_exit = true,
281 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
282 .chain_noise_scale = 1000,
283 .hd_v2 = true,
284 .bt_params = &iwl2030_bt_params,
285 .need_temp_offset_calib = true,
286 .temp_offset_v2 = true,
287 .adv_pm = true,
245}; 288};
246 289
247/* 290/*
@@ -420,16 +463,23 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
420 return iwl_dvm_send_cmd(priv, &hcmd); 463 return iwl_dvm_send_cmd(priv, &hcmd);
421} 464}
422 465
423struct iwl_lib_ops iwl5000_lib = { 466const struct iwl_dvm_cfg iwl_dvm_5000_cfg = {
424 .set_hw_params = iwl5000_hw_set_hw_params, 467 .set_hw_params = iwl5000_hw_set_hw_params,
425 .set_channel_switch = iwl5000_hw_channel_switch, 468 .set_channel_switch = iwl5000_hw_channel_switch,
426 .temperature = iwlagn_temperature, 469 .temperature = iwlagn_temperature,
470 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
471 .chain_noise_scale = 1000,
472 .no_idle_support = true,
427}; 473};
428 474
429struct iwl_lib_ops iwl5150_lib = { 475const struct iwl_dvm_cfg iwl_dvm_5150_cfg = {
430 .set_hw_params = iwl5150_hw_set_hw_params, 476 .set_hw_params = iwl5150_hw_set_hw_params,
431 .set_channel_switch = iwl5000_hw_channel_switch, 477 .set_channel_switch = iwl5000_hw_channel_switch,
432 .temperature = iwl5150_temperature, 478 .temperature = iwl5150_temperature,
479 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
480 .chain_noise_scale = 1000,
481 .no_idle_support = true,
482 .no_xtal_calib = true,
433}; 483};
434 484
435 485
@@ -584,16 +634,59 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
584 return err; 634 return err;
585} 635}
586 636
587struct iwl_lib_ops iwl6000_lib = { 637const struct iwl_dvm_cfg iwl_dvm_6000_cfg = {
588 .set_hw_params = iwl6000_hw_set_hw_params, 638 .set_hw_params = iwl6000_hw_set_hw_params,
589 .set_channel_switch = iwl6000_hw_channel_switch, 639 .set_channel_switch = iwl6000_hw_channel_switch,
590 .nic_config = iwl6000_nic_config, 640 .nic_config = iwl6000_nic_config,
591 .temperature = iwlagn_temperature, 641 .temperature = iwlagn_temperature,
642 .adv_thermal_throttle = true,
643 .support_ct_kill_exit = true,
644 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
645 .chain_noise_scale = 1000,
646};
647
648const struct iwl_dvm_cfg iwl_dvm_6005_cfg = {
649 .set_hw_params = iwl6000_hw_set_hw_params,
650 .set_channel_switch = iwl6000_hw_channel_switch,
651 .nic_config = iwl6000_nic_config,
652 .temperature = iwlagn_temperature,
653 .adv_thermal_throttle = true,
654 .support_ct_kill_exit = true,
655 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
656 .chain_noise_scale = 1000,
657 .need_temp_offset_calib = true,
658};
659
660const struct iwl_dvm_cfg iwl_dvm_6050_cfg = {
661 .set_hw_params = iwl6000_hw_set_hw_params,
662 .set_channel_switch = iwl6000_hw_channel_switch,
663 .nic_config = iwl6000_nic_config,
664 .temperature = iwlagn_temperature,
665 .adv_thermal_throttle = true,
666 .support_ct_kill_exit = true,
667 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
668 .chain_noise_scale = 1500,
669};
670
671static const struct iwl_dvm_bt_params iwl6000_bt_params = {
672 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
673 .advanced_bt_coexist = true,
674 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
675 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
676 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
677 .bt_sco_disable = true,
592}; 678};
593 679
594struct iwl_lib_ops iwl6030_lib = { 680const struct iwl_dvm_cfg iwl_dvm_6030_cfg = {
595 .set_hw_params = iwl6000_hw_set_hw_params, 681 .set_hw_params = iwl6000_hw_set_hw_params,
596 .set_channel_switch = iwl6000_hw_channel_switch, 682 .set_channel_switch = iwl6000_hw_channel_switch,
597 .nic_config = iwl6000_nic_config, 683 .nic_config = iwl6000_nic_config,
598 .temperature = iwlagn_temperature, 684 .temperature = iwlagn_temperature,
685 .adv_thermal_throttle = true,
686 .support_ct_kill_exit = true,
687 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
688 .chain_noise_scale = 1000,
689 .bt_params = &iwl6000_bt_params,
690 .need_temp_offset_calib = true,
691 .adv_pm = true,
599}; 692};
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 54f553380aa8..3d5bdc4217a8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -254,23 +254,23 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
254 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != 254 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
255 sizeof(basic.bt3_lookup_table)); 255 sizeof(basic.bt3_lookup_table));
256 256
257 if (priv->cfg->bt_params) { 257 if (priv->lib->bt_params) {
258 /* 258 /*
259 * newer generation of devices (2000 series and newer) 259 * newer generation of devices (2000 series and newer)
260 * use the version 2 of the bt command 260 * use the version 2 of the bt command
261 * we need to make sure sending the host command 261 * we need to make sure sending the host command
262 * with correct data structure to avoid uCode assert 262 * with correct data structure to avoid uCode assert
263 */ 263 */
264 if (priv->cfg->bt_params->bt_session_2) { 264 if (priv->lib->bt_params->bt_session_2) {
265 bt_cmd_v2.prio_boost = cpu_to_le32( 265 bt_cmd_v2.prio_boost = cpu_to_le32(
266 priv->cfg->bt_params->bt_prio_boost); 266 priv->lib->bt_params->bt_prio_boost);
267 bt_cmd_v2.tx_prio_boost = 0; 267 bt_cmd_v2.tx_prio_boost = 0;
268 bt_cmd_v2.rx_prio_boost = 0; 268 bt_cmd_v2.rx_prio_boost = 0;
269 } else { 269 } else {
270 /* older version only has 8 bits */ 270 /* older version only has 8 bits */
271 WARN_ON(priv->cfg->bt_params->bt_prio_boost & ~0xFF); 271 WARN_ON(priv->lib->bt_params->bt_prio_boost & ~0xFF);
272 bt_cmd_v1.prio_boost = 272 bt_cmd_v1.prio_boost =
273 priv->cfg->bt_params->bt_prio_boost; 273 priv->lib->bt_params->bt_prio_boost;
274 bt_cmd_v1.tx_prio_boost = 0; 274 bt_cmd_v1.tx_prio_boost = 0;
275 bt_cmd_v1.rx_prio_boost = 0; 275 bt_cmd_v1.rx_prio_boost = 0;
276 } 276 }
@@ -330,7 +330,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
330 priv->bt_full_concurrent ? 330 priv->bt_full_concurrent ?
331 "full concurrency" : "3-wire"); 331 "full concurrency" : "3-wire");
332 332
333 if (priv->cfg->bt_params->bt_session_2) { 333 if (priv->lib->bt_params->bt_session_2) {
334 memcpy(&bt_cmd_v2.basic, &basic, 334 memcpy(&bt_cmd_v2.basic, &basic,
335 sizeof(basic)); 335 sizeof(basic));
336 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, 336 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
@@ -758,8 +758,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv)
758 */ 758 */
759static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) 759static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
760{ 760{
761 if (priv->cfg->bt_params && 761 if (priv->lib->bt_params &&
762 priv->cfg->bt_params->advanced_bt_coexist && 762 priv->lib->bt_params->advanced_bt_coexist &&
763 (priv->bt_full_concurrent || 763 (priv->bt_full_concurrent ||
764 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { 764 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
765 /* 765 /*
@@ -830,8 +830,8 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
830 else 830 else
831 active_chains = priv->nvm_data->valid_rx_ant; 831 active_chains = priv->nvm_data->valid_rx_ant;
832 832
833 if (priv->cfg->bt_params && 833 if (priv->lib->bt_params &&
834 priv->cfg->bt_params->advanced_bt_coexist && 834 priv->lib->bt_params->advanced_bt_coexist &&
835 (priv->bt_full_concurrent || 835 (priv->bt_full_concurrent ||
836 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { 836 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
837 /* 837 /*
@@ -1288,12 +1288,6 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1288 if (!(cmd->flags & CMD_ASYNC)) 1288 if (!(cmd->flags & CMD_ASYNC))
1289 lockdep_assert_held(&priv->mutex); 1289 lockdep_assert_held(&priv->mutex);
1290 1290
1291 if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
1292 !(cmd->flags & CMD_ON_DEMAND)) {
1293 IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
1294 return -EIO;
1295 }
1296
1297 return iwl_trans_send_cmd(priv->trans, cmd); 1291 return iwl_trans_send_cmd(priv->trans, cmd);
1298} 1292}
1299 1293
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index cab23af0be9e..822f1a00efbb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -208,20 +208,21 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
208 priv->trans->ops->d3_suspend && 208 priv->trans->ops->d3_suspend &&
209 priv->trans->ops->d3_resume && 209 priv->trans->ops->d3_resume &&
210 device_can_wakeup(priv->trans->dev)) { 210 device_can_wakeup(priv->trans->dev)) {
211 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | 211 priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT |
212 WIPHY_WOWLAN_DISCONNECT | 212 WIPHY_WOWLAN_DISCONNECT |
213 WIPHY_WOWLAN_EAP_IDENTITY_REQ | 213 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
214 WIPHY_WOWLAN_RFKILL_RELEASE; 214 WIPHY_WOWLAN_RFKILL_RELEASE;
215 if (!iwlwifi_mod_params.sw_crypto) 215 if (!iwlwifi_mod_params.sw_crypto)
216 hw->wiphy->wowlan.flags |= 216 priv->wowlan_support.flags |=
217 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | 217 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
218 WIPHY_WOWLAN_GTK_REKEY_FAILURE; 218 WIPHY_WOWLAN_GTK_REKEY_FAILURE;
219 219
220 hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; 220 priv->wowlan_support.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
221 hw->wiphy->wowlan.pattern_min_len = 221 priv->wowlan_support.pattern_min_len =
222 IWLAGN_WOWLAN_MIN_PATTERN_LEN; 222 IWLAGN_WOWLAN_MIN_PATTERN_LEN;
223 hw->wiphy->wowlan.pattern_max_len = 223 priv->wowlan_support.pattern_max_len =
224 IWLAGN_WOWLAN_MAX_PATTERN_LEN; 224 IWLAGN_WOWLAN_MAX_PATTERN_LEN;
225 hw->wiphy->wowlan = &priv->wowlan_support;
225 } 226 }
226#endif 227#endif
227 228
@@ -426,7 +427,11 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
426 if (ret) 427 if (ret)
427 goto error; 428 goto error;
428 429
429 iwl_trans_d3_suspend(priv->trans); 430 /* let the ucode operate on its own */
431 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
432 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
433
434 iwl_trans_d3_suspend(priv->trans, false);
430 435
431 goto out; 436 goto out;
432 437
@@ -500,7 +505,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
500 /* we'll clear ctx->vif during iwlagn_prepare_restart() */ 505 /* we'll clear ctx->vif during iwlagn_prepare_restart() */
501 vif = ctx->vif; 506 vif = ctx->vif;
502 507
503 ret = iwl_trans_d3_resume(priv->trans, &d3_status); 508 ret = iwl_trans_d3_resume(priv->trans, &d3_status, false);
504 if (ret) 509 if (ret)
505 goto out_unlock; 510 goto out_unlock;
506 511
@@ -509,6 +514,10 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
509 goto out_unlock; 514 goto out_unlock;
510 } 515 }
511 516
517 /* uCode is no longer operating by itself */
518 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
519 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
520
512 base = priv->device_pointers.error_event_table; 521 base = priv->device_pointers.error_event_table;
513 if (!iwlagn_hw_valid_rtc_data_addr(base)) { 522 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
514 IWL_WARN(priv, "Invalid error table during resume!\n"); 523 IWL_WARN(priv, "Invalid error table during resume!\n");
@@ -1276,8 +1285,8 @@ static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
1276 IWL_DEBUG_MAC80211(priv, "enter\n"); 1285 IWL_DEBUG_MAC80211(priv, "enter\n");
1277 mutex_lock(&priv->mutex); 1286 mutex_lock(&priv->mutex);
1278 1287
1279 if (priv->cfg->bt_params && 1288 if (priv->lib->bt_params &&
1280 priv->cfg->bt_params->advanced_bt_coexist) { 1289 priv->lib->bt_params->advanced_bt_coexist) {
1281 if (rssi_event == RSSI_EVENT_LOW) 1290 if (rssi_event == RSSI_EVENT_LOW)
1282 priv->bt_enable_pspoll = true; 1291 priv->bt_enable_pspoll = true;
1283 else if (rssi_event == RSSI_EVENT_HIGH) 1292 else if (rssi_event == RSSI_EVENT_HIGH)
@@ -1387,7 +1396,7 @@ static int iwl_setup_interface(struct iwl_priv *priv,
1387 return err; 1396 return err;
1388 } 1397 }
1389 1398
1390 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist && 1399 if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist &&
1391 vif->type == NL80211_IFTYPE_ADHOC) { 1400 vif->type == NL80211_IFTYPE_ADHOC) {
1392 /* 1401 /*
1393 * pretend to have high BT traffic as long as we 1402 * pretend to have high BT traffic as long as we
@@ -1757,8 +1766,6 @@ struct ieee80211_ops iwlagn_hw_ops = {
1757 .remain_on_channel = iwlagn_mac_remain_on_channel, 1766 .remain_on_channel = iwlagn_mac_remain_on_channel,
1758 .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel, 1767 .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
1759 .rssi_callback = iwlagn_mac_rssi_callback, 1768 .rssi_callback = iwlagn_mac_rssi_callback,
1760 CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
1761 CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
1762 .set_tim = iwlagn_mac_set_tim, 1769 .set_tim = iwlagn_mac_set_tim,
1763}; 1770};
1764 1771
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 74d7572e7091..3952ddf2ddb2 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -615,7 +615,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
615 615
616 priv->thermal_throttle.ct_kill_toggle = false; 616 priv->thermal_throttle.ct_kill_toggle = false;
617 617
618 if (priv->cfg->base_params->support_ct_kill_exit) { 618 if (priv->lib->support_ct_kill_exit) {
619 adv_cmd.critical_temperature_enter = 619 adv_cmd.critical_temperature_enter =
620 cpu_to_le32(priv->hw_params.ct_kill_threshold); 620 cpu_to_le32(priv->hw_params.ct_kill_threshold);
621 adv_cmd.critical_temperature_exit = 621 adv_cmd.critical_temperature_exit =
@@ -732,10 +732,10 @@ int iwl_alive_start(struct iwl_priv *priv)
732 } 732 }
733 733
734 /* download priority table before any calibration request */ 734 /* download priority table before any calibration request */
735 if (priv->cfg->bt_params && 735 if (priv->lib->bt_params &&
736 priv->cfg->bt_params->advanced_bt_coexist) { 736 priv->lib->bt_params->advanced_bt_coexist) {
737 /* Configure Bluetooth device coexistence support */ 737 /* Configure Bluetooth device coexistence support */
738 if (priv->cfg->bt_params->bt_sco_disable) 738 if (priv->lib->bt_params->bt_sco_disable)
739 priv->bt_enable_pspoll = false; 739 priv->bt_enable_pspoll = false;
740 else 740 else
741 priv->bt_enable_pspoll = true; 741 priv->bt_enable_pspoll = true;
@@ -873,9 +873,9 @@ void iwl_down(struct iwl_priv *priv)
873 priv->bt_status = 0; 873 priv->bt_status = 0;
874 priv->cur_rssi_ctx = NULL; 874 priv->cur_rssi_ctx = NULL;
875 priv->bt_is_sco = 0; 875 priv->bt_is_sco = 0;
876 if (priv->cfg->bt_params) 876 if (priv->lib->bt_params)
877 priv->bt_traffic_load = 877 priv->bt_traffic_load =
878 priv->cfg->bt_params->bt_init_traffic_load; 878 priv->lib->bt_params->bt_init_traffic_load;
879 else 879 else
880 priv->bt_traffic_load = 0; 880 priv->bt_traffic_load = 0;
881 priv->bt_full_concurrent = false; 881 priv->bt_full_concurrent = false;
@@ -1058,7 +1058,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
1058 1058
1059 iwl_setup_scan_deferred_work(priv); 1059 iwl_setup_scan_deferred_work(priv);
1060 1060
1061 if (priv->cfg->bt_params) 1061 if (priv->lib->bt_params)
1062 iwlagn_bt_setup_deferred_work(priv); 1062 iwlagn_bt_setup_deferred_work(priv);
1063 1063
1064 init_timer(&priv->statistics_periodic); 1064 init_timer(&priv->statistics_periodic);
@@ -1072,7 +1072,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
1072 1072
1073void iwl_cancel_deferred_work(struct iwl_priv *priv) 1073void iwl_cancel_deferred_work(struct iwl_priv *priv)
1074{ 1074{
1075 if (priv->cfg->bt_params) 1075 if (priv->lib->bt_params)
1076 iwlagn_bt_cancel_deferred_work(priv); 1076 iwlagn_bt_cancel_deferred_work(priv);
1077 1077
1078 cancel_work_sync(&priv->run_time_calib_work); 1078 cancel_work_sync(&priv->run_time_calib_work);
@@ -1098,16 +1098,13 @@ static int iwl_init_drv(struct iwl_priv *priv)
1098 1098
1099 priv->band = IEEE80211_BAND_2GHZ; 1099 priv->band = IEEE80211_BAND_2GHZ;
1100 1100
1101 priv->plcp_delta_threshold = 1101 priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold;
1102 priv->cfg->base_params->plcp_delta_threshold;
1103 1102
1104 priv->iw_mode = NL80211_IFTYPE_STATION; 1103 priv->iw_mode = NL80211_IFTYPE_STATION;
1105 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; 1104 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
1106 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; 1105 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
1107 priv->agg_tids_count = 0; 1106 priv->agg_tids_count = 0;
1108 1107
1109 priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
1110
1111 priv->rx_statistics_jiffies = jiffies; 1108 priv->rx_statistics_jiffies = jiffies;
1112 1109
1113 /* Choose which receivers/antennas to use */ 1110 /* Choose which receivers/antennas to use */
@@ -1116,8 +1113,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
1116 iwl_init_scan_params(priv); 1113 iwl_init_scan_params(priv);
1117 1114
1118 /* init bt coex */ 1115 /* init bt coex */
1119 if (priv->cfg->bt_params && 1116 if (priv->lib->bt_params &&
1120 priv->cfg->bt_params->advanced_bt_coexist) { 1117 priv->lib->bt_params->advanced_bt_coexist) {
1121 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; 1118 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
1122 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; 1119 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
1123 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; 1120 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
@@ -1173,12 +1170,6 @@ static void iwl_option_config(struct iwl_priv *priv)
1173 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n"); 1170 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
1174#endif 1171#endif
1175 1172
1176#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
1177 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE enabled\n");
1178#else
1179 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE disabled\n");
1180#endif
1181
1182#ifdef CONFIG_IWLWIFI_P2P 1173#ifdef CONFIG_IWLWIFI_P2P
1183 IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n"); 1174 IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
1184#else 1175#else
@@ -1264,31 +1255,37 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1264 switch (priv->cfg->device_family) { 1255 switch (priv->cfg->device_family) {
1265 case IWL_DEVICE_FAMILY_1000: 1256 case IWL_DEVICE_FAMILY_1000:
1266 case IWL_DEVICE_FAMILY_100: 1257 case IWL_DEVICE_FAMILY_100:
1267 priv->lib = &iwl1000_lib; 1258 priv->lib = &iwl_dvm_1000_cfg;
1268 break; 1259 break;
1269 case IWL_DEVICE_FAMILY_2000: 1260 case IWL_DEVICE_FAMILY_2000:
1261 priv->lib = &iwl_dvm_2000_cfg;
1262 break;
1270 case IWL_DEVICE_FAMILY_105: 1263 case IWL_DEVICE_FAMILY_105:
1271 priv->lib = &iwl2000_lib; 1264 priv->lib = &iwl_dvm_105_cfg;
1272 break; 1265 break;
1273 case IWL_DEVICE_FAMILY_2030: 1266 case IWL_DEVICE_FAMILY_2030:
1274 case IWL_DEVICE_FAMILY_135: 1267 case IWL_DEVICE_FAMILY_135:
1275 priv->lib = &iwl2030_lib; 1268 priv->lib = &iwl_dvm_2030_cfg;
1276 break; 1269 break;
1277 case IWL_DEVICE_FAMILY_5000: 1270 case IWL_DEVICE_FAMILY_5000:
1278 priv->lib = &iwl5000_lib; 1271 priv->lib = &iwl_dvm_5000_cfg;
1279 break; 1272 break;
1280 case IWL_DEVICE_FAMILY_5150: 1273 case IWL_DEVICE_FAMILY_5150:
1281 priv->lib = &iwl5150_lib; 1274 priv->lib = &iwl_dvm_5150_cfg;
1282 break; 1275 break;
1283 case IWL_DEVICE_FAMILY_6000: 1276 case IWL_DEVICE_FAMILY_6000:
1284 case IWL_DEVICE_FAMILY_6005:
1285 case IWL_DEVICE_FAMILY_6000i: 1277 case IWL_DEVICE_FAMILY_6000i:
1278 priv->lib = &iwl_dvm_6000_cfg;
1279 break;
1280 case IWL_DEVICE_FAMILY_6005:
1281 priv->lib = &iwl_dvm_6005_cfg;
1282 break;
1286 case IWL_DEVICE_FAMILY_6050: 1283 case IWL_DEVICE_FAMILY_6050:
1287 case IWL_DEVICE_FAMILY_6150: 1284 case IWL_DEVICE_FAMILY_6150:
1288 priv->lib = &iwl6000_lib; 1285 priv->lib = &iwl_dvm_6050_cfg;
1289 break; 1286 break;
1290 case IWL_DEVICE_FAMILY_6030: 1287 case IWL_DEVICE_FAMILY_6030:
1291 priv->lib = &iwl6030_lib; 1288 priv->lib = &iwl_dvm_6030_cfg;
1292 break; 1289 break;
1293 default: 1290 default:
1294 break; 1291 break;
@@ -1350,8 +1347,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1350 IWL_BT_ANTENNA_COUPLING_THRESHOLD) ? 1347 IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
1351 true : false; 1348 true : false;
1352 1349
1353 /* enable/disable bt channel inhibition */ 1350 /* bt channel inhibition enabled*/
1354 priv->bt_ch_announce = iwlwifi_mod_params.bt_ch_announce; 1351 priv->bt_ch_announce = true;
1355 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n", 1352 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
1356 (priv->bt_ch_announce) ? "On" : "Off"); 1353 (priv->bt_ch_announce) ? "On" : "Off");
1357 1354
@@ -1446,7 +1443,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1446 ********************/ 1443 ********************/
1447 iwl_setup_deferred_work(priv); 1444 iwl_setup_deferred_work(priv);
1448 iwl_setup_rx_handlers(priv); 1445 iwl_setup_rx_handlers(priv);
1449 iwl_testmode_init(priv);
1450 1446
1451 iwl_power_initialize(priv); 1447 iwl_power_initialize(priv);
1452 iwl_tt_initialize(priv); 1448 iwl_tt_initialize(priv);
@@ -1483,7 +1479,6 @@ out_mac80211_unregister:
1483 iwlagn_mac_unregister(priv); 1479 iwlagn_mac_unregister(priv);
1484out_destroy_workqueue: 1480out_destroy_workqueue:
1485 iwl_tt_exit(priv); 1481 iwl_tt_exit(priv);
1486 iwl_testmode_free(priv);
1487 iwl_cancel_deferred_work(priv); 1482 iwl_cancel_deferred_work(priv);
1488 destroy_workqueue(priv->workqueue); 1483 destroy_workqueue(priv->workqueue);
1489 priv->workqueue = NULL; 1484 priv->workqueue = NULL;
@@ -1505,7 +1500,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1505 1500
1506 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); 1501 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
1507 1502
1508 iwl_testmode_free(priv);
1509 iwlagn_mac_unregister(priv); 1503 iwlagn_mac_unregister(priv);
1510 1504
1511 iwl_tt_exit(priv); 1505 iwl_tt_exit(priv);
@@ -1854,14 +1848,9 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1854 return pos; 1848 return pos;
1855 } 1849 }
1856 1850
1857#ifdef CONFIG_IWLWIFI_DEBUG
1858 if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log) 1851 if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
1859 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) 1852 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1860 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; 1853 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1861#else
1862 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1863 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1864#endif
1865 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", 1854 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
1866 size); 1855 size);
1867 1856
@@ -1905,10 +1894,8 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
1905 unsigned int reload_msec; 1894 unsigned int reload_msec;
1906 unsigned long reload_jiffies; 1895 unsigned long reload_jiffies;
1907 1896
1908#ifdef CONFIG_IWLWIFI_DEBUG
1909 if (iwl_have_debug_level(IWL_DL_FW_ERRORS)) 1897 if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
1910 iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS); 1898 iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
1911#endif
1912 1899
1913 /* uCode is no longer loaded. */ 1900 /* uCode is no longer loaded. */
1914 priv->ucode_loaded = false; 1901 priv->ucode_loaded = false;
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index bd69018d07a9..77cb59712235 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -163,7 +163,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
163 u8 skip; 163 u8 skip;
164 u32 slp_itrvl; 164 u32 slp_itrvl;
165 165
166 if (priv->cfg->adv_pm) { 166 if (priv->lib->adv_pm) {
167 table = apm_range_2; 167 table = apm_range_2;
168 if (period <= IWL_DTIM_RANGE_1_MAX) 168 if (period <= IWL_DTIM_RANGE_1_MAX)
169 table = apm_range_1; 169 table = apm_range_1;
@@ -217,7 +217,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
217 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; 217 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
218 218
219 if (iwl_advanced_bt_coexist(priv)) { 219 if (iwl_advanced_bt_coexist(priv)) {
220 if (!priv->cfg->bt_params->bt_sco_disable) 220 if (!priv->lib->bt_params->bt_sco_disable)
221 cmd->flags |= IWL_POWER_BT_SCO_ENA; 221 cmd->flags |= IWL_POWER_BT_SCO_ENA;
222 else 222 else
223 cmd->flags &= ~IWL_POWER_BT_SCO_ENA; 223 cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -293,7 +293,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
293 293
294 if (priv->wowlan) 294 if (priv->wowlan)
295 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); 295 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
296 else if (!priv->cfg->base_params->no_idle_support && 296 else if (!priv->lib->no_idle_support &&
297 priv->hw->conf.flags & IEEE80211_CONF_IDLE) 297 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
298 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); 298 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
299 else if (iwl_tt_is_low_power_state(priv)) { 299 else if (iwl_tt_is_low_power_state(priv)) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 10fbb176cc8e..1b693944123b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -351,12 +351,6 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
351 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 351 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
352 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 352 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
353 353
354#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
355 /* testmode has higher priority to overwirte the fixed rate */
356 if (priv->tm_fixed_rate)
357 lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
358#endif
359
360 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n", 354 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
361 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); 355 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
362 356
@@ -419,23 +413,18 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
419 413
420 load = rs_tl_get_load(lq_data, tid); 414 load = rs_tl_get_load(lq_data, tid);
421 415
422 if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) { 416 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
423 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 417 sta->addr, tid);
424 sta->addr, tid); 418 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
425 ret = ieee80211_start_tx_ba_session(sta, tid, 5000); 419 if (ret == -EAGAIN) {
426 if (ret == -EAGAIN) { 420 /*
427 /* 421 * driver and mac80211 is out of sync
428 * driver and mac80211 is out of sync 422 * this might be cause by reloading firmware
429 * this might be cause by reloading firmware 423 * stop the tx ba session here
430 * stop the tx ba session here 424 */
431 */ 425 IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
432 IWL_ERR(priv, "Fail start Tx agg on tid: %d\n", 426 tid);
433 tid); 427 ieee80211_stop_tx_ba_session(sta, tid);
434 ieee80211_stop_tx_ba_session(sta, tid);
435 }
436 } else {
437 IWL_DEBUG_HT(priv, "Aggregation not enabled for tid %d "
438 "because load = %u\n", tid, load);
439 } 428 }
440 return ret; 429 return ret;
441} 430}
@@ -1083,12 +1072,7 @@ done:
1083 if (sta && sta->supp_rates[sband->band]) 1072 if (sta && sta->supp_rates[sband->band])
1084 rs_rate_scale_perform(priv, skb, sta, lq_sta); 1073 rs_rate_scale_perform(priv, skb, sta, lq_sta);
1085 1074
1086#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE) 1075 if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist)
1087 if ((priv->tm_fixed_rate) &&
1088 (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
1089 rs_program_fix_rate(priv, lq_sta);
1090#endif
1091 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
1092 rs_bt_update_lq(priv, ctx, lq_sta); 1076 rs_bt_update_lq(priv, ctx, lq_sta);
1093} 1077}
1094 1078
@@ -2913,9 +2897,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2913 if (sband->band == IEEE80211_BAND_5GHZ) 2897 if (sband->band == IEEE80211_BAND_5GHZ)
2914 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 2898 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2915 lq_sta->is_agg = 0; 2899 lq_sta->is_agg = 0;
2916#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
2917 priv->tm_fixed_rate = 0;
2918#endif
2919#ifdef CONFIG_MAC80211_DEBUGFS 2900#ifdef CONFIG_MAC80211_DEBUGFS
2920 lq_sta->dbg_fixed_rate = 0; 2901 lq_sta->dbg_fixed_rate = 0;
2921#endif 2902#endif
@@ -3064,11 +3045,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3064 * overwrite if needed, pass aggregation time limit 3045 * overwrite if needed, pass aggregation time limit
3065 * to uCode in uSec 3046 * to uCode in uSec
3066 */ 3047 */
3067 if (priv && priv->cfg->bt_params && 3048 if (priv && priv->lib->bt_params &&
3068 priv->cfg->bt_params->agg_time_limit && 3049 priv->lib->bt_params->agg_time_limit &&
3069 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) 3050 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
3070 lq_cmd->agg_params.agg_time_limit = 3051 lq_cmd->agg_params.agg_time_limit =
3071 cpu_to_le16(priv->cfg->bt_params->agg_time_limit); 3052 cpu_to_le16(priv->lib->bt_params->agg_time_limit);
3072} 3053}
3073 3054
3074static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 3055static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index a4eed2055fdb..d71776dd1e6a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -335,8 +335,7 @@ static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
335 if (msecs < 99) 335 if (msecs < 99)
336 return; 336 return;
337 337
338 if (iwlwifi_mod_params.plcp_check && 338 if (!iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
339 !iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
340 iwl_force_rf_reset(priv, false); 339 iwl_force_rf_reset(priv, false);
341} 340}
342 341
@@ -1102,7 +1101,7 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
1102 iwl_notification_wait_init(&priv->notif_wait); 1101 iwl_notification_wait_init(&priv->notif_wait);
1103 1102
1104 /* Set up BT Rx handlers */ 1103 /* Set up BT Rx handlers */
1105 if (priv->cfg->bt_params) 1104 if (priv->lib->bt_params)
1106 iwlagn_bt_rx_handler_setup(priv); 1105 iwlagn_bt_rx_handler_setup(priv);
1107} 1106}
1108 1107
@@ -1120,32 +1119,17 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
1120 */ 1119 */
1121 iwl_notification_wait_notify(&priv->notif_wait, pkt); 1120 iwl_notification_wait_notify(&priv->notif_wait, pkt);
1122 1121
1123#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 1122 /* Based on type of command response or notification,
1124 /* 1123 * handle those that need handling via function in
1125 * RX data may be forwarded to userspace in one 1124 * rx_handlers table. See iwl_setup_rx_handlers() */
1126 * of two cases: the user owns the fw through testmode or when 1125 if (priv->rx_handlers[pkt->hdr.cmd]) {
1127 * the user requested to monitor the rx w/o affecting the regular flow. 1126 priv->rx_handlers_stats[pkt->hdr.cmd]++;
1128 * In these cases the iwl_test object will handle forwarding the rx 1127 err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
1129 * data to user space. 1128 } else {
1130 * Note that if the ownership flag != IWL_OWNERSHIP_TM the flow 1129 /* No handling needed */
1131 * continues. 1130 IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
1132 */ 1131 iwl_dvm_get_cmd_string(pkt->hdr.cmd),
1133 iwl_test_rx(&priv->tst, rxb); 1132 pkt->hdr.cmd);
1134#endif
1135
1136 if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
1137 /* Based on type of command response or notification,
1138 * handle those that need handling via function in
1139 * rx_handlers table. See iwl_setup_rx_handlers() */
1140 if (priv->rx_handlers[pkt->hdr.cmd]) {
1141 priv->rx_handlers_stats[pkt->hdr.cmd]++;
1142 err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
1143 } else {
1144 /* No handling needed */
1145 IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
1146 iwl_dvm_get_cmd_string(pkt->hdr.cmd),
1147 pkt->hdr.cmd);
1148 }
1149 } 1133 }
1150 return err; 1134 return err;
1151} 1135}
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index d69b55866714..8c686a5b90ac 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -801,8 +801,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
801 * Internal scans are passive, so we can indiscriminately set 801 * Internal scans are passive, so we can indiscriminately set
802 * the BT ignore flag on 2.4 GHz since it applies to TX only. 802 * the BT ignore flag on 2.4 GHz since it applies to TX only.
803 */ 803 */
804 if (priv->cfg->bt_params && 804 if (priv->lib->bt_params &&
805 priv->cfg->bt_params->advanced_bt_coexist) 805 priv->lib->bt_params->advanced_bt_coexist)
806 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; 806 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
807 break; 807 break;
808 case IEEE80211_BAND_5GHZ: 808 case IEEE80211_BAND_5GHZ:
@@ -844,8 +844,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
844 band = priv->scan_band; 844 band = priv->scan_band;
845 845
846 if (band == IEEE80211_BAND_2GHZ && 846 if (band == IEEE80211_BAND_2GHZ &&
847 priv->cfg->bt_params && 847 priv->lib->bt_params &&
848 priv->cfg->bt_params->advanced_bt_coexist) { 848 priv->lib->bt_params->advanced_bt_coexist) {
849 /* transmit 2.4 GHz probes only on first antenna */ 849 /* transmit 2.4 GHz probes only on first antenna */
850 scan_tx_antennas = first_antenna(scan_tx_antennas); 850 scan_tx_antennas = first_antenna(scan_tx_antennas);
851 } 851 }
@@ -873,8 +873,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
873 873
874 rx_ant = first_antenna(active_chains); 874 rx_ant = first_antenna(active_chains);
875 } 875 }
876 if (priv->cfg->bt_params && 876 if (priv->lib->bt_params &&
877 priv->cfg->bt_params->advanced_bt_coexist && 877 priv->lib->bt_params->advanced_bt_coexist &&
878 priv->bt_full_concurrent) { 878 priv->bt_full_concurrent) {
879 /* operated as 1x1 in full concurrency mode */ 879 /* operated as 1x1 in full concurrency mode */
880 rx_ant = first_antenna(rx_ant); 880 rx_ant = first_antenna(rx_ant);
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
deleted file mode 100644
index b89b9d9b9969..000000000000
--- a/drivers/net/wireless/iwlwifi/dvm/testmode.c
+++ /dev/null
@@ -1,471 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/init.h>
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/dma-mapping.h>
68#include <net/net_namespace.h>
69#include <linux/netdevice.h>
70#include <net/cfg80211.h>
71#include <net/mac80211.h>
72#include <net/netlink.h>
73
74#include "iwl-debug.h"
75#include "iwl-trans.h"
76#include "dev.h"
77#include "agn.h"
78#include "iwl-test.h"
79#include "iwl-testmode.h"
80
81static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode,
82 struct iwl_host_cmd *cmd)
83{
84 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
85 return iwl_dvm_send_cmd(priv, cmd);
86}
87
88static bool iwl_testmode_valid_hw_addr(u32 addr)
89{
90 if (iwlagn_hw_valid_rtc_data_addr(addr))
91 return true;
92
93 if (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
94 addr < IWLAGN_RTC_INST_UPPER_BOUND)
95 return true;
96
97 return false;
98}
99
100static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode)
101{
102 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
103 return priv->fw->ucode_ver;
104}
105
106static struct sk_buff*
107iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len)
108{
109 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
110 return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len);
111}
112
113static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb)
114{
115 return cfg80211_testmode_reply(skb);
116}
117
118static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode,
119 int len)
120{
121 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
122 return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len,
123 GFP_ATOMIC);
124}
125
126static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb)
127{
128 return cfg80211_testmode_event(skb, GFP_ATOMIC);
129}
130
131static struct iwl_test_ops tst_ops = {
132 .send_cmd = iwl_testmode_send_cmd,
133 .valid_hw_addr = iwl_testmode_valid_hw_addr,
134 .get_fw_ver = iwl_testmode_get_fw_ver,
135 .alloc_reply = iwl_testmode_alloc_reply,
136 .reply = iwl_testmode_reply,
137 .alloc_event = iwl_testmode_alloc_event,
138 .event = iwl_testmode_event,
139};
140
141void iwl_testmode_init(struct iwl_priv *priv)
142{
143 iwl_test_init(&priv->tst, priv->trans, &tst_ops);
144}
145
146void iwl_testmode_free(struct iwl_priv *priv)
147{
148 iwl_test_free(&priv->tst);
149}
150
151static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
152{
153 struct iwl_notification_wait calib_wait;
154 static const u8 calib_complete[] = {
155 CALIBRATION_COMPLETE_NOTIFICATION
156 };
157 int ret;
158
159 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
160 calib_complete, ARRAY_SIZE(calib_complete),
161 NULL, NULL);
162 ret = iwl_init_alive_start(priv);
163 if (ret) {
164 IWL_ERR(priv, "Fail init calibration: %d\n", ret);
165 goto cfg_init_calib_error;
166 }
167
168 ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
169 if (ret)
170 IWL_ERR(priv, "Error detecting"
171 " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
172 return ret;
173
174cfg_init_calib_error:
175 iwl_remove_notification(&priv->notif_wait, &calib_wait);
176 return ret;
177}
178
179/*
180 * This function handles the user application commands for driver.
181 *
182 * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
183 * handlers respectively.
184 *
185 * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
186 * value of the actual command execution is replied to the user application.
187 *
188 * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
189 * is used for carry the message while IWL_TM_ATTR_COMMAND must set to
190 * IWL_TM_CMD_DEV2APP_SYNC_RSP.
191 *
192 * @hw: ieee80211_hw object that represents the device
193 * @tb: gnl message fields from the user space
194 */
195static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
196{
197 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
198 struct iwl_trans *trans = priv->trans;
199 struct sk_buff *skb;
200 unsigned char *rsp_data_ptr = NULL;
201 int status = 0, rsp_data_len = 0;
202 u32 inst_size = 0, data_size = 0;
203 const struct fw_img *img;
204
205 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
206 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
207 rsp_data_ptr = (unsigned char *)priv->cfg->name;
208 rsp_data_len = strlen(priv->cfg->name);
209 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
210 rsp_data_len + 20);
211 if (!skb) {
212 IWL_ERR(priv, "Memory allocation fail\n");
213 return -ENOMEM;
214 }
215 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
216 IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
217 nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
218 rsp_data_len, rsp_data_ptr))
219 goto nla_put_failure;
220 status = cfg80211_testmode_reply(skb);
221 if (status < 0)
222 IWL_ERR(priv, "Error sending msg : %d\n", status);
223 break;
224
225 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
226 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
227 if (status)
228 IWL_ERR(priv, "Error loading init ucode: %d\n", status);
229 break;
230
231 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
232 iwl_testmode_cfg_init_calib(priv);
233 priv->ucode_loaded = false;
234 iwl_trans_stop_device(trans);
235 break;
236
237 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
238 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
239 if (status) {
240 IWL_ERR(priv,
241 "Error loading runtime ucode: %d\n", status);
242 break;
243 }
244 status = iwl_alive_start(priv);
245 if (status)
246 IWL_ERR(priv,
247 "Error starting the device: %d\n", status);
248 break;
249
250 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
251 iwl_scan_cancel_timeout(priv, 200);
252 priv->ucode_loaded = false;
253 iwl_trans_stop_device(trans);
254 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
255 if (status) {
256 IWL_ERR(priv,
257 "Error loading WOWLAN ucode: %d\n", status);
258 break;
259 }
260 status = iwl_alive_start(priv);
261 if (status)
262 IWL_ERR(priv,
263 "Error starting the device: %d\n", status);
264 break;
265
266 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
267 if (priv->eeprom_blob) {
268 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
269 priv->eeprom_blob_size + 20);
270 if (!skb) {
271 IWL_ERR(priv, "Memory allocation fail\n");
272 return -ENOMEM;
273 }
274 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
275 IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
276 nla_put(skb, IWL_TM_ATTR_EEPROM,
277 priv->eeprom_blob_size,
278 priv->eeprom_blob))
279 goto nla_put_failure;
280 status = cfg80211_testmode_reply(skb);
281 if (status < 0)
282 IWL_ERR(priv, "Error sending msg : %d\n",
283 status);
284 } else
285 return -ENODATA;
286 break;
287
288 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
289 if (!tb[IWL_TM_ATTR_FIXRATE]) {
290 IWL_ERR(priv, "Missing fixrate setting\n");
291 return -ENOMSG;
292 }
293 priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
294 break;
295
296 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
297 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
298 if (!skb) {
299 IWL_ERR(priv, "Memory allocation fail\n");
300 return -ENOMEM;
301 }
302 if (!priv->ucode_loaded) {
303 IWL_ERR(priv, "No uCode has not been loaded\n");
304 return -EINVAL;
305 } else {
306 img = &priv->fw->img[priv->cur_ucode];
307 inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
308 data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
309 }
310 if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
311 nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
312 nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
313 goto nla_put_failure;
314 status = cfg80211_testmode_reply(skb);
315 if (status < 0)
316 IWL_ERR(priv, "Error sending msg : %d\n", status);
317 break;
318
319 default:
320 IWL_ERR(priv, "Unknown testmode driver command ID\n");
321 return -ENOSYS;
322 }
323 return status;
324
325nla_put_failure:
326 kfree_skb(skb);
327 return -EMSGSIZE;
328}
329
330/*
331 * This function handles the user application switch ucode ownership.
332 *
333 * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_OWNER and
334 * decide who the current owner of the uCode
335 *
336 * If the current owner is OWNERSHIP_TM, then the only host command
337 * can deliver to uCode is from testmode, all the other host commands
338 * will dropped.
339 *
340 * default driver is the owner of uCode in normal operational mode
341 *
342 * @hw: ieee80211_hw object that represents the device
343 * @tb: gnl message fields from the user space
344 */
345static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
346{
347 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
348 u8 owner;
349
350 if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
351 IWL_ERR(priv, "Missing ucode owner\n");
352 return -ENOMSG;
353 }
354
355 owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
356 if (owner == IWL_OWNERSHIP_DRIVER) {
357 priv->ucode_owner = owner;
358 iwl_test_enable_notifications(&priv->tst, false);
359 } else if (owner == IWL_OWNERSHIP_TM) {
360 priv->ucode_owner = owner;
361 iwl_test_enable_notifications(&priv->tst, true);
362 } else {
363 IWL_ERR(priv, "Invalid owner\n");
364 return -EINVAL;
365 }
366 return 0;
367}
368
369/* The testmode gnl message handler that takes the gnl message from the
370 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
371 * invoke the corresponding handlers.
372 *
373 * This function is invoked when there is user space application sending
374 * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated
375 * by nl80211.
376 *
377 * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
378 * dispatching it to the corresponding handler.
379 *
380 * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
381 * -ENOSYS is replied to the user application if the command is unknown;
382 * Otherwise, the command is dispatched to the respective handler.
383 *
384 * @hw: ieee80211_hw object that represents the device
385 * @data: pointer to user space message
386 * @len: length in byte of @data
387 */
388int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
389{
390 struct nlattr *tb[IWL_TM_ATTR_MAX];
391 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
392 int result;
393
394 result = iwl_test_parse(&priv->tst, tb, data, len);
395 if (result)
396 return result;
397
398 /* in case multiple accesses to the device happens */
399 mutex_lock(&priv->mutex);
400 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
401 case IWL_TM_CMD_APP2DEV_UCODE:
402 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
403 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
404 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
405 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
406 case IWL_TM_CMD_APP2DEV_END_TRACE:
407 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
408 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
409 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
410 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
411 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
412 result = iwl_test_handle_cmd(&priv->tst, tb);
413 break;
414
415 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
416 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
417 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
418 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
419 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
420 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
421 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
422 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
423 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
424 result = iwl_testmode_driver(hw, tb);
425 break;
426
427 case IWL_TM_CMD_APP2DEV_OWNERSHIP:
428 IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
429 result = iwl_testmode_ownership(hw, tb);
430 break;
431
432 default:
433 IWL_ERR(priv, "Unknown testmode command\n");
434 result = -ENOSYS;
435 break;
436 }
437 mutex_unlock(&priv->mutex);
438
439 if (result)
440 IWL_ERR(priv, "Test cmd failed result=%d\n", result);
441 return result;
442}
443
444int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
445 struct netlink_callback *cb,
446 void *data, int len)
447{
448 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
449 int result;
450 u32 cmd;
451
452 if (cb->args[3]) {
453 /* offset by 1 since commands start at 0 */
454 cmd = cb->args[3] - 1;
455 } else {
456 struct nlattr *tb[IWL_TM_ATTR_MAX];
457
458 result = iwl_test_parse(&priv->tst, tb, data, len);
459 if (result)
460 return result;
461
462 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
463 cb->args[3] = cmd + 1;
464 }
465
466 /* in case multiple accesses to the device happens */
467 mutex_lock(&priv->mutex);
468 result = iwl_test_dump(&priv->tst, cmd, skb, cb);
469 mutex_unlock(&priv->mutex);
470 return result;
471}
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index 03f9bc01c0cc..fbeee081ee2f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -627,7 +627,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
627 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); 627 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
628 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); 628 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
629 629
630 if (priv->cfg->base_params->adv_thermal_throttle) { 630 if (priv->lib->adv_thermal_throttle) {
631 IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); 631 IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
632 tt->restriction = kcalloc(IWL_TI_STATE_MAX, 632 tt->restriction = kcalloc(IWL_TI_STATE_MAX,
633 sizeof(struct iwl_tt_restriction), 633 sizeof(struct iwl_tt_restriction),
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index a900aaf47790..5ee983faa679 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -83,8 +83,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
83 else if (ieee80211_is_back_req(fc)) 83 else if (ieee80211_is_back_req(fc))
84 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; 84 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
85 else if (info->band == IEEE80211_BAND_2GHZ && 85 else if (info->band == IEEE80211_BAND_2GHZ &&
86 priv->cfg->bt_params && 86 priv->lib->bt_params &&
87 priv->cfg->bt_params->advanced_bt_coexist && 87 priv->lib->bt_params->advanced_bt_coexist &&
88 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || 88 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
89 ieee80211_is_reassoc_req(fc) || 89 ieee80211_is_reassoc_req(fc) ||
90 skb->protocol == cpu_to_be16(ETH_P_PAE))) 90 skb->protocol == cpu_to_be16(ETH_P_PAE)))
@@ -162,18 +162,6 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
162 if (ieee80211_is_data(fc)) { 162 if (ieee80211_is_data(fc)) {
163 tx_cmd->initial_rate_index = 0; 163 tx_cmd->initial_rate_index = 0;
164 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; 164 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
165#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
166 if (priv->tm_fixed_rate) {
167 /*
168 * rate overwrite by testmode
169 * we not only send lq command to change rate
170 * we also re-enforce per data pkt base.
171 */
172 tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
173 memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
174 sizeof(tx_cmd->rate_n_flags));
175 }
176#endif
177 return; 165 return;
178 } else if (ieee80211_is_back_req(fc)) 166 } else if (ieee80211_is_back_req(fc))
179 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; 167 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
@@ -202,8 +190,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
202 rate_flags |= RATE_MCS_CCK_MSK; 190 rate_flags |= RATE_MCS_CCK_MSK;
203 191
204 /* Set up antennas */ 192 /* Set up antennas */
205 if (priv->cfg->bt_params && 193 if (priv->lib->bt_params &&
206 priv->cfg->bt_params->advanced_bt_coexist && 194 priv->lib->bt_params->advanced_bt_coexist &&
207 priv->bt_full_concurrent) { 195 priv->bt_full_concurrent) {
208 /* operated as 1x1 in full concurrency mode */ 196 /* operated as 1x1 in full concurrency mode */
209 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 197 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
@@ -986,8 +974,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
986 * notification again. 974 * notification again.
987 */ 975 */
988 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && 976 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
989 priv->cfg->bt_params && 977 priv->lib->bt_params &&
990 priv->cfg->bt_params->advanced_bt_coexist) { 978 priv->lib->bt_params->advanced_bt_coexist) {
991 IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n"); 979 IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
992 } 980 }
993 981
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 0a1cdc5e856b..86270b69cd02 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -132,8 +132,8 @@ int iwl_init_alive_start(struct iwl_priv *priv)
132{ 132{
133 int ret; 133 int ret;
134 134
135 if (priv->cfg->bt_params && 135 if (priv->lib->bt_params &&
136 priv->cfg->bt_params->advanced_bt_coexist) { 136 priv->lib->bt_params->advanced_bt_coexist) {
137 /* 137 /*
138 * Tell uCode we are ready to perform calibration 138 * Tell uCode we are ready to perform calibration
139 * need to perform this before any calibration 139 * need to perform this before any calibration
@@ -155,8 +155,8 @@ int iwl_init_alive_start(struct iwl_priv *priv)
155 * temperature offset calibration is only needed for runtime ucode, 155 * temperature offset calibration is only needed for runtime ucode,
156 * so prepare the value now. 156 * so prepare the value now.
157 */ 157 */
158 if (priv->cfg->need_temp_offset_calib) { 158 if (priv->lib->need_temp_offset_calib) {
159 if (priv->cfg->temp_offset_v2) 159 if (priv->lib->temp_offset_v2)
160 return iwl_set_temperature_offset_calib_v2(priv); 160 return iwl_set_temperature_offset_calib_v2(priv);
161 else 161 else
162 return iwl_set_temperature_offset_calib(priv); 162 return iwl_set_temperature_offset_calib(priv);
@@ -277,7 +277,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
277 if (ret) 277 if (ret)
278 return ret; 278 return ret;
279 279
280 if (!priv->cfg->no_xtal_calib) { 280 if (!priv->lib->no_xtal_calib) {
281 ret = iwl_set_Xtal_calib(priv); 281 ret = iwl_set_Xtal_calib(priv);
282 if (ret) 282 if (ret)
283 return ret; 283 return ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index c080ae3070b2..0d2afe098afc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -60,9 +60,6 @@ static const struct iwl_base_params iwl1000_base_params = {
60 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 60 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
61 .shadow_ram_support = false, 61 .shadow_ram_support = false,
62 .led_compensation = 51, 62 .led_compensation = 51,
63 .support_ct_kill_exit = true,
64 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
65 .chain_noise_scale = 1000,
66 .wd_timeout = IWL_WATCHDOG_DISABLED, 63 .wd_timeout = IWL_WATCHDOG_DISABLED,
67 .max_event_log_size = 128, 64 .max_event_log_size = 128,
68}; 65};
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index a6ddd2f9fba0..c727ec7c90a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -72,14 +72,9 @@ static const struct iwl_base_params iwl2000_base_params = {
72 .max_ll_items = OTP_MAX_LL_ITEMS_2x00, 72 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
73 .shadow_ram_support = true, 73 .shadow_ram_support = true,
74 .led_compensation = 51, 74 .led_compensation = 51,
75 .adv_thermal_throttle = true,
76 .support_ct_kill_exit = true,
77 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
78 .chain_noise_scale = 1000,
79 .wd_timeout = IWL_DEF_WD_TIMEOUT, 75 .wd_timeout = IWL_DEF_WD_TIMEOUT,
80 .max_event_log_size = 512, 76 .max_event_log_size = 512,
81 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ 77 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
82 .hd_v2 = true,
83}; 78};
84 79
85 80
@@ -90,14 +85,9 @@ static const struct iwl_base_params iwl2030_base_params = {
90 .max_ll_items = OTP_MAX_LL_ITEMS_2x00, 85 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
91 .shadow_ram_support = true, 86 .shadow_ram_support = true,
92 .led_compensation = 57, 87 .led_compensation = 57,
93 .adv_thermal_throttle = true,
94 .support_ct_kill_exit = true,
95 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
96 .chain_noise_scale = 1000,
97 .wd_timeout = IWL_LONG_WD_TIMEOUT, 88 .wd_timeout = IWL_LONG_WD_TIMEOUT,
98 .max_event_log_size = 512, 89 .max_event_log_size = 512,
99 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ 90 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
100 .hd_v2 = true,
101}; 91};
102 92
103static const struct iwl_ht_params iwl2000_ht_params = { 93static const struct iwl_ht_params iwl2000_ht_params = {
@@ -106,16 +96,6 @@ static const struct iwl_ht_params iwl2000_ht_params = {
106 .ht40_bands = BIT(IEEE80211_BAND_2GHZ), 96 .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
107}; 97};
108 98
109static const struct iwl_bt_params iwl2030_bt_params = {
110 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
111 .advanced_bt_coexist = true,
112 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
113 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
114 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
115 .bt_sco_disable = true,
116 .bt_session_2 = true,
117};
118
119static const struct iwl_eeprom_params iwl20x0_eeprom_params = { 99static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
120 .regulatory_bands = { 100 .regulatory_bands = {
121 EEPROM_REG_BAND_1_CHANNELS, 101 EEPROM_REG_BAND_1_CHANNELS,
@@ -137,12 +117,10 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
137 .device_family = IWL_DEVICE_FAMILY_2000, \ 117 .device_family = IWL_DEVICE_FAMILY_2000, \
138 .max_inst_size = IWL60_RTC_INST_SIZE, \ 118 .max_inst_size = IWL60_RTC_INST_SIZE, \
139 .max_data_size = IWL60_RTC_DATA_SIZE, \ 119 .max_data_size = IWL60_RTC_DATA_SIZE, \
140 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ 120 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
141 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 121 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
142 .base_params = &iwl2000_base_params, \ 122 .base_params = &iwl2000_base_params, \
143 .eeprom_params = &iwl20x0_eeprom_params, \ 123 .eeprom_params = &iwl20x0_eeprom_params, \
144 .need_temp_offset_calib = true, \
145 .temp_offset_v2 = true, \
146 .led_mode = IWL_LED_RF_STATE 124 .led_mode = IWL_LED_RF_STATE
147 125
148const struct iwl_cfg iwl2000_2bgn_cfg = { 126const struct iwl_cfg iwl2000_2bgn_cfg = {
@@ -168,12 +146,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
168 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ 146 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
169 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 147 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
170 .base_params = &iwl2030_base_params, \ 148 .base_params = &iwl2030_base_params, \
171 .bt_params = &iwl2030_bt_params, \
172 .eeprom_params = &iwl20x0_eeprom_params, \ 149 .eeprom_params = &iwl20x0_eeprom_params, \
173 .need_temp_offset_calib = true, \ 150 .led_mode = IWL_LED_RF_STATE
174 .temp_offset_v2 = true, \
175 .led_mode = IWL_LED_RF_STATE, \
176 .adv_pm = true
177 151
178const struct iwl_cfg iwl2030_2bgn_cfg = { 152const struct iwl_cfg iwl2030_2bgn_cfg = {
179 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", 153 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -193,10 +167,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
193 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 167 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
194 .base_params = &iwl2000_base_params, \ 168 .base_params = &iwl2000_base_params, \
195 .eeprom_params = &iwl20x0_eeprom_params, \ 169 .eeprom_params = &iwl20x0_eeprom_params, \
196 .need_temp_offset_calib = true, \
197 .temp_offset_v2 = true, \
198 .led_mode = IWL_LED_RF_STATE, \ 170 .led_mode = IWL_LED_RF_STATE, \
199 .adv_pm = true, \
200 .rx_with_siso_diversity = true 171 .rx_with_siso_diversity = true
201 172
202const struct iwl_cfg iwl105_bgn_cfg = { 173const struct iwl_cfg iwl105_bgn_cfg = {
@@ -222,12 +193,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
222 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ 193 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
223 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 194 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
224 .base_params = &iwl2030_base_params, \ 195 .base_params = &iwl2030_base_params, \
225 .bt_params = &iwl2030_bt_params, \
226 .eeprom_params = &iwl20x0_eeprom_params, \ 196 .eeprom_params = &iwl20x0_eeprom_params, \
227 .need_temp_offset_calib = true, \
228 .temp_offset_v2 = true, \
229 .led_mode = IWL_LED_RF_STATE, \ 197 .led_mode = IWL_LED_RF_STATE, \
230 .adv_pm = true, \
231 .rx_with_siso_diversity = true 198 .rx_with_siso_diversity = true
232 199
233const struct iwl_cfg iwl135_bgn_cfg = { 200const struct iwl_cfg iwl135_bgn_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 403f3f224bf6..ecc01e1a61a1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -59,11 +59,8 @@ static const struct iwl_base_params iwl5000_base_params = {
59 .num_of_queues = IWLAGN_NUM_QUEUES, 59 .num_of_queues = IWLAGN_NUM_QUEUES,
60 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 60 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
61 .led_compensation = 51, 61 .led_compensation = 51,
62 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
63 .chain_noise_scale = 1000,
64 .wd_timeout = IWL_WATCHDOG_DISABLED, 62 .wd_timeout = IWL_WATCHDOG_DISABLED,
65 .max_event_log_size = 512, 63 .max_event_log_size = 512,
66 .no_idle_support = true,
67}; 64};
68 65
69static const struct iwl_ht_params iwl5000_ht_params = { 66static const struct iwl_ht_params iwl5000_ht_params = {
@@ -159,7 +156,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
159 .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ 156 .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
160 .base_params = &iwl5000_base_params, \ 157 .base_params = &iwl5000_base_params, \
161 .eeprom_params = &iwl5000_eeprom_params, \ 158 .eeprom_params = &iwl5000_eeprom_params, \
162 .no_xtal_calib = true, \
163 .led_mode = IWL_LED_BLINK, \ 159 .led_mode = IWL_LED_BLINK, \
164 .internal_wimax_coex = true 160 .internal_wimax_coex = true
165 161
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index b5ab8d1bcac0..30d45e2fc193 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -82,10 +82,6 @@ static const struct iwl_base_params iwl6000_base_params = {
82 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 82 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
83 .shadow_ram_support = true, 83 .shadow_ram_support = true,
84 .led_compensation = 51, 84 .led_compensation = 51,
85 .adv_thermal_throttle = true,
86 .support_ct_kill_exit = true,
87 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
88 .chain_noise_scale = 1000,
89 .wd_timeout = IWL_DEF_WD_TIMEOUT, 85 .wd_timeout = IWL_DEF_WD_TIMEOUT,
90 .max_event_log_size = 512, 86 .max_event_log_size = 512,
91 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ 87 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
@@ -98,10 +94,6 @@ static const struct iwl_base_params iwl6050_base_params = {
98 .max_ll_items = OTP_MAX_LL_ITEMS_6x50, 94 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
99 .shadow_ram_support = true, 95 .shadow_ram_support = true,
100 .led_compensation = 51, 96 .led_compensation = 51,
101 .adv_thermal_throttle = true,
102 .support_ct_kill_exit = true,
103 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
104 .chain_noise_scale = 1500,
105 .wd_timeout = IWL_DEF_WD_TIMEOUT, 97 .wd_timeout = IWL_DEF_WD_TIMEOUT,
106 .max_event_log_size = 1024, 98 .max_event_log_size = 1024,
107 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ 99 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
@@ -114,10 +106,6 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
114 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 106 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
115 .shadow_ram_support = true, 107 .shadow_ram_support = true,
116 .led_compensation = 57, 108 .led_compensation = 57,
117 .adv_thermal_throttle = true,
118 .support_ct_kill_exit = true,
119 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
120 .chain_noise_scale = 1000,
121 .wd_timeout = IWL_LONG_WD_TIMEOUT, 109 .wd_timeout = IWL_LONG_WD_TIMEOUT,
122 .max_event_log_size = 512, 110 .max_event_log_size = 512,
123 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ 111 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
@@ -129,15 +117,6 @@ static const struct iwl_ht_params iwl6000_ht_params = {
129 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), 117 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
130}; 118};
131 119
132static const struct iwl_bt_params iwl6000_bt_params = {
133 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
134 .advanced_bt_coexist = true,
135 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
136 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
137 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
138 .bt_sco_disable = true,
139};
140
141static const struct iwl_eeprom_params iwl6000_eeprom_params = { 120static const struct iwl_eeprom_params iwl6000_eeprom_params = {
142 .regulatory_bands = { 121 .regulatory_bands = {
143 EEPROM_REG_BAND_1_CHANNELS, 122 EEPROM_REG_BAND_1_CHANNELS,
@@ -163,7 +142,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
163 .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ 142 .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
164 .base_params = &iwl6000_g2_base_params, \ 143 .base_params = &iwl6000_g2_base_params, \
165 .eeprom_params = &iwl6000_eeprom_params, \ 144 .eeprom_params = &iwl6000_eeprom_params, \
166 .need_temp_offset_calib = true, \
167 .led_mode = IWL_LED_RF_STATE 145 .led_mode = IWL_LED_RF_STATE
168 146
169const struct iwl_cfg iwl6005_2agn_cfg = { 147const struct iwl_cfg iwl6005_2agn_cfg = {
@@ -217,11 +195,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
217 .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ 195 .nvm_ver = EEPROM_6030_EEPROM_VERSION, \
218 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 196 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
219 .base_params = &iwl6000_g2_base_params, \ 197 .base_params = &iwl6000_g2_base_params, \
220 .bt_params = &iwl6000_bt_params, \
221 .eeprom_params = &iwl6000_eeprom_params, \ 198 .eeprom_params = &iwl6000_eeprom_params, \
222 .need_temp_offset_calib = true, \ 199 .led_mode = IWL_LED_RF_STATE
223 .led_mode = IWL_LED_RF_STATE, \
224 .adv_pm = true \
225 200
226const struct iwl_cfg iwl6030_2agn_cfg = { 201const struct iwl_cfg iwl6030_2agn_cfg = {
227 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", 202 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -256,11 +231,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
256 .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ 231 .nvm_ver = EEPROM_6030_EEPROM_VERSION, \
257 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 232 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
258 .base_params = &iwl6000_g2_base_params, \ 233 .base_params = &iwl6000_g2_base_params, \
259 .bt_params = &iwl6000_bt_params, \
260 .eeprom_params = &iwl6000_eeprom_params, \ 234 .eeprom_params = &iwl6000_eeprom_params, \
261 .need_temp_offset_calib = true, \ 235 .led_mode = IWL_LED_RF_STATE
262 .led_mode = IWL_LED_RF_STATE, \
263 .adv_pm = true
264 236
265const struct iwl_cfg iwl6035_2agn_cfg = { 237const struct iwl_cfg iwl6035_2agn_cfg = {
266 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", 238 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 50263e87fe15..22b7fa5b971a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,16 +67,16 @@
67#include "iwl-agn-hw.h" 67#include "iwl-agn-hw.h"
68 68
69/* Highest firmware API version supported */ 69/* Highest firmware API version supported */
70#define IWL7260_UCODE_API_MAX 6 70#define IWL7260_UCODE_API_MAX 7
71#define IWL3160_UCODE_API_MAX 6 71#define IWL3160_UCODE_API_MAX 7
72 72
73/* Oldest version we won't warn about */ 73/* Oldest version we won't warn about */
74#define IWL7260_UCODE_API_OK 6 74#define IWL7260_UCODE_API_OK 7
75#define IWL3160_UCODE_API_OK 6 75#define IWL3160_UCODE_API_OK 7
76 76
77/* Lowest firmware API version supported */ 77/* Lowest firmware API version supported */
78#define IWL7260_UCODE_API_MIN 6 78#define IWL7260_UCODE_API_MIN 7
79#define IWL3160_UCODE_API_MIN 6 79#define IWL3160_UCODE_API_MIN 7
80 80
81/* NVM versions */ 81/* NVM versions */
82#define IWL7260_NVM_VERSION 0x0a1d 82#define IWL7260_NVM_VERSION 0x0a1d
@@ -96,13 +96,9 @@ static const struct iwl_base_params iwl7000_base_params = {
96 .pll_cfg_val = 0, 96 .pll_cfg_val = 0,
97 .shadow_ram_support = true, 97 .shadow_ram_support = true,
98 .led_compensation = 57, 98 .led_compensation = 57,
99 .adv_thermal_throttle = true,
100 .support_ct_kill_exit = true,
101 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
102 .chain_noise_scale = 1000,
103 .wd_timeout = IWL_LONG_WD_TIMEOUT, 99 .wd_timeout = IWL_LONG_WD_TIMEOUT,
104 .max_event_log_size = 512, 100 .max_event_log_size = 512,
105 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ 101 .shadow_reg_enable = true,
106}; 102};
107 103
108static const struct iwl_ht_params iwl7000_ht_params = { 104static const struct iwl_ht_params iwl7000_ht_params = {
@@ -118,14 +114,11 @@ static const struct iwl_ht_params iwl7000_ht_params = {
118 .max_inst_size = IWL60_RTC_INST_SIZE, \ 114 .max_inst_size = IWL60_RTC_INST_SIZE, \
119 .max_data_size = IWL60_RTC_DATA_SIZE, \ 115 .max_data_size = IWL60_RTC_DATA_SIZE, \
120 .base_params = &iwl7000_base_params, \ 116 .base_params = &iwl7000_base_params, \
121 /* TODO: .bt_params? */ \ 117 .led_mode = IWL_LED_RF_STATE
122 .need_temp_offset_calib = true, \
123 .led_mode = IWL_LED_RF_STATE, \
124 .adv_pm = true \
125 118
126 119
127const struct iwl_cfg iwl7260_2ac_cfg = { 120const struct iwl_cfg iwl7260_2ac_cfg = {
128 .name = "Intel(R) Dual Band Wireless AC7260", 121 .name = "Intel(R) Dual Band Wireless AC 7260",
129 .fw_name_pre = IWL7260_FW_PRE, 122 .fw_name_pre = IWL7260_FW_PRE,
130 IWL_DEVICE_7000, 123 IWL_DEVICE_7000,
131 .ht_params = &iwl7000_ht_params, 124 .ht_params = &iwl7000_ht_params,
@@ -133,8 +126,44 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
133 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 126 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
134}; 127};
135 128
136const struct iwl_cfg iwl3160_ac_cfg = { 129const struct iwl_cfg iwl7260_2n_cfg = {
137 .name = "Intel(R) Dual Band Wireless AC3160", 130 .name = "Intel(R) Dual Band Wireless N 7260",
131 .fw_name_pre = IWL7260_FW_PRE,
132 IWL_DEVICE_7000,
133 .ht_params = &iwl7000_ht_params,
134 .nvm_ver = IWL7260_NVM_VERSION,
135 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
136};
137
138const struct iwl_cfg iwl7260_n_cfg = {
139 .name = "Intel(R) Wireless N 7260",
140 .fw_name_pre = IWL7260_FW_PRE,
141 IWL_DEVICE_7000,
142 .ht_params = &iwl7000_ht_params,
143 .nvm_ver = IWL7260_NVM_VERSION,
144 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
145};
146
147const struct iwl_cfg iwl3160_2ac_cfg = {
148 .name = "Intel(R) Dual Band Wireless AC 3160",
149 .fw_name_pre = IWL3160_FW_PRE,
150 IWL_DEVICE_7000,
151 .ht_params = &iwl7000_ht_params,
152 .nvm_ver = IWL3160_NVM_VERSION,
153 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
154};
155
156const struct iwl_cfg iwl3160_2n_cfg = {
157 .name = "Intel(R) Dual Band Wireless N 3160",
158 .fw_name_pre = IWL3160_FW_PRE,
159 IWL_DEVICE_7000,
160 .ht_params = &iwl7000_ht_params,
161 .nvm_ver = IWL3160_NVM_VERSION,
162 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
163};
164
165const struct iwl_cfg iwl3160_n_cfg = {
166 .name = "Intel(R) Wireless N 3160",
138 .fw_name_pre = IWL3160_FW_PRE, 167 .fw_name_pre = IWL3160_FW_PRE,
139 IWL_DEVICE_7000, 168 IWL_DEVICE_7000,
140 .ht_params = &iwl7000_ht_params, 169 .ht_params = &iwl7000_ht_params,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index c38aa8f77554..83b9ff6ff3ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -136,17 +136,9 @@ enum iwl_led_mode {
136 * @led_compensation: compensate on the led on/off time per HW according 136 * @led_compensation: compensate on the led on/off time per HW according
137 * to the deviation to achieve the desired led frequency. 137 * to the deviation to achieve the desired led frequency.
138 * The detail algorithm is described in iwl-led.c 138 * The detail algorithm is described in iwl-led.c
139 * @chain_noise_num_beacons: number of beacons used to compute chain noise
140 * @adv_thermal_throttle: support advance thermal throttle
141 * @support_ct_kill_exit: support ct kill exit condition
142 * @plcp_delta_threshold: plcp error rate threshold used to trigger
143 * radio tuning when there is a high receiving plcp error rate
144 * @chain_noise_scale: default chain noise scale used for gain computation
145 * @wd_timeout: TX queues watchdog timeout 139 * @wd_timeout: TX queues watchdog timeout
146 * @max_event_log_size: size of event log buffer size for ucode event logging 140 * @max_event_log_size: size of event log buffer size for ucode event logging
147 * @shadow_reg_enable: HW shadow register support 141 * @shadow_reg_enable: HW shadow register support
148 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
149 * @no_idle_support: do not support idle mode
150 */ 142 */
151struct iwl_base_params { 143struct iwl_base_params {
152 int eeprom_size; 144 int eeprom_size;
@@ -157,31 +149,9 @@ struct iwl_base_params {
157 const u16 max_ll_items; 149 const u16 max_ll_items;
158 const bool shadow_ram_support; 150 const bool shadow_ram_support;
159 u16 led_compensation; 151 u16 led_compensation;
160 bool adv_thermal_throttle;
161 bool support_ct_kill_exit;
162 u8 plcp_delta_threshold;
163 s32 chain_noise_scale;
164 unsigned int wd_timeout; 152 unsigned int wd_timeout;
165 u32 max_event_log_size; 153 u32 max_event_log_size;
166 const bool shadow_reg_enable; 154 const bool shadow_reg_enable;
167 const bool hd_v2;
168 const bool no_idle_support;
169};
170
171/*
172 * @advanced_bt_coexist: support advanced bt coexist
173 * @bt_init_traffic_load: specify initial bt traffic load
174 * @bt_prio_boost: default bt priority boost value
175 * @agg_time_limit: maximum number of uSec in aggregation
176 * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode
177 */
178struct iwl_bt_params {
179 bool advanced_bt_coexist;
180 u8 bt_init_traffic_load;
181 u32 bt_prio_boost;
182 u16 agg_time_limit;
183 bool bt_sco_disable;
184 bool bt_session_2;
185}; 155};
186 156
187/* 157/*
@@ -231,16 +201,10 @@ struct iwl_eeprom_params {
231 * @nvm_calib_ver: NVM calibration version 201 * @nvm_calib_ver: NVM calibration version
232 * @lib: pointer to the lib ops 202 * @lib: pointer to the lib ops
233 * @base_params: pointer to basic parameters 203 * @base_params: pointer to basic parameters
234 * @ht_params: point to ht patameters 204 * @ht_params: point to ht parameters
235 * @bt_params: pointer to bt parameters
236 * @need_temp_offset_calib: need to perform temperature offset calibration
237 * @no_xtal_calib: some devices do not need crystal calibration data,
238 * don't send it to those
239 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) 205 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
240 * @adv_pm: advance power management
241 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity 206 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
242 * @internal_wimax_coex: internal wifi/wimax combo device 207 * @internal_wimax_coex: internal wifi/wimax combo device
243 * @temp_offset_v2: support v2 of temperature offset calibration
244 * 208 *
245 * We enable the driver to be backward compatible wrt. hardware features. 209 * We enable the driver to be backward compatible wrt. hardware features.
246 * API differences in uCode shouldn't be handled here but through TLVs 210 * API differences in uCode shouldn't be handled here but through TLVs
@@ -258,26 +222,23 @@ struct iwl_cfg {
258 const u32 max_inst_size; 222 const u32 max_inst_size;
259 u8 valid_tx_ant; 223 u8 valid_tx_ant;
260 u8 valid_rx_ant; 224 u8 valid_rx_ant;
225 bool bt_shared_single_ant;
261 u16 nvm_ver; 226 u16 nvm_ver;
262 u16 nvm_calib_ver; 227 u16 nvm_calib_ver;
263 /* params not likely to change within a device family */ 228 /* params not likely to change within a device family */
264 const struct iwl_base_params *base_params; 229 const struct iwl_base_params *base_params;
265 /* params likely to change within a device family */ 230 /* params likely to change within a device family */
266 const struct iwl_ht_params *ht_params; 231 const struct iwl_ht_params *ht_params;
267 const struct iwl_bt_params *bt_params;
268 const struct iwl_eeprom_params *eeprom_params; 232 const struct iwl_eeprom_params *eeprom_params;
269 const bool need_temp_offset_calib; /* if used set to true */
270 const bool no_xtal_calib;
271 enum iwl_led_mode led_mode; 233 enum iwl_led_mode led_mode;
272 const bool adv_pm;
273 const bool rx_with_siso_diversity; 234 const bool rx_with_siso_diversity;
274 const bool internal_wimax_coex; 235 const bool internal_wimax_coex;
275 const bool temp_offset_v2;
276}; 236};
277 237
278/* 238/*
279 * This list declares the config structures for all devices. 239 * This list declares the config structures for all devices.
280 */ 240 */
241#if IS_ENABLED(CONFIG_IWLDVM)
281extern const struct iwl_cfg iwl5300_agn_cfg; 242extern const struct iwl_cfg iwl5300_agn_cfg;
282extern const struct iwl_cfg iwl5100_agn_cfg; 243extern const struct iwl_cfg iwl5100_agn_cfg;
283extern const struct iwl_cfg iwl5350_agn_cfg; 244extern const struct iwl_cfg iwl5350_agn_cfg;
@@ -319,7 +280,14 @@ extern const struct iwl_cfg iwl6035_2agn_cfg;
319extern const struct iwl_cfg iwl105_bgn_cfg; 280extern const struct iwl_cfg iwl105_bgn_cfg;
320extern const struct iwl_cfg iwl105_bgn_d_cfg; 281extern const struct iwl_cfg iwl105_bgn_d_cfg;
321extern const struct iwl_cfg iwl135_bgn_cfg; 282extern const struct iwl_cfg iwl135_bgn_cfg;
283#endif /* CONFIG_IWLDVM */
284#if IS_ENABLED(CONFIG_IWLMVM)
322extern const struct iwl_cfg iwl7260_2ac_cfg; 285extern const struct iwl_cfg iwl7260_2ac_cfg;
323extern const struct iwl_cfg iwl3160_ac_cfg; 286extern const struct iwl_cfg iwl7260_2n_cfg;
287extern const struct iwl_cfg iwl7260_n_cfg;
288extern const struct iwl_cfg iwl3160_2ac_cfg;
289extern const struct iwl_cfg iwl3160_2n_cfg;
290extern const struct iwl_cfg iwl3160_n_cfg;
291#endif /* CONFIG_IWLMVM */
324 292
325#endif /* __IWL_CONFIG_H__ */ 293#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 20e845d4da04..a276af476e2d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -472,4 +472,23 @@
472#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) 472#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
473#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) 473#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
474 474
475/*****************************************************************************
476 * 7000/3000 series SHR DTS addresses *
477 *****************************************************************************/
478
479/* Diode Results Register Structure: */
480enum dtd_diode_reg {
481 DTS_DIODE_REG_DIG_VAL = 0x000000FF, /* bits [7:0] */
482 DTS_DIODE_REG_VREF_LOW = 0x0000FF00, /* bits [15:8] */
483 DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */
484 DTS_DIODE_REG_VREF_ID = 0x03000000, /* bits [25:24] */
485 DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */
486 DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */
487/* Those are the masks INSIDE the flags bit-field: */
488 DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0,
489 DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */
490 DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7,
491 DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */
492};
493
475#endif /* !__iwl_csr_h__ */ 494#endif /* !__iwl_csr_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 8cf5db7fb5c9..7edb8519c8a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -34,7 +34,11 @@
34 34
35static inline bool iwl_have_debug_level(u32 level) 35static inline bool iwl_have_debug_level(u32 level)
36{ 36{
37#ifdef CONFIG_IWLWIFI_DEBUG
37 return iwlwifi_mod_params.debug_level & level; 38 return iwlwifi_mod_params.debug_level & level;
39#else
40 return false;
41#endif
38} 42}
39 43
40void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, 44void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 40fed1f511e2..d0162d426f88 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1111,11 +1111,8 @@ void iwl_drv_stop(struct iwl_drv *drv)
1111/* shared module parameters */ 1111/* shared module parameters */
1112struct iwl_mod_params iwlwifi_mod_params = { 1112struct iwl_mod_params iwlwifi_mod_params = {
1113 .restart_fw = true, 1113 .restart_fw = true,
1114 .plcp_check = true,
1115 .bt_coex_active = true, 1114 .bt_coex_active = true,
1116 .power_level = IWL_POWER_INDEX_1, 1115 .power_level = IWL_POWER_INDEX_1,
1117 .bt_ch_announce = true,
1118 .auto_agg = true,
1119 .wd_disable = true, 1116 .wd_disable = true,
1120 /* the rest are 0 by default */ 1117 /* the rest are 0 by default */
1121}; 1118};
@@ -1223,19 +1220,14 @@ module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
1223MODULE_PARM_DESC(antenna_coupling, 1220MODULE_PARM_DESC(antenna_coupling,
1224 "specify antenna coupling in dB (defualt: 0 dB)"); 1221 "specify antenna coupling in dB (defualt: 0 dB)");
1225 1222
1226module_param_named(bt_ch_inhibition, iwlwifi_mod_params.bt_ch_announce,
1227 bool, S_IRUGO);
1228MODULE_PARM_DESC(bt_ch_inhibition,
1229 "Enable BT channel inhibition (default: enable)");
1230
1231module_param_named(plcp_check, iwlwifi_mod_params.plcp_check, bool, S_IRUGO);
1232MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
1233
1234module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO); 1223module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
1235MODULE_PARM_DESC(wd_disable, 1224MODULE_PARM_DESC(wd_disable,
1236 "Disable stuck queue watchdog timer 0=system default, " 1225 "Disable stuck queue watchdog timer 0=system default, "
1237 "1=disable, 2=enable (default: 0)"); 1226 "1=disable, 2=enable (default: 0)");
1238 1227
1228module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
1229MODULE_PARM_DESC(nvm_file, "NVM file name");
1230
1239/* 1231/*
1240 * set bt_coex_active to true, uCode will do kill/defer 1232 * set bt_coex_active to true, uCode will do kill/defer
1241 * every time the priority line is asserted (BT is sending signals on the 1233 * every time the priority line is asserted (BT is sending signals on the
@@ -1269,8 +1261,3 @@ module_param_named(power_level, iwlwifi_mod_params.power_level,
1269 int, S_IRUGO); 1261 int, S_IRUGO);
1270MODULE_PARM_DESC(power_level, 1262MODULE_PARM_DESC(power_level,
1271 "default power save level (range from 1 - 5, default: 1)"); 1263 "default power save level (range from 1 - 5, default: 1)");
1272
1273module_param_named(auto_agg, iwlwifi_mod_params.auto_agg,
1274 bool, S_IRUGO);
1275MODULE_PARM_DESC(auto_agg,
1276 "enable agg w/o check traffic load (default: enable)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 7d1450916308..429337a2b9a1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -62,8 +62,7 @@
62 62
63#ifndef __iwl_drv_h__ 63#ifndef __iwl_drv_h__
64#define __iwl_drv_h__ 64#define __iwl_drv_h__
65 65#include <linux/export.h>
66#include <linux/module.h>
67 66
68/* for all modules */ 67/* for all modules */
69#define DRV_NAME "iwlwifi" 68#define DRV_NAME "iwlwifi"
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 600c9fdd7f71..4c887f365908 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -732,17 +732,16 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
732void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, 732void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
733 struct iwl_nvm_data *data, 733 struct iwl_nvm_data *data,
734 struct ieee80211_sta_ht_cap *ht_info, 734 struct ieee80211_sta_ht_cap *ht_info,
735 enum ieee80211_band band) 735 enum ieee80211_band band,
736 u8 tx_chains, u8 rx_chains)
736{ 737{
737 int max_bit_rate = 0; 738 int max_bit_rate = 0;
738 u8 rx_chains;
739 u8 tx_chains;
740 739
741 tx_chains = hweight8(data->valid_tx_ant); 740 tx_chains = hweight8(tx_chains);
742 if (cfg->rx_with_siso_diversity) 741 if (cfg->rx_with_siso_diversity)
743 rx_chains = 1; 742 rx_chains = 1;
744 else 743 else
745 rx_chains = hweight8(data->valid_rx_ant); 744 rx_chains = hweight8(rx_chains);
746 745
747 if (!(data->sku_cap_11n_enable) || !cfg->ht_params) { 746 if (!(data->sku_cap_11n_enable) || !cfg->ht_params) {
748 ht_info->ht_supported = false; 747 ht_info->ht_supported = false;
@@ -806,7 +805,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
806 sband->n_bitrates = N_RATES_24; 805 sband->n_bitrates = N_RATES_24;
807 n_used += iwl_init_sband_channels(data, sband, n_channels, 806 n_used += iwl_init_sband_channels(data, sband, n_channels,
808 IEEE80211_BAND_2GHZ); 807 IEEE80211_BAND_2GHZ);
809 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ); 808 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
809 data->valid_tx_ant, data->valid_rx_ant);
810 810
811 sband = &data->bands[IEEE80211_BAND_5GHZ]; 811 sband = &data->bands[IEEE80211_BAND_5GHZ];
812 sband->band = IEEE80211_BAND_5GHZ; 812 sband->band = IEEE80211_BAND_5GHZ;
@@ -814,7 +814,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
814 sband->n_bitrates = N_RATES_52; 814 sband->n_bitrates = N_RATES_52;
815 n_used += iwl_init_sband_channels(data, sband, n_channels, 815 n_used += iwl_init_sband_channels(data, sband, n_channels,
816 IEEE80211_BAND_5GHZ); 816 IEEE80211_BAND_5GHZ);
817 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ); 817 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
818 data->valid_tx_ant, data->valid_rx_ant);
818 819
819 if (n_channels != n_used) 820 if (n_channels != n_used)
820 IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n", 821 IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 37f115390b19..d73304a23ec2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -133,6 +133,7 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
133void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, 133void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
134 struct iwl_nvm_data *data, 134 struct iwl_nvm_data *data,
135 struct ieee80211_sta_ht_cap *ht_info, 135 struct ieee80211_sta_ht_cap *ht_info,
136 enum ieee80211_band band); 136 enum ieee80211_band band,
137 u8 tx_chains, u8 rx_chains);
137 138
138#endif /* __iwl_eeprom_parse_h__ */ 139#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index c4c446d41eb0..f844d5c748c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -106,11 +106,14 @@ enum iwl_ucode_type {
106 106
107/* 107/*
108 * enumeration of ucode section. 108 * enumeration of ucode section.
109 * This enumeration is used for legacy tlv style (before 16.0 uCode). 109 * This enumeration is used directly for older firmware (before 16.0).
110 * For new firmware, there can be up to 4 sections (see below) but the
111 * first one packaged into the firmware file is the DATA section and
112 * some debugging code accesses that.
110 */ 113 */
111enum iwl_ucode_sec { 114enum iwl_ucode_sec {
112 IWL_UCODE_SECTION_INST,
113 IWL_UCODE_SECTION_DATA, 115 IWL_UCODE_SECTION_DATA,
116 IWL_UCODE_SECTION_INST,
114}; 117};
115/* 118/*
116 * For 16.0 uCode and above, there is no differentiation between sections, 119 * For 16.0 uCode and above, there is no differentiation between sections,
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index d6f6c37c09fd..a1f580c0c6c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -93,7 +93,6 @@ enum iwl_power_level {
93 * use IWL_DISABLE_HT_* constants 93 * use IWL_DISABLE_HT_* constants
94 * @amsdu_size_8K: enable 8K amsdu size, default = 0 94 * @amsdu_size_8K: enable 8K amsdu size, default = 0
95 * @restart_fw: restart firmware, default = 1 95 * @restart_fw: restart firmware, default = 1
96 * @plcp_check: enable plcp health check, default = true
97 * @wd_disable: enable stuck queue check, default = 0 96 * @wd_disable: enable stuck queue check, default = 0
98 * @bt_coex_active: enable bt coex, default = true 97 * @bt_coex_active: enable bt coex, default = true
99 * @led_mode: system default, default = 0 98 * @led_mode: system default, default = 0
@@ -101,24 +100,22 @@ enum iwl_power_level {
101 * @power_level: power level, default = 1 100 * @power_level: power level, default = 1
102 * @debug_level: levels are IWL_DL_* 101 * @debug_level: levels are IWL_DL_*
103 * @ant_coupling: antenna coupling in dB, default = 0 102 * @ant_coupling: antenna coupling in dB, default = 0
104 * @bt_ch_announce: BT channel inhibition, default = enable
105 * @auto_agg: enable agg. without check, default = true
106 */ 103 */
107struct iwl_mod_params { 104struct iwl_mod_params {
108 int sw_crypto; 105 int sw_crypto;
109 unsigned int disable_11n; 106 unsigned int disable_11n;
110 int amsdu_size_8K; 107 int amsdu_size_8K;
111 bool restart_fw; 108 bool restart_fw;
112 bool plcp_check;
113 int wd_disable; 109 int wd_disable;
114 bool bt_coex_active; 110 bool bt_coex_active;
115 int led_mode; 111 int led_mode;
116 bool power_save; 112 bool power_save;
117 int power_level; 113 int power_level;
114#ifdef CONFIG_IWLWIFI_DEBUG
118 u32 debug_level; 115 u32 debug_level;
116#endif
119 int ant_coupling; 117 int ant_coupling;
120 bool bt_ch_announce; 118 char *nvm_file;
121 bool auto_agg;
122}; 119};
123 120
124#endif /* #__iwl_modparams_h__ */ 121#endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 6199a0a597a6..acd2665afb8c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -89,6 +89,7 @@ enum nvm_sku_bits {
89 NVM_SKU_CAP_BAND_24GHZ = BIT(0), 89 NVM_SKU_CAP_BAND_24GHZ = BIT(0),
90 NVM_SKU_CAP_BAND_52GHZ = BIT(1), 90 NVM_SKU_CAP_BAND_52GHZ = BIT(1),
91 NVM_SKU_CAP_11N_ENABLE = BIT(2), 91 NVM_SKU_CAP_11N_ENABLE = BIT(2),
92 NVM_SKU_CAP_11AC_ENABLE = BIT(3),
92}; 93};
93 94
94/* radio config bits (actual values from NVM definition) */ 95/* radio config bits (actual values from NVM definition) */
@@ -258,8 +259,6 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
258 struct iwl_nvm_data *data, 259 struct iwl_nvm_data *data,
259 struct ieee80211_sta_vht_cap *vht_cap) 260 struct ieee80211_sta_vht_cap *vht_cap)
260{ 261{
261 /* For now, assume new devices with NVM are VHT capable */
262
263 vht_cap->vht_supported = true; 262 vht_cap->vht_supported = true;
264 263
265 vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 | 264 vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
@@ -292,7 +291,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
292} 291}
293 292
294static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, 293static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
295 struct iwl_nvm_data *data, const __le16 *nvm_sw) 294 struct iwl_nvm_data *data, const __le16 *nvm_sw,
295 bool enable_vht, u8 tx_chains, u8 rx_chains)
296{ 296{
297 int n_channels = iwl_init_channel_map(dev, cfg, data, 297 int n_channels = iwl_init_channel_map(dev, cfg, data,
298 &nvm_sw[NVM_CHANNELS]); 298 &nvm_sw[NVM_CHANNELS]);
@@ -305,7 +305,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
305 sband->n_bitrates = N_RATES_24; 305 sband->n_bitrates = N_RATES_24;
306 n_used += iwl_init_sband_channels(data, sband, n_channels, 306 n_used += iwl_init_sband_channels(data, sband, n_channels,
307 IEEE80211_BAND_2GHZ); 307 IEEE80211_BAND_2GHZ);
308 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ); 308 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
309 tx_chains, rx_chains);
309 310
310 sband = &data->bands[IEEE80211_BAND_5GHZ]; 311 sband = &data->bands[IEEE80211_BAND_5GHZ];
311 sband->band = IEEE80211_BAND_5GHZ; 312 sband->band = IEEE80211_BAND_5GHZ;
@@ -313,8 +314,10 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
313 sband->n_bitrates = N_RATES_52; 314 sband->n_bitrates = N_RATES_52;
314 n_used += iwl_init_sband_channels(data, sband, n_channels, 315 n_used += iwl_init_sband_channels(data, sband, n_channels,
315 IEEE80211_BAND_5GHZ); 316 IEEE80211_BAND_5GHZ);
316 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ); 317 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
317 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap); 318 tx_chains, rx_chains);
319 if (enable_vht)
320 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
318 321
319 if (n_channels != n_used) 322 if (n_channels != n_used)
320 IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", 323 IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
@@ -324,7 +327,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
324struct iwl_nvm_data * 327struct iwl_nvm_data *
325iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, 328iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
326 const __le16 *nvm_hw, const __le16 *nvm_sw, 329 const __le16 *nvm_hw, const __le16 *nvm_sw,
327 const __le16 *nvm_calib) 330 const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains)
328{ 331{
329 struct iwl_nvm_data *data; 332 struct iwl_nvm_data *data;
330 u8 hw_addr[ETH_ALEN]; 333 u8 hw_addr[ETH_ALEN];
@@ -380,7 +383,8 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
380 data->hw_addr[4] = hw_addr[5]; 383 data->hw_addr[4] = hw_addr[5];
381 data->hw_addr[5] = hw_addr[4]; 384 data->hw_addr[5] = hw_addr[4];
382 385
383 iwl_init_sbands(dev, cfg, data, nvm_sw); 386 iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE,
387 tx_chains, rx_chains);
384 388
385 data->calib_version = 255; /* TODO: 389 data->calib_version = 255; /* TODO:
386 this value will prevent some checks from 390 this value will prevent some checks from
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index e57fb989661e..3325059c52d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -75,6 +75,6 @@
75struct iwl_nvm_data * 75struct iwl_nvm_data *
76iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, 76iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
77 const __le16 *nvm_hw, const __le16 *nvm_sw, 77 const __le16 *nvm_hw, const __le16 *nvm_sw,
78 const __le16 *nvm_calib); 78 const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains);
79 79
80#endif /* __iwl_nvm_parse_h__ */ 80#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index 25745daa0d5d..1a405ae6a9c5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -92,20 +92,16 @@ struct iwl_phy_db_entry {
92struct iwl_phy_db { 92struct iwl_phy_db {
93 struct iwl_phy_db_entry cfg; 93 struct iwl_phy_db_entry cfg;
94 struct iwl_phy_db_entry calib_nch; 94 struct iwl_phy_db_entry calib_nch;
95 struct iwl_phy_db_entry calib_ch;
96 struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS]; 95 struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
97 struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS]; 96 struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
98 97
99 u32 channel_num;
100 u32 channel_size;
101
102 struct iwl_trans *trans; 98 struct iwl_trans *trans;
103}; 99};
104 100
105enum iwl_phy_db_section_type { 101enum iwl_phy_db_section_type {
106 IWL_PHY_DB_CFG = 1, 102 IWL_PHY_DB_CFG = 1,
107 IWL_PHY_DB_CALIB_NCH, 103 IWL_PHY_DB_CALIB_NCH,
108 IWL_PHY_DB_CALIB_CH, 104 IWL_PHY_DB_UNUSED,
109 IWL_PHY_DB_CALIB_CHG_PAPD, 105 IWL_PHY_DB_CALIB_CHG_PAPD,
110 IWL_PHY_DB_CALIB_CHG_TXP, 106 IWL_PHY_DB_CALIB_CHG_TXP,
111 IWL_PHY_DB_MAX 107 IWL_PHY_DB_MAX
@@ -169,8 +165,6 @@ iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
169 return &phy_db->cfg; 165 return &phy_db->cfg;
170 case IWL_PHY_DB_CALIB_NCH: 166 case IWL_PHY_DB_CALIB_NCH:
171 return &phy_db->calib_nch; 167 return &phy_db->calib_nch;
172 case IWL_PHY_DB_CALIB_CH:
173 return &phy_db->calib_ch;
174 case IWL_PHY_DB_CALIB_CHG_PAPD: 168 case IWL_PHY_DB_CALIB_CHG_PAPD:
175 if (chg_id >= IWL_NUM_PAPD_CH_GROUPS) 169 if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
176 return NULL; 170 return NULL;
@@ -208,7 +202,6 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db)
208 202
209 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0); 203 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
210 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0); 204 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
211 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
212 for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++) 205 for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
213 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i); 206 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
214 for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) 207 for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
@@ -248,13 +241,6 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
248 241
249 entry->size = size; 242 entry->size = size;
250 243
251 if (type == IWL_PHY_DB_CALIB_CH) {
252 phy_db->channel_num =
253 le32_to_cpup((__le32 *)phy_db_notif->data);
254 phy_db->channel_size =
255 (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
256 }
257
258 IWL_DEBUG_INFO(phy_db->trans, 244 IWL_DEBUG_INFO(phy_db->trans,
259 "%s(%d): [PHYDB]SET: Type %d , Size: %d\n", 245 "%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
260 __func__, __LINE__, type, size); 246 __func__, __LINE__, type, size);
@@ -328,10 +314,7 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
328 u32 type, u8 **data, u16 *size, u16 ch_id) 314 u32 type, u8 **data, u16 *size, u16 ch_id)
329{ 315{
330 struct iwl_phy_db_entry *entry; 316 struct iwl_phy_db_entry *entry;
331 u32 channel_num;
332 u32 channel_size;
333 u16 ch_group_id = 0; 317 u16 ch_group_id = 0;
334 u16 index;
335 318
336 if (!phy_db) 319 if (!phy_db)
337 return -EINVAL; 320 return -EINVAL;
@@ -346,21 +329,8 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
346 if (!entry) 329 if (!entry)
347 return -EINVAL; 330 return -EINVAL;
348 331
349 if (type == IWL_PHY_DB_CALIB_CH) { 332 *data = entry->data;
350 index = ch_id_to_ch_index(ch_id); 333 *size = entry->size;
351 channel_num = phy_db->channel_num;
352 channel_size = phy_db->channel_size;
353 if (index >= channel_num) {
354 IWL_ERR(phy_db->trans, "Wrong channel number %d\n",
355 ch_id);
356 return -EINVAL;
357 }
358 *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
359 *size = channel_size;
360 } else {
361 *data = entry->data;
362 *size = entry->size;
363 }
364 334
365 IWL_DEBUG_INFO(phy_db->trans, 335 IWL_DEBUG_INFO(phy_db->trans,
366 "%s(%d): [PHYDB] GET: Type %d , Size: %d\n", 336 "%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
@@ -413,6 +383,9 @@ static int iwl_phy_db_send_all_channel_groups(
413 if (!entry) 383 if (!entry)
414 return -EINVAL; 384 return -EINVAL;
415 385
386 if (WARN_ON_ONCE(!entry->size))
387 continue;
388
416 /* Send the requested PHY DB section */ 389 /* Send the requested PHY DB section */
417 err = iwl_send_phy_db_cmd(phy_db, 390 err = iwl_send_phy_db_cmd(phy_db,
418 type, 391 type,
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 386f2a7c87cb..ff8cc75c189d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -100,6 +100,18 @@
100/* Device system time */ 100/* Device system time */
101#define DEVICE_SYSTEM_TIME_REG 0xA0206C 101#define DEVICE_SYSTEM_TIME_REG 0xA0206C
102 102
103/*****************************************************************************
104 * 7000/3000 series SHR DTS addresses *
105 *****************************************************************************/
106
107#define SHR_MISC_WFM_DTS_EN (0x00a10024)
108#define DTSC_CFG_MODE (0x00a10604)
109#define DTSC_VREF_AVG (0x00a10648)
110#define DTSC_VREF5_AVG (0x00a1064c)
111#define DTSC_CFG_MODE_PERIODIC (0x2)
112#define DTSC_PTAT_AVG (0x00a10650)
113
114
103/** 115/**
104 * Tx Scheduler 116 * Tx Scheduler
105 * 117 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
deleted file mode 100644
index 5cfd55b86ed3..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-test.c
+++ /dev/null
@@ -1,852 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/export.h>
65#include <net/netlink.h>
66
67#include "iwl-drv.h"
68#include "iwl-io.h"
69#include "iwl-fh.h"
70#include "iwl-prph.h"
71#include "iwl-trans.h"
72#include "iwl-test.h"
73#include "iwl-csr.h"
74#include "iwl-testmode.h"
75
76/*
77 * Periphery registers absolute lower bound. This is used in order to
78 * differentiate registery access through HBUS_TARG_PRPH_* and
79 * HBUS_TARG_MEM_* accesses.
80 */
81#define IWL_ABS_PRPH_START (0xA00000)
82
83/*
84 * The TLVs used in the gnl message policy between the kernel module and
85 * user space application. iwl_testmode_gnl_msg_policy is to be carried
86 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
87 * See iwl-testmode.h
88 */
89static
90struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
91 [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
92
93 [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
94 [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
95
96 [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
97 [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
98 [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
99
100 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
101 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
102
103 [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
104
105 [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
106 [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
107 [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
108
109 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
110
111 [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
112
113 [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
114 [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
115 [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
116
117 [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
118 [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
119 [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
120 [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
121 [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
122
123 [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
124};
125
126static inline void iwl_test_trace_clear(struct iwl_test *tst)
127{
128 memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
129}
130
131static void iwl_test_trace_stop(struct iwl_test *tst)
132{
133 if (!tst->trace.enabled)
134 return;
135
136 if (tst->trace.cpu_addr && tst->trace.dma_addr)
137 dma_free_coherent(tst->trans->dev,
138 tst->trace.tsize,
139 tst->trace.cpu_addr,
140 tst->trace.dma_addr);
141
142 iwl_test_trace_clear(tst);
143}
144
145static inline void iwl_test_mem_clear(struct iwl_test *tst)
146{
147 memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
148}
149
150static inline void iwl_test_mem_stop(struct iwl_test *tst)
151{
152 if (!tst->mem.in_read)
153 return;
154
155 iwl_test_mem_clear(tst);
156}
157
158/*
159 * Initializes the test object
160 * During the lifetime of the test object it is assumed that the transport is
161 * started. The test object should be stopped before the transport is stopped.
162 */
163void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
164 struct iwl_test_ops *ops)
165{
166 tst->trans = trans;
167 tst->ops = ops;
168
169 iwl_test_trace_clear(tst);
170 iwl_test_mem_clear(tst);
171}
172EXPORT_SYMBOL_GPL(iwl_test_init);
173
174/*
175 * Stop the test object
176 */
177void iwl_test_free(struct iwl_test *tst)
178{
179 iwl_test_mem_stop(tst);
180 iwl_test_trace_stop(tst);
181}
182EXPORT_SYMBOL_GPL(iwl_test_free);
183
184static inline int iwl_test_send_cmd(struct iwl_test *tst,
185 struct iwl_host_cmd *cmd)
186{
187 return tst->ops->send_cmd(tst->trans->op_mode, cmd);
188}
189
190static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
191{
192 return tst->ops->valid_hw_addr(addr);
193}
194
195static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
196{
197 return tst->ops->get_fw_ver(tst->trans->op_mode);
198}
199
200static inline struct sk_buff*
201iwl_test_alloc_reply(struct iwl_test *tst, int len)
202{
203 return tst->ops->alloc_reply(tst->trans->op_mode, len);
204}
205
206static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
207{
208 return tst->ops->reply(tst->trans->op_mode, skb);
209}
210
211static inline struct sk_buff*
212iwl_test_alloc_event(struct iwl_test *tst, int len)
213{
214 return tst->ops->alloc_event(tst->trans->op_mode, len);
215}
216
217static inline void
218iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
219{
220 return tst->ops->event(tst->trans->op_mode, skb);
221}
222
223/*
224 * This function handles the user application commands to the fw. The fw
225 * commands are sent in a synchronuous manner. In case that the user requested
226 * to get commands response, it is send to the user.
227 */
228static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
229{
230 struct iwl_host_cmd cmd;
231 struct iwl_rx_packet *pkt;
232 struct sk_buff *skb;
233 void *reply_buf;
234 u32 reply_len;
235 int ret;
236 bool cmd_want_skb;
237
238 memset(&cmd, 0, sizeof(struct iwl_host_cmd));
239
240 if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
241 !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
242 IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
243 return -ENOMSG;
244 }
245
246 cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
247 cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
248 if (cmd_want_skb)
249 cmd.flags |= CMD_WANT_SKB;
250
251 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
252 cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
253 cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
254 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
255 IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
256 cmd.id, cmd.flags, cmd.len[0]);
257
258 ret = iwl_test_send_cmd(tst, &cmd);
259 if (ret) {
260 IWL_ERR(tst->trans, "Failed to send hcmd\n");
261 return ret;
262 }
263 if (!cmd_want_skb)
264 return ret;
265
266 /* Handling return of SKB to the user */
267 pkt = cmd.resp_pkt;
268 if (!pkt) {
269 IWL_ERR(tst->trans, "HCMD received a null response packet\n");
270 return ret;
271 }
272
273 reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
274 skb = iwl_test_alloc_reply(tst, reply_len + 20);
275 reply_buf = kmemdup(&pkt->hdr, reply_len, GFP_KERNEL);
276 if (!skb || !reply_buf) {
277 kfree_skb(skb);
278 kfree(reply_buf);
279 return -ENOMEM;
280 }
281
282 /* The reply is in a page, that we cannot send to user space. */
283 iwl_free_resp(&cmd);
284
285 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
286 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
287 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
288 goto nla_put_failure;
289 return iwl_test_reply(tst, skb);
290
291nla_put_failure:
292 IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
293 kfree(reply_buf);
294 kfree_skb(skb);
295 return -ENOMSG;
296}
297
298/*
299 * Handles the user application commands for register access.
300 */
301static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
302{
303 u32 ofs, val32, cmd;
304 u8 val8;
305 struct sk_buff *skb;
306 int status = 0;
307 struct iwl_trans *trans = tst->trans;
308
309 if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
310 IWL_ERR(trans, "Missing reg offset\n");
311 return -ENOMSG;
312 }
313
314 ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
315 IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);
316
317 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
318
319 /*
320 * Allow access only to FH/CSR/HBUS in direct mode.
321 * Since we don't have the upper bounds for the CSR and HBUS segments,
322 * we will use only the upper bound of FH for sanity check.
323 */
324 if (ofs >= FH_MEM_UPPER_BOUND) {
325 IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
326 FH_MEM_UPPER_BOUND);
327 return -EINVAL;
328 }
329
330 switch (cmd) {
331 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
332 val32 = iwl_read_direct32(tst->trans, ofs);
333 IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);
334
335 skb = iwl_test_alloc_reply(tst, 20);
336 if (!skb) {
337 IWL_ERR(trans, "Memory allocation fail\n");
338 return -ENOMEM;
339 }
340 if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
341 goto nla_put_failure;
342 status = iwl_test_reply(tst, skb);
343 if (status < 0)
344 IWL_ERR(trans, "Error sending msg : %d\n", status);
345 break;
346
347 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
348 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
349 IWL_ERR(trans, "Missing value to write\n");
350 return -ENOMSG;
351 } else {
352 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
353 IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
354 iwl_write_direct32(tst->trans, ofs, val32);
355 }
356 break;
357
358 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
359 if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
360 IWL_ERR(trans, "Missing value to write\n");
361 return -ENOMSG;
362 } else {
363 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
364 IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
365 iwl_write8(tst->trans, ofs, val8);
366 }
367 break;
368
369 default:
370 IWL_ERR(trans, "Unknown test register cmd ID\n");
371 return -ENOMSG;
372 }
373
374 return status;
375
376nla_put_failure:
377 kfree_skb(skb);
378 return -EMSGSIZE;
379}
380
381/*
382 * Handles the request to start FW tracing. Allocates of the trace buffer
383 * and sends a reply to user space with the address of the allocated buffer.
384 */
385static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
386{
387 struct sk_buff *skb;
388 int status = 0;
389
390 if (tst->trace.enabled)
391 return -EBUSY;
392
393 if (!tb[IWL_TM_ATTR_TRACE_SIZE])
394 tst->trace.size = TRACE_BUFF_SIZE_DEF;
395 else
396 tst->trace.size =
397 nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
398
399 if (!tst->trace.size)
400 return -EINVAL;
401
402 if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
403 tst->trace.size > TRACE_BUFF_SIZE_MAX)
404 return -EINVAL;
405
406 tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
407 tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
408 tst->trace.tsize,
409 &tst->trace.dma_addr,
410 GFP_KERNEL);
411 if (!tst->trace.cpu_addr)
412 return -ENOMEM;
413
414 tst->trace.enabled = true;
415 tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);
416
417 memset(tst->trace.trace_addr, 0x03B, tst->trace.size);
418
419 skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
420 if (!skb) {
421 IWL_ERR(tst->trans, "Memory allocation fail\n");
422 iwl_test_trace_stop(tst);
423 return -ENOMEM;
424 }
425
426 if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
427 sizeof(tst->trace.dma_addr),
428 (u64 *)&tst->trace.dma_addr))
429 goto nla_put_failure;
430
431 status = iwl_test_reply(tst, skb);
432 if (status < 0)
433 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
434
435 tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
436 DUMP_CHUNK_SIZE);
437
438 return status;
439
440nla_put_failure:
441 kfree_skb(skb);
442 if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
443 IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
444 iwl_test_trace_stop(tst);
445 return -EMSGSIZE;
446}
447
448/*
449 * Handles indirect read from the periphery or the SRAM. The read is performed
450 * to a temporary buffer. The user space application should later issue a dump
451 */
452static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
453{
454 struct iwl_trans *trans = tst->trans;
455 unsigned long flags;
456 int i;
457
458 if (size & 0x3)
459 return -EINVAL;
460
461 tst->mem.size = size;
462 tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
463 if (tst->mem.addr == NULL)
464 return -ENOMEM;
465
466 /* Hard-coded periphery absolute address */
467 if (IWL_ABS_PRPH_START <= addr &&
468 addr < IWL_ABS_PRPH_START + PRPH_END) {
469 if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
470 return -EIO;
471 }
472 iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
473 addr | (3 << 24));
474 for (i = 0; i < size; i += 4)
475 *(u32 *)(tst->mem.addr + i) =
476 iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
477 iwl_trans_release_nic_access(trans, &flags);
478 } else { /* target memory (SRAM) */
479 iwl_trans_read_mem(trans, addr, tst->mem.addr,
480 tst->mem.size / 4);
481 }
482
483 tst->mem.nchunks =
484 DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
485 tst->mem.in_read = true;
486 return 0;
487
488}
489
490/*
491 * Handles indirect write to the periphery or SRAM. The is performed to a
492 * temporary buffer.
493 */
494static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
495 u32 size, unsigned char *buf)
496{
497 struct iwl_trans *trans = tst->trans;
498 u32 val, i;
499 unsigned long flags;
500
501 if (IWL_ABS_PRPH_START <= addr &&
502 addr < IWL_ABS_PRPH_START + PRPH_END) {
503 /* Periphery writes can be 1-3 bytes long, or DWORDs */
504 if (size < 4) {
505 memcpy(&val, buf, size);
506 if (!iwl_trans_grab_nic_access(trans, false, &flags))
507 return -EIO;
508 iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
509 (addr & 0x0000FFFF) |
510 ((size - 1) << 24));
511 iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
512 iwl_trans_release_nic_access(trans, &flags);
513 } else {
514 if (size % 4)
515 return -EINVAL;
516 for (i = 0; i < size; i += 4)
517 iwl_write_prph(trans, addr+i,
518 *(u32 *)(buf+i));
519 }
520 } else if (iwl_test_valid_hw_addr(tst, addr)) {
521 iwl_trans_write_mem(trans, addr, buf, size / 4);
522 } else {
523 return -EINVAL;
524 }
525 return 0;
526}
527
528/*
529 * Handles the user application commands for indirect read/write
530 * to/from the periphery or the SRAM.
531 */
532static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
533{
534 u32 addr, size, cmd;
535 unsigned char *buf;
536
537 /* Both read and write should be blocked, for atomicity */
538 if (tst->mem.in_read)
539 return -EBUSY;
540
541 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
542 if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
543 IWL_ERR(tst->trans, "Error finding memory offset address\n");
544 return -ENOMSG;
545 }
546 addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
547 if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
548 IWL_ERR(tst->trans, "Error finding size for memory reading\n");
549 return -ENOMSG;
550 }
551 size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
552
553 if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
554 return iwl_test_indirect_read(tst, addr, size);
555 } else {
556 if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
557 return -EINVAL;
558 buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
559 return iwl_test_indirect_write(tst, addr, size, buf);
560 }
561}
562
563/*
564 * Enable notifications to user space
565 */
566static int iwl_test_notifications(struct iwl_test *tst,
567 struct nlattr **tb)
568{
569 tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
570 return 0;
571}
572
573/*
574 * Handles the request to get the device id
575 */
576static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
577{
578 u32 devid = tst->trans->hw_id;
579 struct sk_buff *skb;
580 int status;
581
582 IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
583
584 skb = iwl_test_alloc_reply(tst, 20);
585 if (!skb) {
586 IWL_ERR(tst->trans, "Memory allocation fail\n");
587 return -ENOMEM;
588 }
589
590 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
591 goto nla_put_failure;
592 status = iwl_test_reply(tst, skb);
593 if (status < 0)
594 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
595
596 return 0;
597
598nla_put_failure:
599 kfree_skb(skb);
600 return -EMSGSIZE;
601}
602
603/*
604 * Handles the request to get the FW version
605 */
606static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
607{
608 struct sk_buff *skb;
609 int status;
610 u32 ver = iwl_test_fw_ver(tst);
611
612 IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
613
614 skb = iwl_test_alloc_reply(tst, 20);
615 if (!skb) {
616 IWL_ERR(tst->trans, "Memory allocation fail\n");
617 return -ENOMEM;
618 }
619
620 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
621 goto nla_put_failure;
622
623 status = iwl_test_reply(tst, skb);
624 if (status < 0)
625 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
626
627 return 0;
628
629nla_put_failure:
630 kfree_skb(skb);
631 return -EMSGSIZE;
632}
633
634/*
635 * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists
636 */
637int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
638 void *data, int len)
639{
640 int result;
641
642 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
643 iwl_testmode_gnl_msg_policy);
644 if (result) {
645 IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
646 return result;
647 }
648
649 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
650 if (!tb[IWL_TM_ATTR_COMMAND]) {
651 IWL_ERR(tst->trans, "Missing testmode command type\n");
652 return -ENOMSG;
653 }
654 return 0;
655}
656IWL_EXPORT_SYMBOL(iwl_test_parse);
657
658/*
659 * Handle test commands.
660 * Returns 1 for unknown commands (not handled by the test object); negative
661 * value in case of error.
662 */
663int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
664{
665 int result;
666
667 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
668 case IWL_TM_CMD_APP2DEV_UCODE:
669 IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
670 result = iwl_test_fw_cmd(tst, tb);
671 break;
672
673 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
674 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
675 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
676 IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
677 result = iwl_test_reg(tst, tb);
678 break;
679
680 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
681 IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
682 result = iwl_test_trace_begin(tst, tb);
683 break;
684
685 case IWL_TM_CMD_APP2DEV_END_TRACE:
686 iwl_test_trace_stop(tst);
687 result = 0;
688 break;
689
690 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
691 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
692 IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
693 result = iwl_test_indirect_mem(tst, tb);
694 break;
695
696 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
697 IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
698 result = iwl_test_notifications(tst, tb);
699 break;
700
701 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
702 IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
703 result = iwl_test_get_fw_ver(tst, tb);
704 break;
705
706 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
707 IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
708 result = iwl_test_get_dev_id(tst, tb);
709 break;
710
711 default:
712 IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
713 result = 1;
714 break;
715 }
716 return result;
717}
718IWL_EXPORT_SYMBOL(iwl_test_handle_cmd);
719
720static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
721 struct netlink_callback *cb)
722{
723 int idx, length;
724
725 if (!tst->trace.enabled || !tst->trace.trace_addr)
726 return -EFAULT;
727
728 idx = cb->args[4];
729 if (idx >= tst->trace.nchunks)
730 return -ENOENT;
731
732 length = DUMP_CHUNK_SIZE;
733 if (((idx + 1) == tst->trace.nchunks) &&
734 (tst->trace.size % DUMP_CHUNK_SIZE))
735 length = tst->trace.size %
736 DUMP_CHUNK_SIZE;
737
738 if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
739 tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
740 goto nla_put_failure;
741
742 cb->args[4] = ++idx;
743 return 0;
744
745 nla_put_failure:
746 return -ENOBUFS;
747}
748
749static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
750 struct netlink_callback *cb)
751{
752 int idx, length;
753
754 if (!tst->mem.in_read)
755 return -EFAULT;
756
757 idx = cb->args[4];
758 if (idx >= tst->mem.nchunks) {
759 iwl_test_mem_stop(tst);
760 return -ENOENT;
761 }
762
763 length = DUMP_CHUNK_SIZE;
764 if (((idx + 1) == tst->mem.nchunks) &&
765 (tst->mem.size % DUMP_CHUNK_SIZE))
766 length = tst->mem.size % DUMP_CHUNK_SIZE;
767
768 if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
769 tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
770 goto nla_put_failure;
771
772 cb->args[4] = ++idx;
773 return 0;
774
775 nla_put_failure:
776 return -ENOBUFS;
777}
778
779/*
780 * Handle dump commands.
781 * Returns 1 for unknown commands (not handled by the test object); negative
782 * value in case of error.
783 */
784int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
785 struct netlink_callback *cb)
786{
787 int result;
788
789 switch (cmd) {
790 case IWL_TM_CMD_APP2DEV_READ_TRACE:
791 IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
792 result = iwl_test_trace_dump(tst, skb, cb);
793 break;
794
795 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
796 IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
797 result = iwl_test_buffer_dump(tst, skb, cb);
798 break;
799
800 default:
801 result = 1;
802 break;
803 }
804 return result;
805}
806IWL_EXPORT_SYMBOL(iwl_test_dump);
807
808/*
809 * Multicast a spontaneous messages from the device to the user space.
810 */
811static void iwl_test_send_rx(struct iwl_test *tst,
812 struct iwl_rx_cmd_buffer *rxb)
813{
814 struct sk_buff *skb;
815 struct iwl_rx_packet *data;
816 int length;
817
818 data = rxb_addr(rxb);
819 length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
820
821 /* the length doesn't include len_n_flags field, so add it manually */
822 length += sizeof(__le32);
823
824 skb = iwl_test_alloc_event(tst, length + 20);
825 if (skb == NULL) {
826 IWL_ERR(tst->trans, "Out of memory for message to user\n");
827 return;
828 }
829
830 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
831 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
832 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
833 goto nla_put_failure;
834
835 iwl_test_event(tst, skb);
836 return;
837
838nla_put_failure:
839 kfree_skb(skb);
840 IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
841}
842
843/*
844 * Called whenever a Rx frames is recevied from the device. If notifications to
845 * the user space are requested, sends the frames to the user.
846 */
847void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
848{
849 if (tst->notify)
850 iwl_test_send_rx(tst, rxb);
851}
852IWL_EXPORT_SYMBOL(iwl_test_rx);
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
deleted file mode 100644
index 8fbd21704840..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-test.h
+++ /dev/null
@@ -1,161 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __IWL_TEST_H__
65#define __IWL_TEST_H__
66
67#include <linux/types.h>
68#include "iwl-trans.h"
69
/* State of a uCode trace dump in progress. */
struct iwl_test_trace {
	u32 size;		/* usable trace data size, in bytes */
	u32 tsize;		/* total allocation size — presumably size
				 * plus padding; confirm against the
				 * allocation site */
	u32 nchunks;		/* number of DUMP_CHUNK_SIZE dump chunks */
	u8 *cpu_addr;		/* CPU address of the DMA allocation */
	u8 *trace_addr;		/* start of the trace data handed to
				 * userspace (see iwl_test_trace_dump) */
	dma_addr_t dma_addr;	/* bus address of the allocation */
	bool enabled;		/* tracing currently active */
};
79
/* State of a buffered indirect memory read (see iwl_test_indirect_mem). */
struct iwl_test_mem {
	u32 size;	/* number of buffered bytes */
	u32 nchunks;	/* number of DUMP_CHUNK_SIZE dump chunks */
	u8 *addr;	/* buffered copy of the data that was read */
	bool in_read;	/* read in progress; blocks further indirect access */
};
86
/*
 * struct iwl_test_ops: callbacks to the op mode
 *
 * The structure defines the callbacks that the op_mode should handle,
 * in order to handle logic that is out of the scope of iwl_test. The
 * op_mode must set all the callbacks.
 *
 * @send_cmd: handler that is used by the test object to request the
 *	op_mode to send a command to the fw.
 *
 * @valid_hw_addr: handler that is used by the test object to request the
 *	op_mode to check if the given address is a valid address.
 *
 * @get_fw_ver: handler used to get the FW version.
 *
 * @alloc_reply: handler used by the test object to request the op_mode
 *	to allocate an skb for sending a reply to the user, and initialize
 *	the skb. It is assumed that the test object only fills the required
 *	attributes.
 *
 * @reply: handler used by the test object to request the op_mode to reply
 *	to a request. The skb is an skb previously allocated by the
 *	alloc_reply callback.
 *
 * @alloc_event: handler used by the test object to request the op_mode
 *	to allocate an skb for sending an event, and initialize
 *	the skb. It is assumed that the test object only fills the required
 *	attributes.
 *
 * @event: handler used by the test object to request the op_mode to send
 *	an event. The skb is an skb previously allocated by the
 *	alloc_event callback.
 */
struct iwl_test_ops {
	int (*send_cmd)(struct iwl_op_mode *op_modes,
			struct iwl_host_cmd *cmd);
	bool (*valid_hw_addr)(u32 addr);
	u32 (*get_fw_ver)(struct iwl_op_mode *op_mode);

	struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len);
	int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
	struct sk_buff* (*alloc_event)(struct iwl_op_mode *op_mode, int len);
	void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
};
131
/* Top-level testmode state, embedded by the op_mode. */
struct iwl_test {
	struct iwl_trans *trans;	/* transport used for HW access */
	struct iwl_test_ops *ops;	/* op_mode callbacks (all mandatory) */
	struct iwl_test_trace trace;	/* uCode trace dump state */
	struct iwl_test_mem mem;	/* buffered indirect-read state */
	bool notify;			/* forward RX packets to userspace */
};
139
140void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
141 struct iwl_test_ops *ops);
142
143void iwl_test_free(struct iwl_test *tst);
144
145int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
146 void *data, int len);
147
148int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb);
149
150int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
151 struct netlink_callback *cb);
152
153void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb);
154
/* Enable or disable forwarding of RX packets to userspace (see iwl_test_rx). */
static inline void iwl_test_enable_notifications(struct iwl_test *tst,
						 bool enable)
{
	tst->notify = enable;
}
160
161#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
deleted file mode 100644
index 98f48a9afc98..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ /dev/null
@@ -1,309 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __IWL_TESTMODE_H__
64#define __IWL_TESTMODE_H__
65
66#include <linux/types.h>
67
68
69/*
70 * Commands from user space to kernel space(IWL_TM_CMD_ID_APP2DEV_XX) and
71 * from and kernel space to user space(IWL_TM_CMD_ID_DEV2APP_XX).
72 * The command ID is carried with IWL_TM_ATTR_COMMAND.
73 *
74 * @IWL_TM_CMD_APP2DEV_UCODE:
75 * commands from user application to the uCode,
76 * the actual uCode host command ID is carried with
77 * IWL_TM_ATTR_UCODE_CMD_ID
78 *
79 * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
80 * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
81 * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
 82 * commands from user application to access register
83 *
84 * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
85 * @IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: load initial uCode image
86 * @IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: perform calibration
87 * @IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: load runtime uCode image
88 * @IWL_TM_CMD_APP2DEV_GET_EEPROM: request EEPROM data
89 * @IWL_TM_CMD_APP2DEV_FIXRATE_REQ: set fix MCS
 90 * commands from user space for pure driver level operations
91 *
92 * @IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
93 * @IWL_TM_CMD_APP2DEV_END_TRACE:
94 * @IWL_TM_CMD_APP2DEV_READ_TRACE:
 95 * commands from user space for uCode trace operations
96 *
97 * @IWL_TM_CMD_DEV2APP_SYNC_RSP:
98 * commands from kernel space to carry the synchronous response
99 * to user application
100 * @IWL_TM_CMD_DEV2APP_UCODE_RX_PKT:
101 * commands from kernel space to multicast the spontaneous messages
102 * to user application, or reply of host commands
103 * @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
104 * commands from kernel space to carry the eeprom response
105 * to user application
106 *
107 * @IWL_TM_CMD_APP2DEV_OWNERSHIP:
108 * commands from user application to own change the ownership of the uCode
109 * if application has the ownership, the only host command from
110 * testmode will deliver to uCode. Default owner is driver
111 *
112 * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake On Wireless LAN uCode image
113 * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version
114 * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve ID information in device
115 * @IWL_TM_CMD_APP2DEV_GET_FW_INFO:
116 * retrieve information of existing loaded uCode image
117 *
118 * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
119 * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
120 * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
121 * Commands to read/write data from periphery or SRAM memory ranges.
 122 * For reading, a READ command is sent from the userspace and the data
123 * is returned when the user calls a DUMP command.
124 * For writing, only a WRITE command is used.
125 * @IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
126 * Command to enable/disable notifications (currently RX packets) from the
127 * driver to userspace.
128 */
/*
 * Testmode command identifiers, carried in IWL_TM_ATTR_COMMAND.
 * Per-command semantics are documented in the block comment above.
 * Values are part of the userspace ABI — do not renumber.
 */
enum iwl_tm_cmd_t {
	IWL_TM_CMD_APP2DEV_UCODE = 1,
	IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2,
	IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3,
	IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4,
	IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5,
	IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6,
	IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7,
	IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW = 8,
	IWL_TM_CMD_APP2DEV_GET_EEPROM = 9,
	IWL_TM_CMD_APP2DEV_FIXRATE_REQ = 10,
	IWL_TM_CMD_APP2DEV_BEGIN_TRACE = 11,
	IWL_TM_CMD_APP2DEV_END_TRACE = 12,
	IWL_TM_CMD_APP2DEV_READ_TRACE = 13,
	IWL_TM_CMD_DEV2APP_SYNC_RSP = 14,
	IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
	IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
	IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
	/* 18-21 were used by removed commands; kept to preserve the ABI */
	RESERVED_18 = 18,
	RESERVED_19 = 19,
	RESERVED_20 = 20,
	RESERVED_21 = 21,
	IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22,
	IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23,
	IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24,
	IWL_TM_CMD_APP2DEV_GET_FW_INFO = 25,
	IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ = 26,
	IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP = 27,
	IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE = 28,
	IWL_TM_CMD_APP2DEV_NOTIFICATIONS = 29,
	IWL_TM_CMD_MAX = 30,
};
161
162/*
 163 * Attribute field in testmode command
164 * See enum iwl_tm_cmd_t.
165 *
166 * @IWL_TM_ATTR_NOT_APPLICABLE:
167 * The attribute is not applicable or invalid
168 * @IWL_TM_ATTR_COMMAND:
169 * From user space to kernel space:
170 * the command either destines to ucode, driver, or register;
171 * From kernel space to user space:
172 * the command either carries synchronous response,
173 * or the spontaneous message multicast from the device;
174 *
175 * @IWL_TM_ATTR_UCODE_CMD_ID:
176 * @IWL_TM_ATTR_UCODE_CMD_DATA:
177 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE,
178 * The mandatory fields are :
179 * IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID;
180 * IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload
181 * to the ucode
182 *
183 * @IWL_TM_ATTR_REG_OFFSET:
184 * @IWL_TM_ATTR_REG_VALUE8:
185 * @IWL_TM_ATTR_REG_VALUE32:
186 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_XXX,
187 * The mandatory fields are:
188 * IWL_TM_ATTR_REG_OFFSET for the offset of the target register;
189 * IWL_TM_ATTR_REG_VALUE8 or IWL_TM_ATTR_REG_VALUE32 for value
190 *
191 * @IWL_TM_ATTR_SYNC_RSP:
192 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_SYNC_RSP,
193 * The mandatory fields are:
194 * IWL_TM_ATTR_SYNC_RSP for the data content responding to the user
195 * application command
196 *
197 * @IWL_TM_ATTR_UCODE_RX_PKT:
198 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_UCODE_RX_PKT,
199 * The mandatory fields are:
200 * IWL_TM_ATTR_UCODE_RX_PKT for the data content multicast to the user
201 * application
202 *
203 * @IWL_TM_ATTR_EEPROM:
204 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM,
205 * The mandatory fields are:
 206 * IWL_TM_ATTR_EEPROM for the data content responding to the user
207 * application
208 *
209 * @IWL_TM_ATTR_TRACE_ADDR:
210 * @IWL_TM_ATTR_TRACE_SIZE:
211 * @IWL_TM_ATTR_TRACE_DUMP:
212 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE,
213 * The mandatory fields are:
214 * IWL_TM_ATTR_MEM_TRACE_ADDR for the trace address
215 * IWL_TM_ATTR_MEM_TRACE_SIZE for the trace buffer size
216 * IWL_TM_ATTR_MEM_TRACE_DUMP for the trace dump
217 *
218 * @IWL_TM_ATTR_FIXRATE:
219 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ,
220 * The mandatory fields are:
221 * IWL_TM_ATTR_FIXRATE for the fixed rate
222 *
223 * @IWL_TM_ATTR_UCODE_OWNER:
224 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
225 * The mandatory fields are:
226 * IWL_TM_ATTR_UCODE_OWNER for the new owner
227 *
228 * @IWL_TM_ATTR_MEM_ADDR:
229 * @IWL_TM_ATTR_BUFFER_SIZE:
230 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ
231 * or IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE.
232 * The mandatory fields are:
233 * IWL_TM_ATTR_MEM_ADDR for the address in SRAM/periphery to read/write
234 * IWL_TM_ATTR_BUFFER_SIZE for the buffer size of data to read/write.
235 *
236 * @IWL_TM_ATTR_BUFFER_DUMP:
237 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP,
238 * IWL_TM_ATTR_BUFFER_DUMP is used for the data that was read.
239 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE,
240 * this attribute contains the data to write.
241 *
242 * @IWL_TM_ATTR_FW_VERSION:
243 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
244 * IWL_TM_ATTR_FW_VERSION for the uCode version
245 *
246 * @IWL_TM_ATTR_DEVICE_ID:
247 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
248 * IWL_TM_ATTR_DEVICE_ID for the device ID information
249 *
250 * @IWL_TM_ATTR_FW_TYPE:
251 * @IWL_TM_ATTR_FW_INST_SIZE:
252 * @IWL_TM_ATTR_FW_DATA_SIZE:
253 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_INFO,
254 * The mandatory fields are:
255 * IWL_TM_ATTR_FW_TYPE for the uCode type (INIT/RUNTIME/...)
256 * IWL_TM_ATTR_FW_INST_SIZE for the size of instruction section
257 * IWL_TM_ATTR_FW_DATA_SIZE for the size of data section
258 *
259 * @IWL_TM_ATTR_UCODE_CMD_SKB:
260 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE this flag
261 * indicates that the user wants to receive the response of the command
262 * in a reply SKB. If it's not present, the response is not returned.
263 * @IWL_TM_ATTR_ENABLE_NOTIFICATIONS:
264 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_NOTIFICATIONS, this
265 * flag enables (if present) or disables (if not) the forwarding
266 * to userspace.
267 */
/*
 * Netlink attribute identifiers used by the testmode commands.
 * Per-attribute semantics are documented in the block comment above.
 * Values are part of the userspace ABI — do not renumber.
 */
enum iwl_tm_attr_t {
	IWL_TM_ATTR_NOT_APPLICABLE = 0,
	IWL_TM_ATTR_COMMAND = 1,
	IWL_TM_ATTR_UCODE_CMD_ID = 2,
	IWL_TM_ATTR_UCODE_CMD_DATA = 3,
	IWL_TM_ATTR_REG_OFFSET = 4,
	IWL_TM_ATTR_REG_VALUE8 = 5,
	IWL_TM_ATTR_REG_VALUE32 = 6,
	IWL_TM_ATTR_SYNC_RSP = 7,
	IWL_TM_ATTR_UCODE_RX_PKT = 8,
	IWL_TM_ATTR_EEPROM = 9,
	IWL_TM_ATTR_TRACE_ADDR = 10,
	IWL_TM_ATTR_TRACE_SIZE = 11,
	IWL_TM_ATTR_TRACE_DUMP = 12,
	IWL_TM_ATTR_FIXRATE = 13,
	IWL_TM_ATTR_UCODE_OWNER = 14,
	IWL_TM_ATTR_MEM_ADDR = 15,
	IWL_TM_ATTR_BUFFER_SIZE = 16,
	IWL_TM_ATTR_BUFFER_DUMP = 17,
	IWL_TM_ATTR_FW_VERSION = 18,
	IWL_TM_ATTR_DEVICE_ID = 19,
	IWL_TM_ATTR_FW_TYPE = 20,
	IWL_TM_ATTR_FW_INST_SIZE = 21,
	IWL_TM_ATTR_FW_DATA_SIZE = 22,
	IWL_TM_ATTR_UCODE_CMD_SKB = 23,
	IWL_TM_ATTR_ENABLE_NOTIFICATION = 24,
	IWL_TM_ATTR_MAX = 25,
};
296
297/* uCode trace buffer */
298#define TRACE_BUFF_SIZE_MAX 0x200000
299#define TRACE_BUFF_SIZE_MIN 0x20000
300#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN
301#define TRACE_BUFF_PADD 0x2000
302
 303/* Maximum data size of each dump packet */
304#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)
305
306/* Address offset of data segment in SRAM */
307#define SRAM_DATA_SEG_OFFSET 0x800000
308
309#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 7a13790b5bfe..8d91422c5982 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -183,13 +183,12 @@ struct iwl_rx_packet {
183 * @CMD_ASYNC: Return right away and don't want for the response 183 * @CMD_ASYNC: Return right away and don't want for the response
184 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the 184 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
185 * response. The caller needs to call iwl_free_resp when done. 185 * response. The caller needs to call iwl_free_resp when done.
186 * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
187 */ 186 */
188enum CMD_MODE { 187enum CMD_MODE {
189 CMD_SYNC = 0, 188 CMD_SYNC = 0,
190 CMD_ASYNC = BIT(0), 189 CMD_ASYNC = BIT(0),
191 CMD_WANT_SKB = BIT(1), 190 CMD_WANT_SKB = BIT(1),
192 CMD_ON_DEMAND = BIT(2), 191 CMD_SEND_IN_RFKILL = BIT(2),
193}; 192};
194 193
195#define DEF_CMD_PAYLOAD_SIZE 320 194#define DEF_CMD_PAYLOAD_SIZE 320
@@ -427,8 +426,9 @@ struct iwl_trans_ops {
427 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); 426 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
428 void (*stop_device)(struct iwl_trans *trans); 427 void (*stop_device)(struct iwl_trans *trans);
429 428
430 void (*d3_suspend)(struct iwl_trans *trans); 429 void (*d3_suspend)(struct iwl_trans *trans, bool test);
431 int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status); 430 int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
431 bool test);
432 432
433 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); 433 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
434 434
@@ -455,7 +455,7 @@ struct iwl_trans_ops {
455 int (*read_mem)(struct iwl_trans *trans, u32 addr, 455 int (*read_mem)(struct iwl_trans *trans, u32 addr,
456 void *buf, int dwords); 456 void *buf, int dwords);
457 int (*write_mem)(struct iwl_trans *trans, u32 addr, 457 int (*write_mem)(struct iwl_trans *trans, u32 addr,
458 void *buf, int dwords); 458 const void *buf, int dwords);
459 void (*configure)(struct iwl_trans *trans, 459 void (*configure)(struct iwl_trans *trans,
460 const struct iwl_trans_config *trans_cfg); 460 const struct iwl_trans_config *trans_cfg);
461 void (*set_pmi)(struct iwl_trans *trans, bool state); 461 void (*set_pmi)(struct iwl_trans *trans, bool state);
@@ -587,17 +587,18 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
587 trans->state = IWL_TRANS_NO_FW; 587 trans->state = IWL_TRANS_NO_FW;
588} 588}
589 589
590static inline void iwl_trans_d3_suspend(struct iwl_trans *trans) 590static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
591{ 591{
592 might_sleep(); 592 might_sleep();
593 trans->ops->d3_suspend(trans); 593 trans->ops->d3_suspend(trans, test);
594} 594}
595 595
596static inline int iwl_trans_d3_resume(struct iwl_trans *trans, 596static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
597 enum iwl_d3_status *status) 597 enum iwl_d3_status *status,
598 bool test)
598{ 599{
599 might_sleep(); 600 might_sleep();
600 return trans->ops->d3_resume(trans, status); 601 return trans->ops->d3_resume(trans, status, test);
601} 602}
602 603
603static inline int iwl_trans_send_cmd(struct iwl_trans *trans, 604static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
@@ -761,7 +762,7 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
761} 762}
762 763
763static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, 764static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
764 void *buf, int dwords) 765 const void *buf, int dwords)
765{ 766{
766 return trans->ops->write_mem(trans, addr, buf, dwords); 767 return trans->ops->write_mem(trans, addr, buf, dwords);
767} 768}
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index 2acc44b40986..ff856e543ae8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -3,7 +3,7 @@ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o 3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
4iwlmvm-y += scan.o time-event.o rs.o 4iwlmvm-y += scan.o time-event.o rs.o
5iwlmvm-y += power.o bt-coex.o 5iwlmvm-y += power.o bt-coex.o
6iwlmvm-y += led.o 6iwlmvm-y += led.o tt.o
7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o 7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
8iwlmvm-$(CONFIG_PM_SLEEP) += d3.o 8iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
9 9
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 810bfa5f6de0..dbd622a3929c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -174,7 +174,7 @@ static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = {
174static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = { 174static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = {
175 cpu_to_le32(0xaaaaaaaa), 175 cpu_to_le32(0xaaaaaaaa),
176 cpu_to_le32(0xaaaaaaaa), 176 cpu_to_le32(0xaaaaaaaa),
177 cpu_to_le32(0xaeaaaaaa), 177 cpu_to_le32(0xaaaaaaaa),
178 cpu_to_le32(0xaaaaaaaa), 178 cpu_to_le32(0xaaaaaaaa),
179 cpu_to_le32(0xcc00ff28), 179 cpu_to_le32(0xcc00ff28),
180 cpu_to_le32(0x0000aaaa), 180 cpu_to_le32(0x0000aaaa),
@@ -202,6 +202,22 @@ static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
202 cpu_to_le32(0x00000000), 202 cpu_to_le32(0x00000000),
203}; 203};
204 204
205/* single shared antenna */
206static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
207 cpu_to_le32(0x40000000),
208 cpu_to_le32(0x00000000),
209 cpu_to_le32(0x44000000),
210 cpu_to_le32(0x00000000),
211 cpu_to_le32(0x40000000),
212 cpu_to_le32(0x00000000),
213 cpu_to_le32(0x44000000),
214 cpu_to_le32(0x00000000),
215 cpu_to_le32(0xC0004000),
216 cpu_to_le32(0xF0005000),
217 cpu_to_le32(0xC0004000),
218 cpu_to_le32(0xF0005000),
219};
220
205int iwl_send_bt_init_conf(struct iwl_mvm *mvm) 221int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
206{ 222{
207 struct iwl_bt_coex_cmd cmd = { 223 struct iwl_bt_coex_cmd cmd = {
@@ -225,7 +241,10 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
225 BT_VALID_REDUCED_TX_POWER | 241 BT_VALID_REDUCED_TX_POWER |
226 BT_VALID_LUT); 242 BT_VALID_LUT);
227 243
228 if (is_loose_coex()) 244 if (mvm->cfg->bt_shared_single_ant)
245 memcpy(&cmd.decision_lut, iwl_single_shared_ant_lookup,
246 sizeof(iwl_single_shared_ant_lookup));
247 else if (is_loose_coex())
229 memcpy(&cmd.decision_lut, iwl_loose_lookup, 248 memcpy(&cmd.decision_lut, iwl_loose_lookup,
230 sizeof(iwl_tight_lookup)); 249 sizeof(iwl_tight_lookup));
231 else 250 else
@@ -351,6 +370,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
351 enum ieee80211_band band; 370 enum ieee80211_band band;
352 int ave_rssi; 371 int ave_rssi;
353 372
373 lockdep_assert_held(&mvm->mutex);
354 if (vif->type != NL80211_IFTYPE_STATION) 374 if (vif->type != NL80211_IFTYPE_STATION)
355 return; 375 return;
356 376
@@ -365,7 +385,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
365 smps_mode = IEEE80211_SMPS_AUTOMATIC; 385 smps_mode = IEEE80211_SMPS_AUTOMATIC;
366 386
367 if (band != IEEE80211_BAND_2GHZ) { 387 if (band != IEEE80211_BAND_2GHZ) {
368 ieee80211_request_smps(vif, smps_mode); 388 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
389 smps_mode);
369 return; 390 return;
370 } 391 }
371 392
@@ -380,7 +401,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
380 mvmvif->id, data->notif->bt_status, 401 mvmvif->id, data->notif->bt_status,
381 data->notif->bt_traffic_load, smps_mode); 402 data->notif->bt_traffic_load, smps_mode);
382 403
383 ieee80211_request_smps(vif, smps_mode); 404 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
384 405
385 /* don't reduce the Tx power if in loose scheme */ 406 /* don't reduce the Tx power if in loose scheme */
386 if (is_loose_coex()) 407 if (is_loose_coex())
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 16bbdcc8627a..7e5e5c2f9f87 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -63,6 +63,7 @@
63 63
64#include <linux/etherdevice.h> 64#include <linux/etherdevice.h>
65#include <linux/ip.h> 65#include <linux/ip.h>
66#include <linux/fs.h>
66#include <net/cfg80211.h> 67#include <net/cfg80211.h>
67#include <net/ipv6.h> 68#include <net/ipv6.h>
68#include <net/tcp.h> 69#include <net/tcp.h>
@@ -419,8 +420,7 @@ static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
419 return cpu_to_le16(be16_to_cpu((__force __be16)check)); 420 return cpu_to_le16(be16_to_cpu((__force __be16)check));
420} 421}
421 422
422static void iwl_mvm_build_tcp_packet(struct iwl_mvm *mvm, 423static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
423 struct ieee80211_vif *vif,
424 struct cfg80211_wowlan_tcp *tcp, 424 struct cfg80211_wowlan_tcp *tcp,
425 void *_pkt, u8 *mask, 425 void *_pkt, u8 *mask,
426 __le16 *pseudo_hdr_csum, 426 __le16 *pseudo_hdr_csum,
@@ -566,21 +566,21 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
566 566
567 /* SYN (TX) */ 567 /* SYN (TX) */
568 iwl_mvm_build_tcp_packet( 568 iwl_mvm_build_tcp_packet(
569 mvm, vif, tcp, cfg->syn_tx.data, NULL, 569 vif, tcp, cfg->syn_tx.data, NULL,
570 &cfg->syn_tx.info.tcp_pseudo_header_checksum, 570 &cfg->syn_tx.info.tcp_pseudo_header_checksum,
571 MVM_TCP_TX_SYN); 571 MVM_TCP_TX_SYN);
572 cfg->syn_tx.info.tcp_payload_length = 0; 572 cfg->syn_tx.info.tcp_payload_length = 0;
573 573
574 /* SYN/ACK (RX) */ 574 /* SYN/ACK (RX) */
575 iwl_mvm_build_tcp_packet( 575 iwl_mvm_build_tcp_packet(
576 mvm, vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask, 576 vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
577 &cfg->synack_rx.info.tcp_pseudo_header_checksum, 577 &cfg->synack_rx.info.tcp_pseudo_header_checksum,
578 MVM_TCP_RX_SYNACK); 578 MVM_TCP_RX_SYNACK);
579 cfg->synack_rx.info.tcp_payload_length = 0; 579 cfg->synack_rx.info.tcp_payload_length = 0;
580 580
581 /* KEEPALIVE/ACK (TX) */ 581 /* KEEPALIVE/ACK (TX) */
582 iwl_mvm_build_tcp_packet( 582 iwl_mvm_build_tcp_packet(
583 mvm, vif, tcp, cfg->keepalive_tx.data, NULL, 583 vif, tcp, cfg->keepalive_tx.data, NULL,
584 &cfg->keepalive_tx.info.tcp_pseudo_header_checksum, 584 &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
585 MVM_TCP_TX_DATA); 585 MVM_TCP_TX_DATA);
586 cfg->keepalive_tx.info.tcp_payload_length = 586 cfg->keepalive_tx.info.tcp_payload_length =
@@ -604,7 +604,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
604 604
605 /* ACK (RX) */ 605 /* ACK (RX) */
606 iwl_mvm_build_tcp_packet( 606 iwl_mvm_build_tcp_packet(
607 mvm, vif, tcp, cfg->keepalive_ack_rx.data, 607 vif, tcp, cfg->keepalive_ack_rx.data,
608 cfg->keepalive_ack_rx.rx_mask, 608 cfg->keepalive_ack_rx.rx_mask,
609 &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum, 609 &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
610 MVM_TCP_RX_ACK); 610 MVM_TCP_RX_ACK);
@@ -612,7 +612,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
612 612
613 /* WAKEUP (RX) */ 613 /* WAKEUP (RX) */
614 iwl_mvm_build_tcp_packet( 614 iwl_mvm_build_tcp_packet(
615 mvm, vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask, 615 vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
616 &cfg->wake_rx.info.tcp_pseudo_header_checksum, 616 &cfg->wake_rx.info.tcp_pseudo_header_checksum,
617 MVM_TCP_RX_WAKE); 617 MVM_TCP_RX_WAKE);
618 cfg->wake_rx.info.tcp_payload_length = 618 cfg->wake_rx.info.tcp_payload_length =
@@ -620,7 +620,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
620 620
621 /* FIN */ 621 /* FIN */
622 iwl_mvm_build_tcp_packet( 622 iwl_mvm_build_tcp_packet(
623 mvm, vif, tcp, cfg->fin_tx.data, NULL, 623 vif, tcp, cfg->fin_tx.data, NULL,
624 &cfg->fin_tx.info.tcp_pseudo_header_checksum, 624 &cfg->fin_tx.info.tcp_pseudo_header_checksum,
625 MVM_TCP_TX_FIN); 625 MVM_TCP_TX_FIN);
626 cfg->fin_tx.info.tcp_payload_length = 0; 626 cfg->fin_tx.info.tcp_payload_length = 0;
@@ -756,7 +756,9 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
756 return 0; 756 return 0;
757} 757}
758 758
759int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 759static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
760 struct cfg80211_wowlan *wowlan,
761 bool test)
760{ 762{
761 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 763 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
762 struct iwl_d3_iter_data suspend_iter_data = { 764 struct iwl_d3_iter_data suspend_iter_data = {
@@ -769,7 +771,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
769 struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; 771 struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
770 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; 772 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
771 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; 773 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
772 struct iwl_d3_manager_config d3_cfg_cmd = { 774 struct iwl_d3_manager_config d3_cfg_cmd_data = {
773 /* 775 /*
774 * Program the minimum sleep time to 10 seconds, as many 776 * Program the minimum sleep time to 10 seconds, as many
775 * platforms have issues processing a wakeup signal while 777 * platforms have issues processing a wakeup signal while
@@ -777,17 +779,30 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
777 */ 779 */
778 .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), 780 .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
779 }; 781 };
782 struct iwl_host_cmd d3_cfg_cmd = {
783 .id = D3_CONFIG_CMD,
784 .flags = CMD_SYNC | CMD_WANT_SKB,
785 .data[0] = &d3_cfg_cmd_data,
786 .len[0] = sizeof(d3_cfg_cmd_data),
787 };
780 struct wowlan_key_data key_data = { 788 struct wowlan_key_data key_data = {
781 .use_rsc_tsc = false, 789 .use_rsc_tsc = false,
782 .tkip = &tkip_cmd, 790 .tkip = &tkip_cmd,
783 .use_tkip = false, 791 .use_tkip = false,
784 }; 792 };
785 int ret, i; 793 int ret, i;
794 int len __maybe_unused;
786 u16 seq; 795 u16 seq;
787 u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT; 796 u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
788 797
789 if (WARN_ON(!wowlan)) 798 if (!wowlan) {
799 /*
800 * mac80211 shouldn't get here, but for D3 test
801 * it doesn't warrant a warning
802 */
803 WARN_ON(!test);
790 return -EINVAL; 804 return -EINVAL;
805 }
791 806
792 key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); 807 key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
793 if (!key_data.rsc_tsc) 808 if (!key_data.rsc_tsc)
@@ -1007,15 +1022,37 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1007 if (ret) 1022 if (ret)
1008 goto out; 1023 goto out;
1009 1024
1025 ret = iwl_mvm_power_update_mode(mvm, vif);
1026 if (ret)
1027 goto out;
1028
1029#ifdef CONFIG_IWLWIFI_DEBUGFS
1030 if (mvm->d3_wake_sysassert)
1031 d3_cfg_cmd_data.wakeup_flags |=
1032 cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
1033#endif
1034
1010 /* must be last -- this switches firmware state */ 1035 /* must be last -- this switches firmware state */
1011 ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC, 1036 ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
1012 sizeof(d3_cfg_cmd), &d3_cfg_cmd);
1013 if (ret) 1037 if (ret)
1014 goto out; 1038 goto out;
1039#ifdef CONFIG_IWLWIFI_DEBUGFS
1040 len = le32_to_cpu(d3_cfg_cmd.resp_pkt->len_n_flags) &
1041 FH_RSCSR_FRAME_SIZE_MSK;
1042 if (len >= sizeof(u32) * 2) {
1043 mvm->d3_test_pme_ptr =
1044 le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
1045 } else if (test) {
1046 /* in test mode we require the pointer */
1047 ret = -EIO;
1048 goto out;
1049 }
1050#endif
1051 iwl_free_resp(&d3_cfg_cmd);
1015 1052
1016 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1053 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1017 1054
1018 iwl_trans_d3_suspend(mvm->trans); 1055 iwl_trans_d3_suspend(mvm->trans, test);
1019 out: 1056 out:
1020 mvm->aux_sta.sta_id = old_aux_sta_id; 1057 mvm->aux_sta.sta_id = old_aux_sta_id;
1021 mvm_ap_sta->sta_id = old_ap_sta_id; 1058 mvm_ap_sta->sta_id = old_ap_sta_id;
@@ -1030,6 +1067,11 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1030 return ret; 1067 return ret;
1031} 1068}
1032 1069
1070int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1071{
1072 return __iwl_mvm_suspend(hw, wowlan, false);
1073}
1074
1033static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, 1075static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1034 struct ieee80211_vif *vif) 1076 struct ieee80211_vif *vif)
1035{ 1077{
@@ -1214,9 +1256,28 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1214 iwl_free_resp(&cmd); 1256 iwl_free_resp(&cmd);
1215} 1257}
1216 1258
1217int iwl_mvm_resume(struct ieee80211_hw *hw) 1259static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
1260{
1261#ifdef CONFIG_IWLWIFI_DEBUGFS
1262 const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
1263 u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
1264 u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;
1265
1266 if (!mvm->store_d3_resume_sram)
1267 return;
1268
1269 if (!mvm->d3_resume_sram) {
1270 mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
1271 if (!mvm->d3_resume_sram)
1272 return;
1273 }
1274
1275 iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
1276#endif
1277}
1278
1279static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1218{ 1280{
1219 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1220 struct iwl_d3_iter_data resume_iter_data = { 1281 struct iwl_d3_iter_data resume_iter_data = {
1221 .mvm = mvm, 1282 .mvm = mvm,
1222 }; 1283 };
@@ -1236,7 +1297,7 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
1236 1297
1237 vif = resume_iter_data.vif; 1298 vif = resume_iter_data.vif;
1238 1299
1239 ret = iwl_trans_d3_resume(mvm->trans, &d3_status); 1300 ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
1240 if (ret) 1301 if (ret)
1241 goto out_unlock; 1302 goto out_unlock;
1242 1303
@@ -1245,12 +1306,15 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
1245 goto out_unlock; 1306 goto out_unlock;
1246 } 1307 }
1247 1308
1309 /* query SRAM first in case we want event logging */
1310 iwl_mvm_read_d3_sram(mvm);
1311
1248 iwl_mvm_query_wakeup_reasons(mvm, vif); 1312 iwl_mvm_query_wakeup_reasons(mvm, vif);
1249 1313
1250 out_unlock: 1314 out_unlock:
1251 mutex_unlock(&mvm->mutex); 1315 mutex_unlock(&mvm->mutex);
1252 1316
1253 if (vif) 1317 if (!test && vif)
1254 ieee80211_resume_disconnect(vif); 1318 ieee80211_resume_disconnect(vif);
1255 1319
1256 /* return 1 to reconfigure the device */ 1320 /* return 1 to reconfigure the device */
@@ -1258,9 +1322,106 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
1258 return 1; 1322 return 1;
1259} 1323}
1260 1324
1325int iwl_mvm_resume(struct ieee80211_hw *hw)
1326{
1327 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1328
1329 return __iwl_mvm_resume(mvm, false);
1330}
1331
1261void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) 1332void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
1262{ 1333{
1263 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1334 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1264 1335
1265 device_set_wakeup_enable(mvm->trans->dev, enabled); 1336 device_set_wakeup_enable(mvm->trans->dev, enabled);
1266} 1337}
1338
1339#ifdef CONFIG_IWLWIFI_DEBUGFS
1340static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
1341{
1342 struct iwl_mvm *mvm = inode->i_private;
1343 int err;
1344
1345 if (mvm->d3_test_active)
1346 return -EBUSY;
1347
1348 file->private_data = inode->i_private;
1349
1350 ieee80211_stop_queues(mvm->hw);
1351 synchronize_net();
1352
1353 /* start pseudo D3 */
1354 rtnl_lock();
1355 err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
1356 rtnl_unlock();
1357 if (err > 0)
1358 err = -EINVAL;
1359 if (err) {
1360 ieee80211_wake_queues(mvm->hw);
1361 return err;
1362 }
1363 mvm->d3_test_active = true;
1364 return 0;
1365}
1366
1367static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
1368 size_t count, loff_t *ppos)
1369{
1370 struct iwl_mvm *mvm = file->private_data;
1371 u32 pme_asserted;
1372
1373 while (true) {
1374 pme_asserted = iwl_trans_read_mem32(mvm->trans,
1375 mvm->d3_test_pme_ptr);
1376 if (pme_asserted)
1377 break;
1378 if (msleep_interruptible(100))
1379 break;
1380 }
1381
1382 return 0;
1383}
1384
1385static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
1386 struct ieee80211_vif *vif)
1387{
1388 if (vif->type == NL80211_IFTYPE_STATION)
1389 ieee80211_connection_loss(vif);
1390}
1391
1392static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
1393{
1394 struct iwl_mvm *mvm = inode->i_private;
1395 int remaining_time = 10;
1396
1397 mvm->d3_test_active = false;
1398 __iwl_mvm_resume(mvm, true);
1399 iwl_abort_notification_waits(&mvm->notif_wait);
1400 ieee80211_restart_hw(mvm->hw);
1401
1402 /* wait for restart and disconnect all interfaces */
1403 while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
1404 remaining_time > 0) {
1405 remaining_time--;
1406 msleep(1000);
1407 }
1408
1409 if (remaining_time == 0)
1410 IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
1411
1412 ieee80211_iterate_active_interfaces_atomic(
1413 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1414 iwl_mvm_d3_test_disconn_work_iter, NULL);
1415
1416 ieee80211_wake_queues(mvm->hw);
1417
1418 return 0;
1419}
1420
1421const struct file_operations iwl_dbgfs_d3_test_ops = {
1422 .llseek = no_llseek,
1423 .open = iwl_mvm_d3_test_open,
1424 .read = iwl_mvm_d3_test_read,
1425 .release = iwl_mvm_d3_test_release,
1426};
1427#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 2053dccefcd6..e56ed2a84888 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -145,15 +145,18 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
145 char *buf; 145 char *buf;
146 u8 *ptr; 146 u8 *ptr;
147 147
148 if (!mvm->ucode_loaded)
149 return -EINVAL;
150
148 /* default is to dump the entire data segment */ 151 /* default is to dump the entire data segment */
149 if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) { 152 if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) {
150 mvm->dbgfs_sram_offset = 0x800000;
151 if (!mvm->ucode_loaded)
152 return -EINVAL;
153 img = &mvm->fw->img[mvm->cur_ucode]; 153 img = &mvm->fw->img[mvm->cur_ucode];
154 mvm->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; 154 ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
155 len = img->sec[IWL_UCODE_SECTION_DATA].len;
156 } else {
157 ofs = mvm->dbgfs_sram_offset;
158 len = mvm->dbgfs_sram_len;
155 } 159 }
156 len = mvm->dbgfs_sram_len;
157 160
158 bufsz = len * 4 + 256; 161 bufsz = len * 4 + 256;
159 buf = kzalloc(bufsz, GFP_KERNEL); 162 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -167,12 +170,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
167 } 170 }
168 171
169 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len); 172 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len);
170 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", 173 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", ofs);
171 mvm->dbgfs_sram_offset);
172 174
173 iwl_trans_read_mem_bytes(mvm->trans, 175 iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
174 mvm->dbgfs_sram_offset,
175 ptr, len);
176 for (ofs = 0; ofs < len; ofs += 16) { 176 for (ofs = 0; ofs < len; ofs += 16) {
177 pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs); 177 pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs);
178 hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos, 178 hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
@@ -300,6 +300,168 @@ static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
300 return count; 300 return count;
301} 301}
302 302
303static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
304 struct ieee80211_vif *vif,
305 enum iwl_dbgfs_pm_mask param, int val)
306{
307 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
308 struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
309
310 dbgfs_pm->mask |= param;
311
312 switch (param) {
313 case MVM_DEBUGFS_PM_KEEP_ALIVE: {
314 struct ieee80211_hw *hw = mvm->hw;
315 int dtimper = hw->conf.ps_dtim_period ?: 1;
316 int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
317
318 IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
319 if (val * MSEC_PER_SEC < 3 * dtimper_msec) {
320 IWL_WARN(mvm,
321 "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
322 val * MSEC_PER_SEC, 3 * dtimper_msec);
323 }
324 dbgfs_pm->keep_alive_seconds = val;
325 break;
326 }
327 case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
328 IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
329 val ? "enabled" : "disabled");
330 dbgfs_pm->skip_over_dtim = val;
331 break;
332 case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
333 IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
334 dbgfs_pm->skip_dtim_periods = val;
335 break;
336 case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
337 IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
338 dbgfs_pm->rx_data_timeout = val;
339 break;
340 case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
341 IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
342 dbgfs_pm->tx_data_timeout = val;
343 break;
344 case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
345 IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
346 dbgfs_pm->disable_power_off = val;
347 case MVM_DEBUGFS_PM_LPRX_ENA:
348 IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
349 dbgfs_pm->lprx_ena = val;
350 break;
351 case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
352 IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
353 dbgfs_pm->lprx_rssi_threshold = val;
354 break;
355 }
356}
357
358static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
359 const char __user *user_buf,
360 size_t count, loff_t *ppos)
361{
362 struct ieee80211_vif *vif = file->private_data;
363 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
364 struct iwl_mvm *mvm = mvmvif->dbgfs_data;
365 enum iwl_dbgfs_pm_mask param;
366 char buf[32] = {};
367 int val;
368 int ret;
369
370 if (copy_from_user(buf, user_buf, sizeof(buf)))
371 return -EFAULT;
372
373 if (!strncmp("keep_alive=", buf, 11)) {
374 if (sscanf(buf + 11, "%d", &val) != 1)
375 return -EINVAL;
376 param = MVM_DEBUGFS_PM_KEEP_ALIVE;
377 } else if (!strncmp("skip_over_dtim=", buf, 15)) {
378 if (sscanf(buf + 15, "%d", &val) != 1)
379 return -EINVAL;
380 param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
381 } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
382 if (sscanf(buf + 18, "%d", &val) != 1)
383 return -EINVAL;
384 param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
385 } else if (!strncmp("rx_data_timeout=", buf, 16)) {
386 if (sscanf(buf + 16, "%d", &val) != 1)
387 return -EINVAL;
388 param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
389 } else if (!strncmp("tx_data_timeout=", buf, 16)) {
390 if (sscanf(buf + 16, "%d", &val) != 1)
391 return -EINVAL;
392 param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
393 } else if (!strncmp("disable_power_off=", buf, 18)) {
394 if (sscanf(buf + 18, "%d", &val) != 1)
395 return -EINVAL;
396 param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
397 } else if (!strncmp("lprx=", buf, 5)) {
398 if (sscanf(buf + 5, "%d", &val) != 1)
399 return -EINVAL;
400 param = MVM_DEBUGFS_PM_LPRX_ENA;
401 } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
402 if (sscanf(buf + 20, "%d", &val) != 1)
403 return -EINVAL;
404 if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
405 POWER_LPRX_RSSI_THRESHOLD_MIN)
406 return -EINVAL;
407 param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
408 } else {
409 return -EINVAL;
410 }
411
412 mutex_lock(&mvm->mutex);
413 iwl_dbgfs_update_pm(mvm, vif, param, val);
414 ret = iwl_mvm_power_update_mode(mvm, vif);
415 mutex_unlock(&mvm->mutex);
416
417 return ret ?: count;
418}
419
420static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
421 char __user *user_buf,
422 size_t count, loff_t *ppos)
423{
424 struct ieee80211_vif *vif = file->private_data;
425 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
426 struct iwl_mvm *mvm = mvmvif->dbgfs_data;
427 struct iwl_powertable_cmd cmd = {};
428 char buf[256];
429 int bufsz = sizeof(buf);
430 int pos = 0;
431
432 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
433
434 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
435 (cmd.flags &
436 cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
437 0 : 1);
438 pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
439 le32_to_cpu(cmd.skip_dtim_periods));
440 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
441 iwlmvm_mod_params.power_scheme);
442 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
443 le16_to_cpu(cmd.flags));
444 pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
445 cmd.keep_alive_seconds);
446
447 if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
448 pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
449 (cmd.flags &
450 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
451 1 : 0);
452 pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
453 le32_to_cpu(cmd.rx_data_timeout));
454 pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
455 le32_to_cpu(cmd.tx_data_timeout));
456 if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
457 pos += scnprintf(buf+pos, bufsz-pos,
458 "lprx_rssi_threshold = %d\n",
459 le32_to_cpu(cmd.lprx_rssi_threshold));
460 }
461
462 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
463}
464
303static ssize_t iwl_dbgfs_mac_params_read(struct file *file, 465static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
304 char __user *user_buf, 466 char __user *user_buf,
305 size_t count, loff_t *ppos) 467 size_t count, loff_t *ppos)
@@ -481,6 +643,255 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
481 return count; 643 return count;
482} 644}
483 645
646static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
647 enum iwl_dbgfs_bf_mask param, int value)
648{
649 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
650 struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
651
652 dbgfs_bf->mask |= param;
653
654 switch (param) {
655 case MVM_DEBUGFS_BF_ENERGY_DELTA:
656 dbgfs_bf->bf_energy_delta = value;
657 break;
658 case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
659 dbgfs_bf->bf_roaming_energy_delta = value;
660 break;
661 case MVM_DEBUGFS_BF_ROAMING_STATE:
662 dbgfs_bf->bf_roaming_state = value;
663 break;
664 case MVM_DEBUGFS_BF_TEMPERATURE_DELTA:
665 dbgfs_bf->bf_temperature_delta = value;
666 break;
667 case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
668 dbgfs_bf->bf_enable_beacon_filter = value;
669 break;
670 case MVM_DEBUGFS_BF_DEBUG_FLAG:
671 dbgfs_bf->bf_debug_flag = value;
672 break;
673 case MVM_DEBUGFS_BF_ESCAPE_TIMER:
674 dbgfs_bf->bf_escape_timer = value;
675 break;
676 case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
677 dbgfs_bf->ba_enable_beacon_abort = value;
678 break;
679 case MVM_DEBUGFS_BA_ESCAPE_TIMER:
680 dbgfs_bf->ba_escape_timer = value;
681 break;
682 }
683}
684
685static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
686 const char __user *user_buf,
687 size_t count, loff_t *ppos)
688{
689 struct ieee80211_vif *vif = file->private_data;
690 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
691 struct iwl_mvm *mvm = mvmvif->dbgfs_data;
692 enum iwl_dbgfs_bf_mask param;
693 char buf[256];
694 int buf_size;
695 int value;
696 int ret = 0;
697
698 memset(buf, 0, sizeof(buf));
699 buf_size = min(count, sizeof(buf) - 1);
700 if (copy_from_user(buf, user_buf, buf_size))
701 return -EFAULT;
702
703 if (!strncmp("bf_energy_delta=", buf, 16)) {
704 if (sscanf(buf+16, "%d", &value) != 1)
705 return -EINVAL;
706 if (value < IWL_BF_ENERGY_DELTA_MIN ||
707 value > IWL_BF_ENERGY_DELTA_MAX)
708 return -EINVAL;
709 param = MVM_DEBUGFS_BF_ENERGY_DELTA;
710 } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
711 if (sscanf(buf+24, "%d", &value) != 1)
712 return -EINVAL;
713 if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
714 value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
715 return -EINVAL;
716 param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
717 } else if (!strncmp("bf_roaming_state=", buf, 17)) {
718 if (sscanf(buf+17, "%d", &value) != 1)
719 return -EINVAL;
720 if (value < IWL_BF_ROAMING_STATE_MIN ||
721 value > IWL_BF_ROAMING_STATE_MAX)
722 return -EINVAL;
723 param = MVM_DEBUGFS_BF_ROAMING_STATE;
724 } else if (!strncmp("bf_temperature_delta=", buf, 21)) {
725 if (sscanf(buf+21, "%d", &value) != 1)
726 return -EINVAL;
727 if (value < IWL_BF_TEMPERATURE_DELTA_MIN ||
728 value > IWL_BF_TEMPERATURE_DELTA_MAX)
729 return -EINVAL;
730 param = MVM_DEBUGFS_BF_TEMPERATURE_DELTA;
731 } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
732 if (sscanf(buf+24, "%d", &value) != 1)
733 return -EINVAL;
734 if (value < 0 || value > 1)
735 return -EINVAL;
736 param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
737 } else if (!strncmp("bf_debug_flag=", buf, 14)) {
738 if (sscanf(buf+14, "%d", &value) != 1)
739 return -EINVAL;
740 if (value < 0 || value > 1)
741 return -EINVAL;
742 param = MVM_DEBUGFS_BF_DEBUG_FLAG;
743 } else if (!strncmp("bf_escape_timer=", buf, 16)) {
744 if (sscanf(buf+16, "%d", &value) != 1)
745 return -EINVAL;
746 if (value < IWL_BF_ESCAPE_TIMER_MIN ||
747 value > IWL_BF_ESCAPE_TIMER_MAX)
748 return -EINVAL;
749 param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
750 } else if (!strncmp("ba_escape_timer=", buf, 16)) {
751 if (sscanf(buf+16, "%d", &value) != 1)
752 return -EINVAL;
753 if (value < IWL_BA_ESCAPE_TIMER_MIN ||
754 value > IWL_BA_ESCAPE_TIMER_MAX)
755 return -EINVAL;
756 param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
757 } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
758 if (sscanf(buf+23, "%d", &value) != 1)
759 return -EINVAL;
760 if (value < 0 || value > 1)
761 return -EINVAL;
762 param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
763 } else {
764 return -EINVAL;
765 }
766
767 mutex_lock(&mvm->mutex);
768 iwl_dbgfs_update_bf(vif, param, value);
769 if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
770 ret = iwl_mvm_disable_beacon_filter(mvm, vif);
771 } else {
772 if (mvmvif->bf_enabled)
773 ret = iwl_mvm_enable_beacon_filter(mvm, vif);
774 else
775 ret = iwl_mvm_disable_beacon_filter(mvm, vif);
776 }
777 mutex_unlock(&mvm->mutex);
778
779 return ret ?: count;
780}
781
782static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
783 char __user *user_buf,
784 size_t count, loff_t *ppos)
785{
786 struct ieee80211_vif *vif = file->private_data;
787 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
788 char buf[256];
789 int pos = 0;
790 const size_t bufsz = sizeof(buf);
791 struct iwl_beacon_filter_cmd cmd = {
792 .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT,
793 .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT,
794 .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT,
795 .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT,
796 .bf_enable_beacon_filter = IWL_BF_ENABLE_BEACON_FILTER_DEFAULT,
797 .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT,
798 .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),
799 .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT),
800 .ba_enable_beacon_abort = IWL_BA_ENABLE_BEACON_ABORT_DEFAULT,
801 };
802
803 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
804 if (mvmvif->bf_enabled)
805 cmd.bf_enable_beacon_filter = 1;
806 else
807 cmd.bf_enable_beacon_filter = 0;
808
809 pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
810 cmd.bf_energy_delta);
811 pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
812 cmd.bf_roaming_energy_delta);
813 pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
814 cmd.bf_roaming_state);
815 pos += scnprintf(buf+pos, bufsz-pos, "bf_temperature_delta = %d\n",
816 cmd.bf_temperature_delta);
817 pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
818 cmd.bf_enable_beacon_filter);
819 pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
820 cmd.bf_debug_flag);
821 pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
822 cmd.bf_escape_timer);
823 pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
824 cmd.ba_escape_timer);
825 pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
826 cmd.ba_enable_beacon_abort);
827
828 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
829}
830
831#ifdef CONFIG_PM_SLEEP
832static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
833 const char __user *user_buf,
834 size_t count, loff_t *ppos)
835{
836 struct iwl_mvm *mvm = file->private_data;
837 char buf[8] = {};
838 int store;
839
840 if (copy_from_user(buf, user_buf, sizeof(buf)))
841 return -EFAULT;
842
843 if (sscanf(buf, "%d", &store) != 1)
844 return -EINVAL;
845
846 mvm->store_d3_resume_sram = store;
847
848 return count;
849}
850
851static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
852 size_t count, loff_t *ppos)
853{
854 struct iwl_mvm *mvm = file->private_data;
855 const struct fw_img *img;
856 int ofs, len, pos = 0;
857 size_t bufsz, ret;
858 char *buf;
859 u8 *ptr = mvm->d3_resume_sram;
860
861 img = &mvm->fw->img[IWL_UCODE_WOWLAN];
862 len = img->sec[IWL_UCODE_SECTION_DATA].len;
863
864 bufsz = len * 4 + 256;
865 buf = kzalloc(bufsz, GFP_KERNEL);
866 if (!buf)
867 return -ENOMEM;
868
869 pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n",
870 mvm->store_d3_resume_sram ? "en" : "dis");
871
872 if (ptr) {
873 for (ofs = 0; ofs < len; ofs += 16) {
874 pos += scnprintf(buf + pos, bufsz - pos,
875 "0x%.4x ", ofs);
876 hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
877 bufsz - pos, false);
878 pos += strlen(buf + pos);
879 if (bufsz - pos > 0)
880 buf[pos++] = '\n';
881 }
882 } else {
883 pos += scnprintf(buf + pos, bufsz - pos,
884 "(no data captured)\n");
885 }
886
887 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
888
889 kfree(buf);
890
891 return ret;
892}
893#endif
894
484#define MVM_DEBUGFS_READ_FILE_OPS(name) \ 895#define MVM_DEBUGFS_READ_FILE_OPS(name) \
485static const struct file_operations iwl_dbgfs_##name##_ops = { \ 896static const struct file_operations iwl_dbgfs_##name##_ops = { \
486 .read = iwl_dbgfs_##name##_read, \ 897 .read = iwl_dbgfs_##name##_read, \
@@ -524,9 +935,14 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
524MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow); 935MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
525MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow); 936MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
526MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart); 937MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
938#ifdef CONFIG_PM_SLEEP
939MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
940#endif
527 941
528/* Interface specific debugfs entries */ 942/* Interface specific debugfs entries */
529MVM_DEBUGFS_READ_FILE_OPS(mac_params); 943MVM_DEBUGFS_READ_FILE_OPS(mac_params);
944MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params);
945MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params);
530 946
531int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) 947int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
532{ 948{
@@ -542,6 +958,13 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
542 MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR); 958 MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
543 MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR); 959 MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
544 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR); 960 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
961#ifdef CONFIG_PM_SLEEP
962 MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
963 MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
964 if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR,
965 mvm->debugfs_dir, &mvm->d3_wake_sysassert))
966 goto err;
967#endif
545 968
546 /* 969 /*
547 * Create a symlink with mac80211. It will be removed when mac80211 970 * Create a symlink with mac80211. It will be removed when mac80211
@@ -577,9 +1000,19 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
577 return; 1000 return;
578 } 1001 }
579 1002
1003 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
1004 vif->type == NL80211_IFTYPE_STATION && !vif->p2p)
1005 MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
1006 S_IRUSR);
1007
580 MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 1008 MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
581 S_IRUSR); 1009 S_IRUSR);
582 1010
1011 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
1012 mvmvif == mvm->bf_allowed_vif)
1013 MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
1014 S_IRUSR | S_IWUSR);
1015
583 /* 1016 /*
584 * Create symlink for convenience pointing to interface specific 1017 * Create symlink for convenience pointing to interface specific
585 * debugfs entries for the driver. For example, under 1018 * debugfs entries for the driver. For example, under
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 51e015d1dfb2..6f8b2c16ae17 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -75,13 +75,15 @@ enum iwl_d3_wakeup_flags {
75 * struct iwl_d3_manager_config - D3 manager configuration command 75 * struct iwl_d3_manager_config - D3 manager configuration command
76 * @min_sleep_time: minimum sleep time (in usec) 76 * @min_sleep_time: minimum sleep time (in usec)
77 * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags 77 * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
78 * @wakeup_host_timer: force wakeup after this many seconds
78 * 79 *
79 * The structure is used for the D3_CONFIG_CMD command. 80 * The structure is used for the D3_CONFIG_CMD command.
80 */ 81 */
81struct iwl_d3_manager_config { 82struct iwl_d3_manager_config {
82 __le32 min_sleep_time; 83 __le32 min_sleep_time;
83 __le32 wakeup_flags; 84 __le32 wakeup_flags;
84} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */ 85 __le32 wakeup_host_timer;
86} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */
85 87
86 88
87/* TODO: OFFLOADS_QUERY_API_S_VER_1 */ 89/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index d68640ea41d4..98b1feb43d38 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -71,7 +71,13 @@
71#define MAC_INDEX_MIN_DRIVER 0 71#define MAC_INDEX_MIN_DRIVER 0
72#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX 72#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
73 73
74#define AC_NUM 4 /* Number of access categories */ 74enum iwl_ac {
75 AC_BK,
76 AC_BE,
77 AC_VI,
78 AC_VO,
79 AC_NUM,
80};
75 81
76/** 82/**
77 * enum iwl_mac_protection_flags - MAC context flags 83 * enum iwl_mac_protection_flags - MAC context flags
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 81fe45f46be7..a6da359a80c3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -66,6 +66,11 @@
66 66
67/* Power Management Commands, Responses, Notifications */ 67/* Power Management Commands, Responses, Notifications */
68 68
69/* Radio LP RX Energy Threshold measured in dBm */
70#define POWER_LPRX_RSSI_THRESHOLD 75
71#define POWER_LPRX_RSSI_THRESHOLD_MAX 94
72#define POWER_LPRX_RSSI_THRESHOLD_MIN 30
73
69/** 74/**
70 * enum iwl_scan_flags - masks for power table command flags 75 * enum iwl_scan_flags - masks for power table command flags
71 * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off 76 * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
@@ -101,20 +106,107 @@ enum iwl_power_flags {
101 * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to 106 * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
102 * PSM transition - legacy PM 107 * PSM transition - legacy PM
103 * @sleep_interval: not in use 108 * @sleep_interval: not in use
104 * @keep_alive_beacons: not in use 109 * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
110 * is set. For example, if it is required to skip over
111 * one DTIM, this value need to be set to 2 (DTIM periods).
105 * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. 112 * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
106 * Default: 80dbm 113 * Default: 80dbm
107 */ 114 */
108struct iwl_powertable_cmd { 115struct iwl_powertable_cmd {
109 /* PM_POWER_TABLE_CMD_API_S_VER_5 */ 116 /* PM_POWER_TABLE_CMD_API_S_VER_6 */
110 __le16 flags; 117 __le16 flags;
111 u8 keep_alive_seconds; 118 u8 keep_alive_seconds;
112 u8 debug_flags; 119 u8 debug_flags;
113 __le32 rx_data_timeout; 120 __le32 rx_data_timeout;
114 __le32 tx_data_timeout; 121 __le32 tx_data_timeout;
115 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 122 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
116 __le32 keep_alive_beacons; 123 __le32 skip_dtim_periods;
117 __le32 lprx_rssi_threshold; 124 __le32 lprx_rssi_threshold;
118} __packed; 125} __packed;
119 126
127/**
128 * struct iwl_beacon_filter_cmd
129 * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
130 * @id_and_color: MAC contex identifier
131 * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
132 * to driver if delta in Energy values calculated for this and last
133 * passed beacon is greater than this threshold. Zero value means that
134 * the Energy change is ignored for beacon filtering, and beacon will
135 * not be forced to be sent to driver regardless of this delta. Typical
136 * energy delta 5dB.
137 * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
138 * Send beacon to driver if delta in Energy values calculated for this
139 * and last passed beacon is greater than this threshold. Zero value
140 * means that the Energy change is ignored for beacon filtering while in
141 * Roaming state, typical energy delta 1dB.
142 * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
143 * calculated for current beacon is less than the threshold, use
144 * Roaming Energy Delta Threshold, otherwise use normal Energy Delta
145 * Threshold. Typical energy threshold is -72dBm.
146 * @bf_temperature_delta: Send Beacon to driver if delta in temperature values
147 * calculated for this and the last passed beacon is greater than this
148 * threshold. Zero value means that the temperature changeis ignored for
149 * beacon filtering; beacons will not be forced to be sent to driver
150 * regardless of whether its temerature has been changed.
151 * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
152 * @bf_filter_escape_timer: Send beacons to to driver if no beacons were passed
153 * for a specific period of time. Units: Beacons.
154 * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
155 * for a longer period of time then this escape-timeout. Units: Beacons.
156 * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
157 */
158struct iwl_beacon_filter_cmd {
159 u8 bf_energy_delta;
160 u8 bf_roaming_energy_delta;
161 u8 bf_roaming_state;
162 u8 bf_temperature_delta;
163 u8 bf_enable_beacon_filter;
164 u8 bf_debug_flag;
165 __le16 reserved1;
166 __le32 bf_escape_timer;
167 __le32 ba_escape_timer;
168 u8 ba_enable_beacon_abort;
169 u8 reserved2[3];
170} __packed;
171
172/* Beacon filtering and beacon abort */
173#define IWL_BF_ENERGY_DELTA_DEFAULT 5
174#define IWL_BF_ENERGY_DELTA_MAX 255
175#define IWL_BF_ENERGY_DELTA_MIN 0
176
177#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
178#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
179#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
180
181#define IWL_BF_ROAMING_STATE_DEFAULT 72
182#define IWL_BF_ROAMING_STATE_MAX 255
183#define IWL_BF_ROAMING_STATE_MIN 0
184
185#define IWL_BF_TEMPERATURE_DELTA_DEFAULT 5
186#define IWL_BF_TEMPERATURE_DELTA_MAX 255
187#define IWL_BF_TEMPERATURE_DELTA_MIN 0
188
189#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
190
191#define IWL_BF_DEBUG_FLAG_DEFAULT 0
192
193#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
194#define IWL_BF_ESCAPE_TIMER_MAX 1024
195#define IWL_BF_ESCAPE_TIMER_MIN 0
196
197#define IWL_BA_ESCAPE_TIMER_DEFAULT 3
198#define IWL_BA_ESCAPE_TIMER_MAX 1024
199#define IWL_BA_ESCAPE_TIMER_MIN 0
200
201#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
202
203#define IWL_BF_CMD_CONFIG_DEFAULTS \
204 .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, \
205 .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, \
206 .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, \
207 .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, \
208 .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, \
209 .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
210 .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
211
120#endif 212#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 007a93b25bd7..700cce731770 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -134,6 +134,7 @@ enum iwl_tx_flags {
134#define TX_CMD_SEC_WEP 0x01 134#define TX_CMD_SEC_WEP 0x01
135#define TX_CMD_SEC_CCM 0x02 135#define TX_CMD_SEC_CCM 0x02
136#define TX_CMD_SEC_TKIP 0x03 136#define TX_CMD_SEC_TKIP 0x03
137#define TX_CMD_SEC_MSK 0x07
137#define TX_CMD_SEC_WEP_KEY_IDX_POS 6 138#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
138#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0 139#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
139#define TX_CMD_SEC_KEY128 0x08 140#define TX_CMD_SEC_KEY128 0x08
@@ -227,10 +228,11 @@ struct iwl_tx_cmd {
227 __le16 len; 228 __le16 len;
228 __le16 next_frame_len; 229 __le16 next_frame_len;
229 __le32 tx_flags; 230 __le32 tx_flags;
230 /* DRAM_SCRATCH_API_U_VER_1 */ 231 struct {
231 u8 try_cnt; 232 u8 try_cnt;
232 u8 btkill_cnt; 233 u8 btkill_cnt;
233 __le16 reserved; 234 __le16 reserved;
235 } scratch; /* DRAM_SCRATCH_API_U_VER_1 */
234 __le32 rate_n_flags; 236 __le32 rate_n_flags;
235 u8 sta_id; 237 u8 sta_id;
236 u8 sec_ctl; 238 u8 sec_ctl;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index c6384555aab4..cbfb3beae783 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -139,6 +139,9 @@ enum {
139 /* Power */ 139 /* Power */
140 POWER_TABLE_CMD = 0x77, 140 POWER_TABLE_CMD = 0x77,
141 141
142 /* Thermal Throttling*/
143 REPLY_THERMAL_MNG_BACKOFF = 0x7e,
144
142 /* Scanning */ 145 /* Scanning */
143 SCAN_REQUEST_CMD = 0x80, 146 SCAN_REQUEST_CMD = 0x80,
144 SCAN_ABORT_CMD = 0x81, 147 SCAN_ABORT_CMD = 0x81,
@@ -161,6 +164,8 @@ enum {
161 CARD_STATE_CMD = 0xa0, 164 CARD_STATE_CMD = 0xa0,
162 CARD_STATE_NOTIFICATION = 0xa1, 165 CARD_STATE_NOTIFICATION = 0xa1,
163 166
167 MISSED_BEACONS_NOTIFICATION = 0xa2,
168
164 REPLY_RX_PHY_CMD = 0xc0, 169 REPLY_RX_PHY_CMD = 0xc0,
165 REPLY_RX_MPDU_CMD = 0xc1, 170 REPLY_RX_MPDU_CMD = 0xc1,
166 BA_NOTIF = 0xc5, 171 BA_NOTIF = 0xc5,
@@ -170,6 +175,8 @@ enum {
170 BT_COEX_PROT_ENV = 0xcd, 175 BT_COEX_PROT_ENV = 0xcd,
171 BT_PROFILE_NOTIFICATION = 0xce, 176 BT_PROFILE_NOTIFICATION = 0xce,
172 177
178 REPLY_BEACON_FILTERING_CMD = 0xd2,
179
173 REPLY_DEBUG_CMD = 0xf0, 180 REPLY_DEBUG_CMD = 0xf0,
174 DEBUG_LOG_MSG = 0xf7, 181 DEBUG_LOG_MSG = 0xf7,
175 182
@@ -938,6 +945,24 @@ struct iwl_card_state_notif {
938} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ 945} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
939 946
940/** 947/**
948 * struct iwl_missed_beacons_notif - information on missed beacons
949 * ( MISSED_BEACONS_NOTIFICATION = 0xa2 )
950 * @mac_id: interface ID
951 * @consec_missed_beacons_since_last_rx: number of consecutive missed
952 * beacons since last RX.
953 * @consec_missed_beacons: number of consecutive missed beacons
954 * @num_expected_beacons:
955 * @num_recvd_beacons:
956 */
957struct iwl_missed_beacons_notif {
958 __le32 mac_id;
959 __le32 consec_missed_beacons_since_last_rx;
960 __le32 consec_missed_beacons;
961 __le32 num_expected_beacons;
962 __le32 num_recvd_beacons;
963} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
964
965/**
941 * struct iwl_set_calib_default_cmd - set default value for calibration. 966 * struct iwl_set_calib_default_cmd - set default value for calibration.
942 * ( SET_CALIB_DEFAULT_CMD = 0x8e ) 967 * ( SET_CALIB_DEFAULT_CMD = 0x8e )
943 * @calib_index: the calibration to set value for 968 * @calib_index: the calibration to set value for
@@ -975,4 +1000,212 @@ struct iwl_mcast_filter_cmd {
975 u8 addr_list[0]; 1000 u8 addr_list[0];
976} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ 1001} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
977 1002
1003struct mvm_statistics_dbg {
1004 __le32 burst_check;
1005 __le32 burst_count;
1006 __le32 wait_for_silence_timeout_cnt;
1007 __le32 reserved[3];
1008} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
1009
1010struct mvm_statistics_div {
1011 __le32 tx_on_a;
1012 __le32 tx_on_b;
1013 __le32 exec_time;
1014 __le32 probe_time;
1015 __le32 rssi_ant;
1016 __le32 reserved2;
1017} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
1018
1019struct mvm_statistics_general_common {
1020 __le32 temperature; /* radio temperature */
1021 __le32 temperature_m; /* radio voltage */
1022 struct mvm_statistics_dbg dbg;
1023 __le32 sleep_time;
1024 __le32 slots_out;
1025 __le32 slots_idle;
1026 __le32 ttl_timestamp;
1027 struct mvm_statistics_div div;
1028 __le32 rx_enable_counter;
1029 /*
1030 * num_of_sos_states:
1031 * count the number of times we have to re-tune
1032 * in order to get out of bad PHY status
1033 */
1034 __le32 num_of_sos_states;
1035} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
1036
1037struct mvm_statistics_rx_non_phy {
1038 __le32 bogus_cts; /* CTS received when not expecting CTS */
1039 __le32 bogus_ack; /* ACK received when not expecting ACK */
1040 __le32 non_bssid_frames; /* number of frames with BSSID that
1041 * doesn't belong to the STA BSSID */
1042 __le32 filtered_frames; /* count frames that were dumped in the
1043 * filtering process */
1044 __le32 non_channel_beacons; /* beacons with our bss id but not on
1045 * our serving channel */
1046 __le32 channel_beacons; /* beacons with our bss id and in our
1047 * serving channel */
1048 __le32 num_missed_bcon; /* number of missed beacons */
1049 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
1050 * ADC was in saturation */
1051 __le32 ina_detection_search_time;/* total time (in 0.8us) searched
1052 * for INA */
1053 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
1054 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
1055 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
1056 __le32 interference_data_flag; /* flag for interference data
1057 * availability. 1 when data is
1058 * available. */
1059 __le32 channel_load; /* counts RX Enable time in uSec */
1060 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
1061 * and CCK) counter */
1062 __le32 beacon_rssi_a;
1063 __le32 beacon_rssi_b;
1064 __le32 beacon_rssi_c;
1065 __le32 beacon_energy_a;
1066 __le32 beacon_energy_b;
1067 __le32 beacon_energy_c;
1068 __le32 num_bt_kills;
1069 __le32 mac_id;
1070 __le32 directed_data_mpdu;
1071} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
1072
1073struct mvm_statistics_rx_phy {
1074 __le32 ina_cnt;
1075 __le32 fina_cnt;
1076 __le32 plcp_err;
1077 __le32 crc32_err;
1078 __le32 overrun_err;
1079 __le32 early_overrun_err;
1080 __le32 crc32_good;
1081 __le32 false_alarm_cnt;
1082 __le32 fina_sync_err_cnt;
1083 __le32 sfd_timeout;
1084 __le32 fina_timeout;
1085 __le32 unresponded_rts;
1086 __le32 rxe_frame_limit_overrun;
1087 __le32 sent_ack_cnt;
1088 __le32 sent_cts_cnt;
1089 __le32 sent_ba_rsp_cnt;
1090 __le32 dsp_self_kill;
1091 __le32 mh_format_err;
1092 __le32 re_acq_main_rssi_sum;
1093 __le32 reserved;
1094} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
1095
1096struct mvm_statistics_rx_ht_phy {
1097 __le32 plcp_err;
1098 __le32 overrun_err;
1099 __le32 early_overrun_err;
1100 __le32 crc32_good;
1101 __le32 crc32_err;
1102 __le32 mh_format_err;
1103 __le32 agg_crc32_good;
1104 __le32 agg_mpdu_cnt;
1105 __le32 agg_cnt;
1106 __le32 unsupport_mcs;
1107} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
1108
1109#define MAX_CHAINS 3
1110
1111struct mvm_statistics_tx_non_phy_agg {
1112 __le32 ba_timeout;
1113 __le32 ba_reschedule_frames;
1114 __le32 scd_query_agg_frame_cnt;
1115 __le32 scd_query_no_agg;
1116 __le32 scd_query_agg;
1117 __le32 scd_query_mismatch;
1118 __le32 frame_not_ready;
1119 __le32 underrun;
1120 __le32 bt_prio_kill;
1121 __le32 rx_ba_rsp_cnt;
1122 __s8 txpower[MAX_CHAINS];
1123 __s8 reserved;
1124 __le32 reserved2;
1125} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
1126
1127struct mvm_statistics_tx_channel_width {
1128 __le32 ext_cca_narrow_ch20[1];
1129 __le32 ext_cca_narrow_ch40[2];
1130 __le32 ext_cca_narrow_ch80[3];
1131 __le32 ext_cca_narrow_ch160[4];
1132 __le32 last_tx_ch_width_indx;
1133 __le32 rx_detected_per_ch_width[4];
1134 __le32 success_per_ch_width[4];
1135 __le32 fail_per_ch_width[4];
1136}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
1137
1138struct mvm_statistics_tx {
1139 __le32 preamble_cnt;
1140 __le32 rx_detected_cnt;
1141 __le32 bt_prio_defer_cnt;
1142 __le32 bt_prio_kill_cnt;
1143 __le32 few_bytes_cnt;
1144 __le32 cts_timeout;
1145 __le32 ack_timeout;
1146 __le32 expected_ack_cnt;
1147 __le32 actual_ack_cnt;
1148 __le32 dump_msdu_cnt;
1149 __le32 burst_abort_next_frame_mismatch_cnt;
1150 __le32 burst_abort_missing_next_frame_cnt;
1151 __le32 cts_timeout_collision;
1152 __le32 ack_or_ba_timeout_collision;
1153 struct mvm_statistics_tx_non_phy_agg agg;
1154 struct mvm_statistics_tx_channel_width channel_width;
1155} __packed; /* STATISTICS_TX_API_S_VER_4 */
1156
1157
1158struct mvm_statistics_bt_activity {
1159 __le32 hi_priority_tx_req_cnt;
1160 __le32 hi_priority_tx_denied_cnt;
1161 __le32 lo_priority_tx_req_cnt;
1162 __le32 lo_priority_tx_denied_cnt;
1163 __le32 hi_priority_rx_req_cnt;
1164 __le32 hi_priority_rx_denied_cnt;
1165 __le32 lo_priority_rx_req_cnt;
1166 __le32 lo_priority_rx_denied_cnt;
1167} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
1168
1169struct mvm_statistics_general {
1170 struct mvm_statistics_general_common common;
1171 __le32 beacon_filtered;
1172 __le32 missed_beacons;
1173 __s8 beacon_filter_everage_energy;
1174 __s8 beacon_filter_reason;
1175 __s8 beacon_filter_current_energy;
1176 __s8 beacon_filter_reserved;
1177 __le32 beacon_filter_delta_time;
1178 struct mvm_statistics_bt_activity bt_activity;
1179} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
1180
1181struct mvm_statistics_rx {
1182 struct mvm_statistics_rx_phy ofdm;
1183 struct mvm_statistics_rx_phy cck;
1184 struct mvm_statistics_rx_non_phy general;
1185 struct mvm_statistics_rx_ht_phy ofdm_ht;
1186} __packed; /* STATISTICS_RX_API_S_VER_3 */
1187
1188/*
1189 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
1190 *
1191 * By default, uCode issues this notification after receiving a beacon
1192 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
1193 * REPLY_STATISTICS_CMD 0x9c, above.
1194 *
1195 * Statistics counters continue to increment beacon after beacon, but are
1196 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
1197 * 0x9c with CLEAR_STATS bit set (see above).
1198 *
1199 * uCode also issues this notification during scans. uCode clears statistics
1200 * appropriately so that each notification contains statistics for only the
1201 * one channel that has just been scanned.
1202 */
1203
1204struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */
1205 __le32 flag;
1206 struct mvm_statistics_rx rx;
1207 struct mvm_statistics_tx tx;
1208 struct mvm_statistics_general general;
1209} __packed;
1210
978#endif /* __fw_api_h__ */ 1211#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index e18c92dd60ec..cd7c0032cc58 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -326,6 +326,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
326 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); 326 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
327 WARN_ON(ret); 327 WARN_ON(ret);
328 328
329 /*
330 * abort after reading the nvm in case RF Kill is on, we will complete
331 * the init seq later when RF kill will switch to off
332 */
333 if (iwl_mvm_is_radio_killed(mvm)) {
334 IWL_DEBUG_RF_KILL(mvm,
335 "jump over all phy activities due to RF kill\n");
336 iwl_remove_notification(&mvm->notif_wait, &calib_wait);
337 return 1;
338 }
339
329 /* Send TX valid antennas before triggering calibrations */ 340 /* Send TX valid antennas before triggering calibrations */
330 ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw)); 341 ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
331 if (ret) 342 if (ret)
@@ -388,6 +399,8 @@ out:
388int iwl_mvm_up(struct iwl_mvm *mvm) 399int iwl_mvm_up(struct iwl_mvm *mvm)
389{ 400{
390 int ret, i; 401 int ret, i;
402 struct ieee80211_channel *chan;
403 struct cfg80211_chan_def chandef;
391 404
392 lockdep_assert_held(&mvm->mutex); 405 lockdep_assert_held(&mvm->mutex);
393 406
@@ -400,8 +413,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
400 ret = iwl_run_init_mvm_ucode(mvm, false); 413 ret = iwl_run_init_mvm_ucode(mvm, false);
401 if (ret && !iwlmvm_mod_params.init_dbg) { 414 if (ret && !iwlmvm_mod_params.init_dbg) {
402 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); 415 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
416 /* this can't happen */
417 if (WARN_ON(ret > 0))
418 ret = -ERFKILL;
403 goto error; 419 goto error;
404 } 420 }
421 /* should stop & start HW since that INIT image just loaded */
422 iwl_trans_stop_hw(mvm->trans, false);
423 ret = iwl_trans_start_hw(mvm->trans);
424 if (ret)
425 return ret;
405 } 426 }
406 427
407 if (iwlmvm_mod_params.init_dbg) 428 if (iwlmvm_mod_params.init_dbg)
@@ -443,8 +464,22 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
443 if (ret) 464 if (ret)
444 goto error; 465 goto error;
445 466
446 IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); 467 /* Add all the PHY contexts */
468 chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
469 cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
470 for (i = 0; i < NUM_PHY_CTX; i++) {
471 /*
472 * The channel used here isn't relevant as it's
473 * going to be overwritten in the other flows.
474 * For now use the first channel we have.
475 */
476 ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
477 &chandef, 1, 1);
478 if (ret)
479 goto error;
480 }
447 481
482 IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
448 return 0; 483 return 0;
449 error: 484 error:
450 iwl_trans_stop_device(mvm->trans); 485 iwl_trans_stop_device(mvm->trans);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index b2cc3d98e0f7..94aae9c8562c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -193,14 +193,11 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
193u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, 193u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
194 struct ieee80211_vif *vif) 194 struct ieee80211_vif *vif)
195{ 195{
196 u32 qmask, ac; 196 u32 qmask = 0, ac;
197 197
198 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 198 if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
199 return BIT(IWL_MVM_OFFCHANNEL_QUEUE); 199 return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
200 200
201 qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ?
202 BIT(vif->cab_queue) : 0;
203
204 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 201 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
205 if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) 202 if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
206 qmask |= BIT(vif->hw_queue[ac]); 203 qmask |= BIT(vif->hw_queue[ac]);
@@ -227,7 +224,7 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
227 .found_vif = false, 224 .found_vif = false,
228 }; 225 };
229 u32 ac; 226 u32 ac;
230 int ret; 227 int ret, i;
231 228
232 /* 229 /*
233 * Allocate a MAC ID and a TSF for this MAC, along with the queues 230 * Allocate a MAC ID and a TSF for this MAC, along with the queues
@@ -335,6 +332,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
335 mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT; 332 mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
336 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; 333 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
337 334
335 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
336 mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
337
338 return 0; 338 return 0;
339 339
340exit_fail: 340exit_fail:
@@ -362,7 +362,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
362 break; 362 break;
363 case NL80211_IFTYPE_AP: 363 case NL80211_IFTYPE_AP:
364 iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue, 364 iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
365 IWL_MVM_TX_FIFO_VO); 365 IWL_MVM_TX_FIFO_MCAST);
366 /* fall through */ 366 /* fall through */
367 default: 367 default:
368 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 368 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -550,6 +550,10 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
550 cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]); 550 cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
551 } 551 }
552 552
553 /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
554 if (vif->type == NL80211_IFTYPE_AP)
555 cmd->ac[AC_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
556
553 if (vif->bss_conf.qos) 557 if (vif->bss_conf.qos)
554 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); 558 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
555 559
@@ -861,6 +865,30 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
861 return ret; 865 return ret;
862} 866}
863 867
868struct iwl_mvm_mac_ap_iterator_data {
869 struct iwl_mvm *mvm;
870 struct ieee80211_vif *vif;
871 u32 beacon_device_ts;
872 u16 beacon_int;
873};
874
875/* Find the beacon_device_ts and beacon_int for a managed interface */
876static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
877 struct ieee80211_vif *vif)
878{
879 struct iwl_mvm_mac_ap_iterator_data *data = _data;
880
881 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
882 return;
883
884 /* Station client has higher priority over P2P client*/
885 if (vif->p2p && data->beacon_device_ts)
886 return;
887
888 data->beacon_device_ts = vif->bss_conf.sync_device_ts;
889 data->beacon_int = vif->bss_conf.beacon_int;
890}
891
864/* 892/*
865 * Fill the specific data for mac context of type AP of P2P GO 893 * Fill the specific data for mac context of type AP of P2P GO
866 */ 894 */
@@ -870,6 +898,11 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
870 bool add) 898 bool add)
871{ 899{
872 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 900 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
901 struct iwl_mvm_mac_ap_iterator_data data = {
902 .mvm = mvm,
903 .vif = vif,
904 .beacon_device_ts = 0
905 };
873 906
874 ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int); 907 ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
875 ctxt_ap->bi_reciprocal = 908 ctxt_ap->bi_reciprocal =
@@ -883,16 +916,33 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
883 ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue); 916 ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
884 917
885 /* 918 /*
886 * Only read the system time when the MAC is being added, when we 919 * Only set the beacon time when the MAC is being added, when we
887 * just modify the MAC then we should keep the time -- the firmware 920 * just modify the MAC then we should keep the time -- the firmware
888 * can otherwise have a "jumping" TBTT. 921 * can otherwise have a "jumping" TBTT.
889 */ 922 */
890 if (add) 923 if (add) {
891 mvmvif->ap_beacon_time = 924 /*
892 iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); 925 * If there is a station/P2P client interface which is
926 * associated, set the AP's TBTT far enough from the station's
927 * TBTT. Otherwise, set it to the current system time
928 */
929 ieee80211_iterate_active_interfaces_atomic(
930 mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
931 iwl_mvm_mac_ap_iterator, &data);
932
933 if (data.beacon_device_ts) {
934 u32 rand = (prandom_u32() % (80 - 20)) + 20;
935 mvmvif->ap_beacon_time = data.beacon_device_ts +
936 ieee80211_tu_to_usec(data.beacon_int * rand /
937 100);
938 } else {
939 mvmvif->ap_beacon_time =
940 iwl_read_prph(mvm->trans,
941 DEVICE_SYSTEM_TIME_REG);
942 }
943 }
893 944
894 ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time); 945 ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
895
896 ctxt_ap->beacon_tsf = 0; /* unused */ 946 ctxt_ap->beacon_tsf = 0; /* unused */
897 947
898 /* TODO: Assume that the beacon id == mac context id */ 948 /* TODO: Assume that the beacon id == mac context id */
@@ -1047,3 +1097,28 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
1047 rate); 1097 rate);
1048 return 0; 1098 return 0;
1049} 1099}
1100
1101static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
1102 struct ieee80211_vif *vif)
1103{
1104 u16 *id = _data;
1105 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1106
1107 if (mvmvif->id == *id)
1108 ieee80211_beacon_loss(vif);
1109}
1110
1111int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
1112 struct iwl_rx_cmd_buffer *rxb,
1113 struct iwl_device_cmd *cmd)
1114{
1115 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1116 struct iwl_missed_beacons_notif *missed_beacons = (void *)pkt->data;
1117 u16 id = (u16)le32_to_cpu(missed_beacons->mac_id);
1118
1119 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1120 IEEE80211_IFACE_ITER_NORMAL,
1121 iwl_mvm_beacon_loss_iterator,
1122 &id);
1123 return 0;
1124}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index a5eb8c82f16a..e08683b20531 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -81,12 +81,12 @@
81static const struct ieee80211_iface_limit iwl_mvm_limits[] = { 81static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
82 { 82 {
83 .max = 1, 83 .max = 1,
84 .types = BIT(NL80211_IFTYPE_STATION) | 84 .types = BIT(NL80211_IFTYPE_STATION),
85 BIT(NL80211_IFTYPE_AP),
86 }, 85 },
87 { 86 {
88 .max = 1, 87 .max = 1,
89 .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | 88 .types = BIT(NL80211_IFTYPE_AP) |
89 BIT(NL80211_IFTYPE_P2P_CLIENT) |
90 BIT(NL80211_IFTYPE_P2P_GO), 90 BIT(NL80211_IFTYPE_P2P_GO),
91 }, 91 },
92 { 92 {
@@ -127,6 +127,17 @@ static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
127}; 127};
128#endif 128#endif
129 129
130static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
131{
132 int i;
133
134 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
135 for (i = 0; i < NUM_PHY_CTX; i++) {
136 mvm->phy_ctxts[i].id = i;
137 mvm->phy_ctxts[i].ref = 0;
138 }
139}
140
130int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) 141int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
131{ 142{
132 struct ieee80211_hw *hw = mvm->hw; 143 struct ieee80211_hw *hw = mvm->hw;
@@ -141,7 +152,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
141 IEEE80211_HW_SUPPORTS_PS | 152 IEEE80211_HW_SUPPORTS_PS |
142 IEEE80211_HW_SUPPORTS_DYNAMIC_PS | 153 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
143 IEEE80211_HW_AMPDU_AGGREGATION | 154 IEEE80211_HW_AMPDU_AGGREGATION |
144 IEEE80211_HW_TIMING_BEACON_ONLY; 155 IEEE80211_HW_TIMING_BEACON_ONLY |
156 IEEE80211_HW_CONNECTION_MONITOR;
145 157
146 hw->queues = IWL_MVM_FIRST_AGG_QUEUE; 158 hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
147 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; 159 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@@ -158,7 +170,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
158 170
159 hw->sta_data_size = sizeof(struct iwl_mvm_sta); 171 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
160 hw->vif_data_size = sizeof(struct iwl_mvm_vif); 172 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
161 hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt); 173 hw->chanctx_data_size = sizeof(u16);
162 174
163 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 175 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
164 BIT(NL80211_IFTYPE_P2P_CLIENT) | 176 BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -193,6 +205,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
193 hw->wiphy->n_addresses++; 205 hw->wiphy->n_addresses++;
194 } 206 }
195 207
208 iwl_mvm_reset_phy_ctxts(mvm);
209
196 /* we create the 802.11 header and a max-length SSID element */ 210 /* we create the 802.11 header and a max-length SSID element */
197 hw->wiphy->max_scan_ie_len = 211 hw->wiphy->max_scan_ie_len =
198 mvm->fw->ucode_capa.max_probe_length - 24 - 34; 212 mvm->fw->ucode_capa.max_probe_length - 24 - 34;
@@ -222,20 +236,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
222 mvm->trans->ops->d3_suspend && 236 mvm->trans->ops->d3_suspend &&
223 mvm->trans->ops->d3_resume && 237 mvm->trans->ops->d3_resume &&
224 device_can_wakeup(mvm->trans->dev)) { 238 device_can_wakeup(mvm->trans->dev)) {
225 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | 239 mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
226 WIPHY_WOWLAN_DISCONNECT | 240 WIPHY_WOWLAN_DISCONNECT |
227 WIPHY_WOWLAN_EAP_IDENTITY_REQ | 241 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
228 WIPHY_WOWLAN_RFKILL_RELEASE; 242 WIPHY_WOWLAN_RFKILL_RELEASE;
229 if (!iwlwifi_mod_params.sw_crypto) 243 if (!iwlwifi_mod_params.sw_crypto)
230 hw->wiphy->wowlan.flags |= 244 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
231 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | 245 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
232 WIPHY_WOWLAN_GTK_REKEY_FAILURE | 246 WIPHY_WOWLAN_4WAY_HANDSHAKE;
233 WIPHY_WOWLAN_4WAY_HANDSHAKE; 247
234 248 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
235 hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; 249 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
236 hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; 250 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
237 hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; 251 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
238 hw->wiphy->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; 252 hw->wiphy->wowlan = &mvm->wowlan;
239 } 253 }
240#endif 254#endif
241 255
@@ -252,8 +266,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
252{ 266{
253 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 267 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
254 268
255 if (test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status)) { 269 if (iwl_mvm_is_radio_killed(mvm)) {
256 IWL_DEBUG_DROP(mvm, "Dropping - RF KILL\n"); 270 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
257 goto drop; 271 goto drop;
258 } 272 }
259 273
@@ -345,8 +359,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
345 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); 359 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
346 spin_unlock_bh(&mvm->time_event_lock); 360 spin_unlock_bh(&mvm->time_event_lock);
347 361
348 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) 362 mvmvif->phy_ctxt = NULL;
349 mvmvif->phy_ctxt = NULL;
350} 363}
351 364
352static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) 365static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
@@ -363,6 +376,9 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
363 mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, 376 mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
364 iwl_mvm_cleanup_iterator, mvm); 377 iwl_mvm_cleanup_iterator, mvm);
365 378
379 mvm->p2p_device_vif = NULL;
380
381 iwl_mvm_reset_phy_ctxts(mvm);
366 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 382 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
367 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); 383 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
368 384
@@ -456,6 +472,20 @@ static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
456 iwl_mvm_power_update_mode(mvm, vif); 472 iwl_mvm_power_update_mode(mvm, vif);
457} 473}
458 474
475static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
476{
477 u16 i;
478
479 lockdep_assert_held(&mvm->mutex);
480
481 for (i = 0; i < NUM_PHY_CTX; i++)
482 if (!mvm->phy_ctxts[i].ref)
483 return &mvm->phy_ctxts[i];
484
485 IWL_ERR(mvm, "No available PHY context\n");
486 return NULL;
487}
488
459static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, 489static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
460 struct ieee80211_vif *vif) 490 struct ieee80211_vif *vif)
461{ 491{
@@ -530,32 +560,34 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
530 */ 560 */
531 iwl_mvm_power_update_mode(mvm, vif); 561 iwl_mvm_power_update_mode(mvm, vif);
532 562
563 /* beacon filtering */
564 if (!mvm->bf_allowed_vif &&
565 vif->type == NL80211_IFTYPE_STATION && !vif->p2p){
566 mvm->bf_allowed_vif = mvmvif;
567 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
568 }
569
570 ret = iwl_mvm_disable_beacon_filter(mvm, vif);
571 if (ret)
572 goto out_release;
573
533 /* 574 /*
534 * P2P_DEVICE interface does not have a channel context assigned to it, 575 * P2P_DEVICE interface does not have a channel context assigned to it,
535 * so a dedicated PHY context is allocated to it and the corresponding 576 * so a dedicated PHY context is allocated to it and the corresponding
536 * MAC context is bound to it at this stage. 577 * MAC context is bound to it at this stage.
537 */ 578 */
538 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 579 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
539 struct ieee80211_channel *chan;
540 struct cfg80211_chan_def chandef;
541 580
542 mvmvif->phy_ctxt = &mvm->phy_ctxt_roc; 581 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
543 582 if (!mvmvif->phy_ctxt) {
544 /* 583 ret = -ENOSPC;
545 * The channel used here isn't relevant as it's
546 * going to be overwritten as part of the ROC flow.
547 * For now use the first channel we have.
548 */
549 chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
550 cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
551 ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt,
552 &chandef, 1, 1);
553 if (ret)
554 goto out_remove_mac; 584 goto out_remove_mac;
585 }
555 586
587 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
556 ret = iwl_mvm_binding_add_vif(mvm, vif); 588 ret = iwl_mvm_binding_add_vif(mvm, vif);
557 if (ret) 589 if (ret)
558 goto out_remove_phy; 590 goto out_unref_phy;
559 591
560 ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta); 592 ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
561 if (ret) 593 if (ret)
@@ -571,27 +603,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
571 603
572 out_unbind: 604 out_unbind:
573 iwl_mvm_binding_remove_vif(mvm, vif); 605 iwl_mvm_binding_remove_vif(mvm, vif);
574 out_remove_phy: 606 out_unref_phy:
575 iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt); 607 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
576 out_remove_mac: 608 out_remove_mac:
577 mvmvif->phy_ctxt = NULL; 609 mvmvif->phy_ctxt = NULL;
578 iwl_mvm_mac_ctxt_remove(mvm, vif); 610 iwl_mvm_mac_ctxt_remove(mvm, vif);
579 out_release: 611 out_release:
580 /*
581 * TODO: remove this temporary code.
582 * Currently MVM FW supports power management only on single MAC.
583 * Check if only one additional interface remains after releasing
584 * current one. Update power mode on the remaining interface.
585 */
586 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) 612 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
587 mvm->vif_count--; 613 mvm->vif_count--;
588 IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n", 614 ieee80211_iterate_active_interfaces(
589 mvm->vif_count); 615 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
590 if (mvm->vif_count == 1) { 616 iwl_mvm_power_update_iterator, mvm);
591 ieee80211_iterate_active_interfaces(
592 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
593 iwl_mvm_power_update_iterator, mvm);
594 }
595 iwl_mvm_mac_ctxt_release(mvm, vif); 617 iwl_mvm_mac_ctxt_release(mvm, vif);
596 out_unlock: 618 out_unlock:
597 mutex_unlock(&mvm->mutex); 619 mutex_unlock(&mvm->mutex);
@@ -629,8 +651,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
629 * By now, all the AC queues are empty. The AGG queues are 651 * By now, all the AC queues are empty. The AGG queues are
630 * empty too. We already got all the Tx responses for all the 652 * empty too. We already got all the Tx responses for all the
631 * packets in the queues. The drain work can have been 653 * packets in the queues. The drain work can have been
632 * triggered. Flush it. This work item takes the mutex, so kill 654 * triggered. Flush it.
633 * it before we take it.
634 */ 655 */
635 flush_work(&mvm->sta_drained_wk); 656 flush_work(&mvm->sta_drained_wk);
636 } 657 }
@@ -646,6 +667,11 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
646 667
647 mutex_lock(&mvm->mutex); 668 mutex_lock(&mvm->mutex);
648 669
670 if (mvm->bf_allowed_vif == mvmvif) {
671 mvm->bf_allowed_vif = NULL;
672 vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
673 }
674
649 iwl_mvm_vif_dbgfs_clean(mvm, vif); 675 iwl_mvm_vif_dbgfs_clean(mvm, vif);
650 676
651 /* 677 /*
@@ -661,7 +687,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
661 mvm->p2p_device_vif = NULL; 687 mvm->p2p_device_vif = NULL;
662 iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta); 688 iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
663 iwl_mvm_binding_remove_vif(mvm, vif); 689 iwl_mvm_binding_remove_vif(mvm, vif);
664 iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt); 690 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
665 mvmvif->phy_ctxt = NULL; 691 mvmvif->phy_ctxt = NULL;
666 } 692 }
667 693
@@ -748,7 +774,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
748 if (ret) 774 if (ret)
749 IWL_ERR(mvm, "failed to update quotas\n"); 775 IWL_ERR(mvm, "failed to update quotas\n");
750 } 776 }
751 } else if (changes & BSS_CHANGED_DTIM_PERIOD) { 777 ret = iwl_mvm_power_update_mode(mvm, vif);
778 if (ret)
779 IWL_ERR(mvm, "failed to update power mode\n");
780 } else if (changes & BSS_CHANGED_BEACON_INFO) {
752 /* 781 /*
753 * We received a beacon _after_ association so 782 * We received a beacon _after_ association so
754 * remove the session protection. 783 * remove the session protection.
@@ -756,19 +785,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
756 iwl_mvm_remove_time_event(mvm, mvmvif, 785 iwl_mvm_remove_time_event(mvm, mvmvif,
757 &mvmvif->time_event_data); 786 &mvmvif->time_event_data);
758 } else if (changes & BSS_CHANGED_PS) { 787 } else if (changes & BSS_CHANGED_PS) {
759 /* 788 ret = iwl_mvm_power_update_mode(mvm, vif);
760 * TODO: remove this temporary code. 789 if (ret)
761 * Currently MVM FW supports power management only on single 790 IWL_ERR(mvm, "failed to update power mode\n");
762 * MAC. Avoid power mode update if more than one interface
763 * is active.
764 */
765 IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
766 mvm->vif_count);
767 if (mvm->vif_count == 1) {
768 ret = iwl_mvm_power_update_mode(mvm, vif);
769 if (ret)
770 IWL_ERR(mvm, "failed to update power mode\n");
771 }
772 } 791 }
773} 792}
774 793
@@ -999,9 +1018,13 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
999 mvmvif->phy_ctxt->channel->band); 1018 mvmvif->phy_ctxt->channel->band);
1000 } else if (old_state == IEEE80211_STA_ASSOC && 1019 } else if (old_state == IEEE80211_STA_ASSOC &&
1001 new_state == IEEE80211_STA_AUTHORIZED) { 1020 new_state == IEEE80211_STA_AUTHORIZED) {
1021 /* enable beacon filtering */
1022 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
1002 ret = 0; 1023 ret = 0;
1003 } else if (old_state == IEEE80211_STA_AUTHORIZED && 1024 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
1004 new_state == IEEE80211_STA_ASSOC) { 1025 new_state == IEEE80211_STA_ASSOC) {
1026 /* disable beacon filtering */
1027 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif));
1005 ret = 0; 1028 ret = 0;
1006 } else if (old_state == IEEE80211_STA_ASSOC && 1029 } else if (old_state == IEEE80211_STA_ASSOC &&
1007 new_state == IEEE80211_STA_AUTH) { 1030 new_state == IEEE80211_STA_AUTH) {
@@ -1167,29 +1190,107 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
1167 enum ieee80211_roc_type type) 1190 enum ieee80211_roc_type type)
1168{ 1191{
1169 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1192 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1193 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1170 struct cfg80211_chan_def chandef; 1194 struct cfg80211_chan_def chandef;
1171 int ret; 1195 struct iwl_mvm_phy_ctxt *phy_ctxt;
1196 int ret, i;
1197
1198 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
1199 duration, type);
1172 1200
1173 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) { 1201 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
1174 IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type); 1202 IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type);
1175 return -EINVAL; 1203 return -EINVAL;
1176 } 1204 }
1177 1205
1178 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
1179 duration, type);
1180
1181 mutex_lock(&mvm->mutex); 1206 mutex_lock(&mvm->mutex);
1182 1207
1208 for (i = 0; i < NUM_PHY_CTX; i++) {
1209 phy_ctxt = &mvm->phy_ctxts[i];
1210 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
1211 continue;
1212
1213 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
1214 /*
1215 * Unbind the P2P_DEVICE from the current PHY context,
1216 * and if the PHY context is not used remove it.
1217 */
1218 ret = iwl_mvm_binding_remove_vif(mvm, vif);
1219 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
1220 goto out_unlock;
1221
1222 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1223
1224 /* Bind the P2P_DEVICE to the current PHY Context */
1225 mvmvif->phy_ctxt = phy_ctxt;
1226
1227 ret = iwl_mvm_binding_add_vif(mvm, vif);
1228 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
1229 goto out_unlock;
1230
1231 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1232 goto schedule_time_event;
1233 }
1234 }
1235
1236 /* Need to update the PHY context only if the ROC channel changed */
1237 if (channel == mvmvif->phy_ctxt->channel)
1238 goto schedule_time_event;
1239
1183 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); 1240 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
1184 ret = iwl_mvm_phy_ctxt_changed(mvm, &mvm->phy_ctxt_roc,
1185 &chandef, 1, 1);
1186 1241
1242 /*
1243 * Change the PHY context configuration as it is currently referenced
1244 * only by the P2P Device MAC
1245 */
1246 if (mvmvif->phy_ctxt->ref == 1) {
1247 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
1248 &chandef, 1, 1);
1249 if (ret)
1250 goto out_unlock;
1251 } else {
1252 /*
1253 * The PHY context is shared with other MACs. Need to remove the
1254 * P2P Device from the binding, allocate an new PHY context and
1255 * create a new binding
1256 */
1257 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1258 if (!phy_ctxt) {
1259 ret = -ENOSPC;
1260 goto out_unlock;
1261 }
1262
1263 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
1264 1, 1);
1265 if (ret) {
1266 IWL_ERR(mvm, "Failed to change PHY context\n");
1267 goto out_unlock;
1268 }
1269
1270 /* Unbind the P2P_DEVICE from the current PHY context */
1271 ret = iwl_mvm_binding_remove_vif(mvm, vif);
1272 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
1273 goto out_unlock;
1274
1275 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1276
1277 /* Bind the P2P_DEVICE to the new allocated PHY context */
1278 mvmvif->phy_ctxt = phy_ctxt;
1279
1280 ret = iwl_mvm_binding_add_vif(mvm, vif);
1281 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
1282 goto out_unlock;
1283
1284 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1285 }
1286
1287schedule_time_event:
1187 /* Schedule the time events */ 1288 /* Schedule the time events */
1188 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); 1289 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
1189 1290
1291out_unlock:
1190 mutex_unlock(&mvm->mutex); 1292 mutex_unlock(&mvm->mutex);
1191 IWL_DEBUG_MAC80211(mvm, "leave\n"); 1293 IWL_DEBUG_MAC80211(mvm, "leave\n");
1192
1193 return ret; 1294 return ret;
1194} 1295}
1195 1296
@@ -1211,15 +1312,30 @@ static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
1211 struct ieee80211_chanctx_conf *ctx) 1312 struct ieee80211_chanctx_conf *ctx)
1212{ 1313{
1213 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1314 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1214 struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; 1315 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
1316 struct iwl_mvm_phy_ctxt *phy_ctxt;
1215 int ret; 1317 int ret;
1216 1318
1319 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
1320
1217 mutex_lock(&mvm->mutex); 1321 mutex_lock(&mvm->mutex);
1322 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1323 if (!phy_ctxt) {
1324 ret = -ENOSPC;
1325 goto out;
1326 }
1218 1327
1219 IWL_DEBUG_MAC80211(mvm, "Add PHY context\n"); 1328 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
1220 ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, &ctx->def, 1329 ctx->rx_chains_static,
1221 ctx->rx_chains_static, 1330 ctx->rx_chains_dynamic);
1222 ctx->rx_chains_dynamic); 1331 if (ret) {
1332 IWL_ERR(mvm, "Failed to add PHY context\n");
1333 goto out;
1334 }
1335
1336 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
1337 *phy_ctxt_id = phy_ctxt->id;
1338out:
1223 mutex_unlock(&mvm->mutex); 1339 mutex_unlock(&mvm->mutex);
1224 return ret; 1340 return ret;
1225} 1341}
@@ -1228,10 +1344,11 @@ static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
1228 struct ieee80211_chanctx_conf *ctx) 1344 struct ieee80211_chanctx_conf *ctx)
1229{ 1345{
1230 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1346 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1231 struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; 1347 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
1348 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
1232 1349
1233 mutex_lock(&mvm->mutex); 1350 mutex_lock(&mvm->mutex);
1234 iwl_mvm_phy_ctxt_remove(mvm, phy_ctxt); 1351 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
1235 mutex_unlock(&mvm->mutex); 1352 mutex_unlock(&mvm->mutex);
1236} 1353}
1237 1354
@@ -1240,7 +1357,16 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
1240 u32 changed) 1357 u32 changed)
1241{ 1358{
1242 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1359 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1243 struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; 1360 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
1361 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
1362
1363 if (WARN_ONCE((phy_ctxt->ref > 1) &&
1364 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
1365 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
1366 IEEE80211_CHANCTX_CHANGE_RADAR)),
1367 "Cannot change PHY. Ref=%d, changed=0x%X\n",
1368 phy_ctxt->ref, changed))
1369 return;
1244 1370
1245 mutex_lock(&mvm->mutex); 1371 mutex_lock(&mvm->mutex);
1246 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def, 1372 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
@@ -1254,13 +1380,14 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
1254 struct ieee80211_chanctx_conf *ctx) 1380 struct ieee80211_chanctx_conf *ctx)
1255{ 1381{
1256 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1382 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1257 struct iwl_mvm_phy_ctxt *phyctx = (void *)ctx->drv_priv; 1383 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
1384 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
1258 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1385 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1259 int ret; 1386 int ret;
1260 1387
1261 mutex_lock(&mvm->mutex); 1388 mutex_lock(&mvm->mutex);
1262 1389
1263 mvmvif->phy_ctxt = phyctx; 1390 mvmvif->phy_ctxt = phy_ctxt;
1264 1391
1265 switch (vif->type) { 1392 switch (vif->type) {
1266 case NL80211_IFTYPE_AP: 1393 case NL80211_IFTYPE_AP:
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 9f46b23801bc..d40d7db185d6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -73,7 +73,6 @@
73#include "iwl-trans.h" 73#include "iwl-trans.h"
74#include "iwl-notif-wait.h" 74#include "iwl-notif-wait.h"
75#include "iwl-eeprom-parse.h" 75#include "iwl-eeprom-parse.h"
76#include "iwl-test.h"
77#include "iwl-trans.h" 76#include "iwl-trans.h"
78#include "sta.h" 77#include "sta.h"
79#include "fw-api.h" 78#include "fw-api.h"
@@ -88,6 +87,7 @@ enum iwl_mvm_tx_fifo {
88 IWL_MVM_TX_FIFO_BE, 87 IWL_MVM_TX_FIFO_BE,
89 IWL_MVM_TX_FIFO_VI, 88 IWL_MVM_TX_FIFO_VI,
90 IWL_MVM_TX_FIFO_VO, 89 IWL_MVM_TX_FIFO_VO,
90 IWL_MVM_TX_FIFO_MCAST = 5,
91}; 91};
92 92
93extern struct ieee80211_ops iwl_mvm_hw_ops; 93extern struct ieee80211_ops iwl_mvm_hw_ops;
@@ -109,6 +109,7 @@ extern struct iwl_mvm_mod_params iwlmvm_mod_params;
109struct iwl_mvm_phy_ctxt { 109struct iwl_mvm_phy_ctxt {
110 u16 id; 110 u16 id;
111 u16 color; 111 u16 color;
112 u32 ref;
112 113
113 /* 114 /*
114 * TODO: This should probably be removed. Currently here only for rate 115 * TODO: This should probably be removed. Currently here only for rate
@@ -149,6 +150,64 @@ enum iwl_power_scheme {
149 150
150#define IWL_CONN_MAX_LISTEN_INTERVAL 70 151#define IWL_CONN_MAX_LISTEN_INTERVAL 70
151 152
153#ifdef CONFIG_IWLWIFI_DEBUGFS
154enum iwl_dbgfs_pm_mask {
155 MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
156 MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1),
157 MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
158 MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
159 MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
160 MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
161 MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
162 MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
163};
164
165struct iwl_dbgfs_pm {
166 u8 keep_alive_seconds;
167 u32 rx_data_timeout;
168 u32 tx_data_timeout;
169 bool skip_over_dtim;
170 u8 skip_dtim_periods;
171 bool disable_power_off;
172 bool lprx_ena;
173 u32 lprx_rssi_threshold;
174 int mask;
175};
176
177/* beacon filtering */
178
179enum iwl_dbgfs_bf_mask {
180 MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
181 MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
182 MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
183 MVM_DEBUGFS_BF_TEMPERATURE_DELTA = BIT(3),
184 MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(4),
185 MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(5),
186 MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(6),
187 MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(7),
188 MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(8),
189};
190
191struct iwl_dbgfs_bf {
192 u8 bf_energy_delta;
193 u8 bf_roaming_energy_delta;
194 u8 bf_roaming_state;
195 u8 bf_temperature_delta;
196 u8 bf_enable_beacon_filter;
197 u8 bf_debug_flag;
198 u32 bf_escape_timer;
199 u32 ba_escape_timer;
200 u8 ba_enable_beacon_abort;
201 int mask;
202};
203#endif
204
205enum iwl_mvm_smps_type_request {
206 IWL_MVM_SMPS_REQ_BT_COEX,
207 IWL_MVM_SMPS_REQ_TT,
208 NUM_IWL_MVM_SMPS_REQ,
209};
210
152/** 211/**
153 * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context 212 * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
154 * @id: between 0 and 3 213 * @id: between 0 and 3
@@ -163,6 +222,8 @@ enum iwl_power_scheme {
163 * @bcast_sta: station used for broadcast packets. Used by the following 222 * @bcast_sta: station used for broadcast packets. Used by the following
164 * vifs: P2P_DEVICE, GO and AP. 223 * vifs: P2P_DEVICE, GO and AP.
165 * @beacon_skb: the skb used to hold the AP/GO beacon template 224 * @beacon_skb: the skb used to hold the AP/GO beacon template
225 * @smps_requests: the requests of of differents parts of the driver, regard
226 the desired smps mode.
166 */ 227 */
167struct iwl_mvm_vif { 228struct iwl_mvm_vif {
168 u16 id; 229 u16 id;
@@ -172,6 +233,8 @@ struct iwl_mvm_vif {
172 bool uploaded; 233 bool uploaded;
173 bool ap_active; 234 bool ap_active;
174 bool monitor_active; 235 bool monitor_active;
236 /* indicate whether beacon filtering is enabled */
237 bool bf_enabled;
175 238
176 u32 ap_beacon_time; 239 u32 ap_beacon_time;
177 240
@@ -214,7 +277,11 @@ struct iwl_mvm_vif {
214 struct dentry *dbgfs_dir; 277 struct dentry *dbgfs_dir;
215 struct dentry *dbgfs_slink; 278 struct dentry *dbgfs_slink;
216 void *dbgfs_data; 279 void *dbgfs_data;
280 struct iwl_dbgfs_pm dbgfs_pm;
281 struct iwl_dbgfs_bf dbgfs_bf;
217#endif 282#endif
283
284 enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
218}; 285};
219 286
220static inline struct iwl_mvm_vif * 287static inline struct iwl_mvm_vif *
@@ -223,12 +290,6 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
223 return (void *)vif->drv_priv; 290 return (void *)vif->drv_priv;
224} 291}
225 292
226enum iwl_mvm_status {
227 IWL_MVM_STATUS_HW_RFKILL,
228 IWL_MVM_STATUS_ROC_RUNNING,
229 IWL_MVM_STATUS_IN_HW_RESTART,
230};
231
232enum iwl_scan_status { 293enum iwl_scan_status {
233 IWL_MVM_SCAN_NONE, 294 IWL_MVM_SCAN_NONE,
234 IWL_MVM_SCAN_OS, 295 IWL_MVM_SCAN_OS,
@@ -246,6 +307,65 @@ struct iwl_nvm_section {
246 const u8 *data; 307 const u8 *data;
247}; 308};
248 309
310/*
311 * Tx-backoff threshold
312 * @temperature: The threshold in Celsius
313 * @backoff: The tx-backoff in uSec
314 */
315struct iwl_tt_tx_backoff {
316 s32 temperature;
317 u32 backoff;
318};
319
320#define TT_TX_BACKOFF_SIZE 6
321
322/**
323 * struct iwl_tt_params - thermal throttling parameters
324 * @ct_kill_entry: CT Kill entry threshold
325 * @ct_kill_exit: CT Kill exit threshold
326 * @ct_kill_duration: The time intervals (in uSec) in which the driver needs
327 * to checks whether to exit CT Kill.
328 * @dynamic_smps_entry: Dynamic SMPS entry threshold
329 * @dynamic_smps_exit: Dynamic SMPS exit threshold
330 * @tx_protection_entry: TX protection entry threshold
331 * @tx_protection_exit: TX protection exit threshold
332 * @tx_backoff: Array of thresholds for tx-backoff , in ascending order.
333 * @support_ct_kill: Support CT Kill?
334 * @support_dynamic_smps: Support dynamic SMPS?
335 * @support_tx_protection: Support tx protection?
336 * @support_tx_backoff: Support tx-backoff?
337 */
338struct iwl_tt_params {
339 s32 ct_kill_entry;
340 s32 ct_kill_exit;
341 u32 ct_kill_duration;
342 s32 dynamic_smps_entry;
343 s32 dynamic_smps_exit;
344 s32 tx_protection_entry;
345 s32 tx_protection_exit;
346 struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
347 bool support_ct_kill;
348 bool support_dynamic_smps;
349 bool support_tx_protection;
350 bool support_tx_backoff;
351};
352
353/**
354 * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
355 * @ct_kill_exit: worker to exit thermal kill
356 * @dynamic_smps: Is thermal throttling enabled dynamic_smps?
357 * @tx_backoff: The current thremal throttling tx backoff in uSec.
358 * @params: Parameters to configure the thermal throttling algorithm.
359 * @throttle: Is thermal throttling is active?
360 */
361struct iwl_mvm_tt_mgmt {
362 struct delayed_work ct_kill_exit;
363 bool dynamic_smps;
364 u32 tx_backoff;
365 const struct iwl_tt_params *params;
366 bool throttle;
367};
368
249struct iwl_mvm { 369struct iwl_mvm {
250 /* for logger access */ 370 /* for logger access */
251 struct device *dev; 371 struct device *dev;
@@ -266,6 +386,12 @@ struct iwl_mvm {
266 386
267 unsigned long status; 387 unsigned long status;
268 388
389 /*
390 * for beacon filtering -
391 * currently only one interface can be supported
392 */
393 struct iwl_mvm_vif *bf_allowed_vif;
394
269 enum iwl_ucode_type cur_ucode; 395 enum iwl_ucode_type cur_ucode;
270 bool ucode_loaded; 396 bool ucode_loaded;
271 bool init_ucode_run; 397 bool init_ucode_run;
@@ -313,7 +439,7 @@ struct iwl_mvm {
313 bool prevent_power_down_d3; 439 bool prevent_power_down_d3;
314#endif 440#endif
315 441
316 struct iwl_mvm_phy_ctxt phy_ctxt_roc; 442 struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
317 443
318 struct list_head time_event_list; 444 struct list_head time_event_list;
319 spinlock_t time_event_lock; 445 spinlock_t time_event_lock;
@@ -337,12 +463,24 @@ struct iwl_mvm {
337 struct ieee80211_vif *p2p_device_vif; 463 struct ieee80211_vif *p2p_device_vif;
338 464
339#ifdef CONFIG_PM_SLEEP 465#ifdef CONFIG_PM_SLEEP
466 struct wiphy_wowlan_support wowlan;
340 int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; 467 int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
468#ifdef CONFIG_IWLWIFI_DEBUGFS
469 u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */
470 bool d3_test_active;
471 bool store_d3_resume_sram;
472 void *d3_resume_sram;
473 u32 d3_test_pme_ptr;
474#endif
341#endif 475#endif
342 476
343 /* BT-Coex */ 477 /* BT-Coex */
344 u8 bt_kill_msk; 478 u8 bt_kill_msk;
345 struct iwl_bt_coex_profile_notif last_bt_notif; 479 struct iwl_bt_coex_profile_notif last_bt_notif;
480
481 /* Thermal Throttling and CTkill */
482 struct iwl_mvm_tt_mgmt thermal_throttle;
483 s32 temperature; /* Celsius */
346}; 484};
347 485
348/* Extract MVM priv from op_mode and _hw */ 486/* Extract MVM priv from op_mode and _hw */
@@ -352,6 +490,19 @@ struct iwl_mvm {
352#define IWL_MAC80211_GET_MVM(_hw) \ 490#define IWL_MAC80211_GET_MVM(_hw) \
353 IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) 491 IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
354 492
493enum iwl_mvm_status {
494 IWL_MVM_STATUS_HW_RFKILL,
495 IWL_MVM_STATUS_HW_CTKILL,
496 IWL_MVM_STATUS_ROC_RUNNING,
497 IWL_MVM_STATUS_IN_HW_RESTART,
498};
499
500static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
501{
502 return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) ||
503 test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
504}
505
355extern const u8 iwl_mvm_ac_to_tx_fifo[]; 506extern const u8 iwl_mvm_ac_to_tx_fifo[];
356 507
357struct iwl_rate_info { 508struct iwl_rate_info {
@@ -443,8 +594,10 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
443int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, 594int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
444 struct cfg80211_chan_def *chandef, 595 struct cfg80211_chan_def *chandef,
445 u8 chains_static, u8 chains_dynamic); 596 u8 chains_static, u8 chains_dynamic);
446void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, 597void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
447 struct iwl_mvm_phy_ctxt *ctxt); 598 struct iwl_mvm_phy_ctxt *ctxt);
599void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
600 struct iwl_mvm_phy_ctxt *ctxt);
448 601
449/* MAC (virtual interface) programming */ 602/* MAC (virtual interface) programming */
450int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 603int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -459,6 +612,9 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
459int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, 612int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
460 struct iwl_rx_cmd_buffer *rxb, 613 struct iwl_rx_cmd_buffer *rxb,
461 struct iwl_device_cmd *cmd); 614 struct iwl_device_cmd *cmd);
615int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
616 struct iwl_rx_cmd_buffer *rxb,
617 struct iwl_device_cmd *cmd);
462 618
463/* Bindings */ 619/* Bindings */
464int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 620int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -523,6 +679,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
523 struct inet6_dev *idev); 679 struct inet6_dev *idev);
524void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, 680void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
525 struct ieee80211_vif *vif, int idx); 681 struct ieee80211_vif *vif, int idx);
682extern const struct file_operations iwl_dbgfs_d3_test_ops;
526 683
527/* BT Coex */ 684/* BT Coex */
528int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm); 685int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -534,4 +691,31 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
534 enum ieee80211_rssi_event rssi_event); 691 enum ieee80211_rssi_event rssi_event);
535void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 692void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
536 693
694/* beacon filtering */
695#ifdef CONFIG_IWLWIFI_DEBUGFS
696void
697iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
698 struct iwl_beacon_filter_cmd *cmd);
699#else
700static inline void
701iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
702 struct iwl_beacon_filter_cmd *cmd)
703{}
704#endif
705int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
706 struct ieee80211_vif *vif);
707int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
708 struct ieee80211_vif *vif);
709
710/* SMPS */
711void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
712 enum iwl_mvm_smps_type_request req_type,
713 enum ieee80211_smps_mode smps_request);
714
715/* Thermal management and CT-kill */
716void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
717void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
718void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
719void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
720
537#endif /* __IWL_MVM_H__ */ 721#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index b8ec02f89acc..edb94ea31654 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -60,6 +60,7 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#include <linux/firmware.h>
63#include "iwl-trans.h" 64#include "iwl-trans.h"
64#include "mvm.h" 65#include "mvm.h"
65#include "iwl-eeprom-parse.h" 66#include "iwl-eeprom-parse.h"
@@ -75,31 +76,56 @@ static const int nvm_to_read[] = {
75}; 76};
76 77
77/* Default NVM size to read */ 78/* Default NVM size to read */
78#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024); 79#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
80#define IWL_MAX_NVM_SECTION_SIZE 6000
79 81
80static inline void iwl_nvm_fill_read(struct iwl_nvm_access_cmd *cmd, 82#define NVM_WRITE_OPCODE 1
81 u16 offset, u16 length, u16 section) 83#define NVM_READ_OPCODE 0
84
85/*
86 * prepare the NVM host command w/ the pointers to the nvm buffer
87 * and send it to fw
88 */
89static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
90 u16 offset, u16 length, const u8 *data)
82{ 91{
83 cmd->offset = cpu_to_le16(offset); 92 struct iwl_nvm_access_cmd nvm_access_cmd = {
84 cmd->length = cpu_to_le16(length); 93 .offset = cpu_to_le16(offset),
85 cmd->type = cpu_to_le16(section); 94 .length = cpu_to_le16(length),
95 .type = cpu_to_le16(section),
96 .op_code = NVM_WRITE_OPCODE,
97 };
98 struct iwl_host_cmd cmd = {
99 .id = NVM_ACCESS_CMD,
100 .len = { sizeof(struct iwl_nvm_access_cmd), length },
101 .flags = CMD_SYNC | CMD_SEND_IN_RFKILL,
102 .data = { &nvm_access_cmd, data },
103 /* data may come from vmalloc, so use _DUP */
104 .dataflags = { 0, IWL_HCMD_DFL_DUP },
105 };
106
107 return iwl_mvm_send_cmd(mvm, &cmd);
86} 108}
87 109
88static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, 110static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
89 u16 offset, u16 length, u8 *data) 111 u16 offset, u16 length, u8 *data)
90{ 112{
91 struct iwl_nvm_access_cmd nvm_access_cmd = {}; 113 struct iwl_nvm_access_cmd nvm_access_cmd = {
114 .offset = cpu_to_le16(offset),
115 .length = cpu_to_le16(length),
116 .type = cpu_to_le16(section),
117 .op_code = NVM_READ_OPCODE,
118 };
92 struct iwl_nvm_access_resp *nvm_resp; 119 struct iwl_nvm_access_resp *nvm_resp;
93 struct iwl_rx_packet *pkt; 120 struct iwl_rx_packet *pkt;
94 struct iwl_host_cmd cmd = { 121 struct iwl_host_cmd cmd = {
95 .id = NVM_ACCESS_CMD, 122 .id = NVM_ACCESS_CMD,
96 .flags = CMD_SYNC | CMD_WANT_SKB, 123 .flags = CMD_SYNC | CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
97 .data = { &nvm_access_cmd, }, 124 .data = { &nvm_access_cmd, },
98 }; 125 };
99 int ret, bytes_read, offset_read; 126 int ret, bytes_read, offset_read;
100 u8 *resp_data; 127 u8 *resp_data;
101 128
102 iwl_nvm_fill_read(&nvm_access_cmd, offset, length, section);
103 cmd.len[0] = sizeof(struct iwl_nvm_access_cmd); 129 cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);
104 130
105 ret = iwl_mvm_send_cmd(mvm, &cmd); 131 ret = iwl_mvm_send_cmd(mvm, &cmd);
@@ -144,6 +170,30 @@ exit:
144 return ret; 170 return ret;
145} 171}
146 172
173static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
174 const u8 *data, u16 length)
175{
176 int offset = 0;
177
178 /* copy data in chunks of 2k (and remainder if any) */
179
180 while (offset < length) {
181 int chunk_size, ret;
182
183 chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
184 length - offset);
185
186 ret = iwl_nvm_write_chunk(mvm, section, offset,
187 chunk_size, data + offset);
188 if (ret < 0)
189 return ret;
190
191 offset += chunk_size;
192 }
193
194 return 0;
195}
196
147/* 197/*
148 * Reads an NVM section completely. 198 * Reads an NVM section completely.
149 * NICs prior to 7000 family doesn't have a real NVM, but just read 199 * NICs prior to 7000 family doesn't have a real NVM, but just read
@@ -177,7 +227,8 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
177 offset += ret; 227 offset += ret;
178 } 228 }
179 229
180 IWL_INFO(mvm, "NVM section %d read completed\n", section); 230 IWL_DEBUG_EEPROM(mvm->trans->dev,
231 "NVM section %d read completed\n", section);
181 return offset; 232 return offset;
182} 233}
183 234
@@ -200,7 +251,130 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
200 hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data; 251 hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
201 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; 252 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
202 calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; 253 calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
203 return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib); 254 return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
255 iwl_fw_valid_tx_ant(mvm->fw),
256 iwl_fw_valid_rx_ant(mvm->fw));
257}
258
259#define MAX_NVM_FILE_LEN 16384
260
261/*
262 * HOW TO CREATE THE NVM FILE FORMAT:
263 * ------------------------------
264 * 1. create hex file, format:
265 * 3800 -> header
266 * 0000 -> header
267 * 5a40 -> data
268 *
269 * rev - 6 bit (word1)
270 * len - 10 bit (word1)
271 * id - 4 bit (word2)
272 * rsv - 12 bit (word2)
273 *
274 * 2. flip 8bits with 8 bits per line to get the right NVM file format
275 *
276 * 3. create binary file from the hex file
277 *
278 * 4. save as "iNVM_xxx.bin" under /lib/firmware
279 */
280static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
281{
282 int ret, section_id, section_size;
283 const struct firmware *fw_entry;
284 const struct {
285 __le16 word1;
286 __le16 word2;
287 u8 data[];
288 } *file_sec;
289 const u8 *eof;
290
291#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
292#define NVM_WORD2_ID(x) (x >> 12)
293
294 /*
295 * Obtain NVM image via request_firmware. Since we already used
296 * request_firmware_nowait() for the firmware binary load and only
297 * get here after that we assume the NVM request can be satisfied
298 * synchronously.
299 */
300 ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file,
301 mvm->trans->dev);
302 if (ret) {
303 IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
304 iwlwifi_mod_params.nvm_file, ret);
305 return ret;
306 }
307
308 IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
309 iwlwifi_mod_params.nvm_file, fw_entry->size);
310
311 if (fw_entry->size < sizeof(*file_sec)) {
312 IWL_ERR(mvm, "NVM file too small\n");
313 ret = -EINVAL;
314 goto out;
315 }
316
317 if (fw_entry->size > MAX_NVM_FILE_LEN) {
318 IWL_ERR(mvm, "NVM file too large\n");
319 ret = -EINVAL;
320 goto out;
321 }
322
323 eof = fw_entry->data + fw_entry->size;
324
325 file_sec = (void *)fw_entry->data;
326
327 while (true) {
328 if (file_sec->data > eof) {
329 IWL_ERR(mvm,
330 "ERROR - NVM file too short for section header\n");
331 ret = -EINVAL;
332 break;
333 }
334
335 /* check for EOF marker */
336 if (!file_sec->word1 && !file_sec->word2) {
337 ret = 0;
338 break;
339 }
340
341 section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
342 section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
343
344 if (section_size > IWL_MAX_NVM_SECTION_SIZE) {
345 IWL_ERR(mvm, "ERROR - section too large (%d)\n",
346 section_size);
347 ret = -EINVAL;
348 break;
349 }
350
351 if (!section_size) {
352 IWL_ERR(mvm, "ERROR - section empty\n");
353 ret = -EINVAL;
354 break;
355 }
356
357 if (file_sec->data + section_size > eof) {
358 IWL_ERR(mvm,
359 "ERROR - NVM file too short for section (%d bytes)\n",
360 section_size);
361 ret = -EINVAL;
362 break;
363 }
364
365 ret = iwl_nvm_write_section(mvm, section_id, file_sec->data,
366 section_size);
367 if (ret < 0) {
368 IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
369 break;
370 }
371
372 /* advance to the next section */
373 file_sec = (void *)(file_sec->data + section_size);
374 }
375out:
376 release_firmware(fw_entry);
377 return ret;
204} 378}
205 379
206int iwl_nvm_init(struct iwl_mvm *mvm) 380int iwl_nvm_init(struct iwl_mvm *mvm)
@@ -208,6 +382,17 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
208 int ret, i, section; 382 int ret, i, section;
209 u8 *nvm_buffer, *temp; 383 u8 *nvm_buffer, *temp;
210 384
385 /* load external NVM if configured */
386 if (iwlwifi_mod_params.nvm_file) {
387 /* move to External NVM flow */
388 ret = iwl_mvm_load_external_nvm(mvm);
389 if (ret)
390 return ret;
391 }
392
393 /* Read From FW NVM */
394 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
395
211 /* TODO: find correct NVM max size for a section */ 396 /* TODO: find correct NVM max size for a section */
212 nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, 397 nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
213 GFP_KERNEL); 398 GFP_KERNEL);
@@ -231,8 +416,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
231 if (ret < 0) 416 if (ret < 0)
232 return ret; 417 return ret;
233 418
234 ret = 0;
235 mvm->nvm_data = iwl_parse_nvm_sections(mvm); 419 mvm->nvm_data = iwl_parse_nvm_sections(mvm);
420 if (!mvm->nvm_data)
421 return -ENODATA;
236 422
237 return ret; 423 return 0;
238} 424}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index b29c31a41594..af79a14063a9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -215,17 +215,22 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
215 RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false), 215 RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
216 RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false), 216 RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
217 RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), 217 RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
218
219 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
220 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
221 RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
222
218 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), 223 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
219 224
220 RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false), 225 RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
221 RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false), 226 RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
222 227
223 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
224 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
225
226 RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false), 228 RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
227 RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), 229 RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
228 230
231 RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
232 false),
233
229 RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false), 234 RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
230}; 235};
231#undef RX_HANDLER 236#undef RX_HANDLER
@@ -288,11 +293,14 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
288 CMD(NET_DETECT_HOTSPOTS_CMD), 293 CMD(NET_DETECT_HOTSPOTS_CMD),
289 CMD(NET_DETECT_HOTSPOTS_QUERY_CMD), 294 CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
290 CMD(CARD_STATE_NOTIFICATION), 295 CMD(CARD_STATE_NOTIFICATION),
296 CMD(MISSED_BEACONS_NOTIFICATION),
291 CMD(BT_COEX_PRIO_TABLE), 297 CMD(BT_COEX_PRIO_TABLE),
292 CMD(BT_COEX_PROT_ENV), 298 CMD(BT_COEX_PROT_ENV),
293 CMD(BT_PROFILE_NOTIFICATION), 299 CMD(BT_PROFILE_NOTIFICATION),
294 CMD(BT_CONFIG), 300 CMD(BT_CONFIG),
295 CMD(MCAST_FILTER_CMD), 301 CMD(MCAST_FILTER_CMD),
302 CMD(REPLY_BEACON_FILTERING_CMD),
303 CMD(REPLY_THERMAL_MNG_BACKOFF),
296}; 304};
297#undef CMD 305#undef CMD
298 306
@@ -393,10 +401,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
393 if (err) 401 if (err)
394 goto out_free; 402 goto out_free;
395 403
404 iwl_mvm_tt_initialize(mvm);
405
396 mutex_lock(&mvm->mutex); 406 mutex_lock(&mvm->mutex);
397 err = iwl_run_init_mvm_ucode(mvm, true); 407 err = iwl_run_init_mvm_ucode(mvm, true);
398 mutex_unlock(&mvm->mutex); 408 mutex_unlock(&mvm->mutex);
399 if (err && !iwlmvm_mod_params.init_dbg) { 409 /* returns 0 if successful, 1 if success but in rfkill */
410 if (err < 0 && !iwlmvm_mod_params.init_dbg) {
400 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); 411 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
401 goto out_free; 412 goto out_free;
402 } 413 }
@@ -439,10 +450,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
439 450
440 iwl_mvm_leds_exit(mvm); 451 iwl_mvm_leds_exit(mvm);
441 452
453 iwl_mvm_tt_exit(mvm);
454
442 ieee80211_unregister_hw(mvm->hw); 455 ieee80211_unregister_hw(mvm->hw);
443 456
444 kfree(mvm->scan_cmd); 457 kfree(mvm->scan_cmd);
445 458
459#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
460 kfree(mvm->d3_resume_sram);
461#endif
462
446 iwl_trans_stop_hw(mvm->trans, true); 463 iwl_trans_stop_hw(mvm->trans, true);
447 464
448 iwl_phy_db_free(mvm->phy_db); 465 iwl_phy_db_free(mvm->phy_db);
@@ -589,6 +606,16 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
589 ieee80211_wake_queue(mvm->hw, mq); 606 ieee80211_wake_queue(mvm->hw, mq);
590} 607}
591 608
609void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
610{
611 if (state)
612 set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
613 else
614 clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
615
616 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
617}
618
592static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) 619static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
593{ 620{
594 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 621 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -598,7 +625,7 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
598 else 625 else
599 clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); 626 clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
600 627
601 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state); 628 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
602} 629}
603 630
604static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) 631static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index a28a1d1f23eb..a8652ddd6bed 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -195,21 +195,6 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
195 return ret; 195 return ret;
196} 196}
197 197
198
199struct phy_ctx_used_data {
200 unsigned long used[BITS_TO_LONGS(NUM_PHY_CTX)];
201};
202
203static void iwl_mvm_phy_ctx_used_iter(struct ieee80211_hw *hw,
204 struct ieee80211_chanctx_conf *ctx,
205 void *_data)
206{
207 struct phy_ctx_used_data *data = _data;
208 struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
209
210 __set_bit(phy_ctxt->id, data->used);
211}
212
213/* 198/*
214 * Send a command to add a PHY context based on the current HW configuration. 199 * Send a command to add a PHY context based on the current HW configuration.
215 */ 200 */
@@ -217,34 +202,28 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
217 struct cfg80211_chan_def *chandef, 202 struct cfg80211_chan_def *chandef,
218 u8 chains_static, u8 chains_dynamic) 203 u8 chains_static, u8 chains_dynamic)
219{ 204{
220 struct phy_ctx_used_data data = { 205 int ret;
221 .used = { },
222 };
223
224 /*
225 * If this is a regular PHY context (not the ROC one)
226 * skip the ROC PHY context's ID.
227 */
228 if (ctxt != &mvm->phy_ctxt_roc)
229 __set_bit(mvm->phy_ctxt_roc.id, data.used);
230 206
207 WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
208 ctxt->ref);
231 lockdep_assert_held(&mvm->mutex); 209 lockdep_assert_held(&mvm->mutex);
232 ctxt->color++;
233 210
234 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 211 ctxt->channel = chandef->chan;
235 ieee80211_iter_chan_contexts_atomic( 212 ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
236 mvm->hw, iwl_mvm_phy_ctx_used_iter, &data); 213 chains_static, chains_dynamic,
214 FW_CTXT_ACTION_ADD, 0);
237 215
238 ctxt->id = find_first_zero_bit(data.used, NUM_PHY_CTX); 216 return ret;
239 if (WARN_ONCE(ctxt->id == NUM_PHY_CTX, 217}
240 "Failed to init PHY context - no free ID!\n"))
241 return -EIO;
242 }
243 218
244 ctxt->channel = chandef->chan; 219/*
245 return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, 220 * Update the number of references to the given PHY context. This is valid only
246 chains_static, chains_dynamic, 221 * in case the PHY context was already created, i.e., its reference count > 0.
247 FW_CTXT_ACTION_ADD, 0); 222 */
223void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
224{
225 lockdep_assert_held(&mvm->mutex);
226 ctxt->ref++;
248} 227}
249 228
250/* 229/*
@@ -264,23 +243,12 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
264 FW_CTXT_ACTION_MODIFY, 0); 243 FW_CTXT_ACTION_MODIFY, 0);
265} 244}
266 245
267/* 246void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
268 * Send a command to the FW to remove the given phy context.
269 * Once the command is sent, regardless of success or failure, the context is
270 * marked as invalid
271 */
272void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
273{ 247{
274 struct iwl_phy_context_cmd cmd;
275 int ret;
276
277 lockdep_assert_held(&mvm->mutex); 248 lockdep_assert_held(&mvm->mutex);
278 249
279 iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, FW_CTXT_ACTION_REMOVE, 0); 250 if (WARN_ON_ONCE(!ctxt))
280 ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC, 251 return;
281 sizeof(struct iwl_phy_context_cmd), 252
282 &cmd); 253 ctxt->ref--;
283 if (ret)
284 IWL_ERR(mvm, "Failed to send PHY remove: ctxt id=%d\n",
285 ctxt->id);
286} 254}
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index ed77e437aac4..e7ca965a89b8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -75,6 +75,54 @@
75 75
76#define POWER_KEEP_ALIVE_PERIOD_SEC 25 76#define POWER_KEEP_ALIVE_PERIOD_SEC 25
77 77
78static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
79 struct iwl_beacon_filter_cmd *cmd)
80{
81 int ret;
82
83 ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC,
84 sizeof(struct iwl_beacon_filter_cmd), cmd);
85
86 if (!ret) {
87 IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
88 cmd->ba_enable_beacon_abort);
89 IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
90 cmd->ba_escape_timer);
91 IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
92 cmd->bf_debug_flag);
93 IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
94 cmd->bf_enable_beacon_filter);
95 IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
96 cmd->bf_energy_delta);
97 IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
98 cmd->bf_escape_timer);
99 IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
100 cmd->bf_roaming_energy_delta);
101 IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
102 cmd->bf_roaming_state);
103 IWL_DEBUG_POWER(mvm, "bf_temperature_delta is: %d\n",
104 cmd->bf_temperature_delta);
105 }
106 return ret;
107}
108
109static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
110 struct ieee80211_vif *vif, bool enable)
111{
112 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
113 struct iwl_beacon_filter_cmd cmd = {
114 IWL_BF_CMD_CONFIG_DEFAULTS,
115 .bf_enable_beacon_filter = 1,
116 .ba_enable_beacon_abort = enable,
117 };
118
119 if (!mvmvif->bf_enabled)
120 return 0;
121
122 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
123 return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
124}
125
78static void iwl_mvm_power_log(struct iwl_mvm *mvm, 126static void iwl_mvm_power_log(struct iwl_mvm *mvm,
79 struct iwl_powertable_cmd *cmd) 127 struct iwl_powertable_cmd *cmd)
80{ 128{
@@ -89,8 +137,12 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
89 le32_to_cpu(cmd->rx_data_timeout)); 137 le32_to_cpu(cmd->rx_data_timeout));
90 IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", 138 IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
91 le32_to_cpu(cmd->tx_data_timeout)); 139 le32_to_cpu(cmd->tx_data_timeout));
92 IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", 140 if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
93 cmd->lprx_rssi_threshold); 141 IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
142 le32_to_cpu(cmd->skip_dtim_periods));
143 if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
144 IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
145 le32_to_cpu(cmd->lprx_rssi_threshold));
94 } 146 }
95} 147}
96 148
@@ -103,6 +155,8 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
103 int dtimper, dtimper_msec; 155 int dtimper, dtimper_msec;
104 int keep_alive; 156 int keep_alive;
105 bool radar_detect = false; 157 bool radar_detect = false;
158 struct iwl_mvm_vif *mvmvif __maybe_unused =
159 iwl_mvm_vif_from_mac80211(vif);
106 160
107 /* 161 /*
108 * Regardless of power management state the driver must set 162 * Regardless of power management state the driver must set
@@ -115,12 +169,27 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
115 return; 169 return;
116 170
117 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); 171 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
172 if (!vif->bss_conf.assoc)
173 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
118 174
175#ifdef CONFIG_IWLWIFI_DEBUGFS
176 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
177 mvmvif->dbgfs_pm.disable_power_off)
178 cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
179#endif
119 if (!vif->bss_conf.ps) 180 if (!vif->bss_conf.ps)
120 return; 181 return;
121 182
122 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); 183 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
123 184
185 if (vif->bss_conf.beacon_rate &&
186 (vif->bss_conf.beacon_rate->bitrate == 10 ||
187 vif->bss_conf.beacon_rate->bitrate == 60)) {
188 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
189 cmd->lprx_rssi_threshold =
190 cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
191 }
192
124 dtimper = hw->conf.ps_dtim_period ?: 1; 193 dtimper = hw->conf.ps_dtim_period ?: 1;
125 194
126 /* Check if radar detection is required on current channel */ 195 /* Check if radar detection is required on current channel */
@@ -135,8 +204,11 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
135 204
136 /* Check skip over DTIM conditions */ 205 /* Check skip over DTIM conditions */
137 if (!radar_detect && (dtimper <= 10) && 206 if (!radar_detect && (dtimper <= 10) &&
138 (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP)) 207 (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
208 mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
139 cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); 209 cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
210 cmd->skip_dtim_periods = cpu_to_le32(3);
211 }
140 212
141 /* Check that keep alive period is at least 3 * DTIM */ 213 /* Check that keep alive period is at least 3 * DTIM */
142 dtimper_msec = dtimper * vif->bss_conf.beacon_int; 214 dtimper_msec = dtimper * vif->bss_conf.beacon_int;
@@ -145,27 +217,85 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
145 keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC); 217 keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
146 cmd->keep_alive_seconds = keep_alive; 218 cmd->keep_alive_seconds = keep_alive;
147 219
148 cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); 220 if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
149 cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); 221 cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
222 cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
223 } else {
224 cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
225 cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
226 }
227
228#ifdef CONFIG_IWLWIFI_DEBUGFS
229 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
230 cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
231 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
232 if (mvmvif->dbgfs_pm.skip_over_dtim)
233 cmd->flags |=
234 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
235 else
236 cmd->flags &=
237 cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
238 }
239 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
240 cmd->rx_data_timeout =
241 cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
242 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
243 cmd->tx_data_timeout =
244 cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
245 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
246 cmd->skip_dtim_periods =
247 cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
248 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
249 if (mvmvif->dbgfs_pm.lprx_ena)
250 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
251 else
252 cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
253 }
254 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
255 cmd->lprx_rssi_threshold =
256 cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
257#endif /* CONFIG_IWLWIFI_DEBUGFS */
150} 258}
151 259
152int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 260int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
153{ 261{
262 int ret;
263 bool ba_enable;
154 struct iwl_powertable_cmd cmd = {}; 264 struct iwl_powertable_cmd cmd = {};
155 265
156 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 266 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
157 return 0; 267 return 0;
158 268
269 /*
270 * TODO: The following vif_count verification is temporary condition.
271 * Avoid power mode update if more than one interface is currently
272 * active. Remove this condition when FW will support power management
273 * on multiple MACs.
274 */
275 IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
276 mvm->vif_count);
277 if (mvm->vif_count > 1)
278 return 0;
279
159 iwl_mvm_power_build_cmd(mvm, vif, &cmd); 280 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
160 iwl_mvm_power_log(mvm, &cmd); 281 iwl_mvm_power_log(mvm, &cmd);
161 282
162 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, 283 ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
163 sizeof(cmd), &cmd); 284 sizeof(cmd), &cmd);
285 if (ret)
286 return ret;
287
288 ba_enable = !!(cmd.flags &
289 cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
290
291 return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
164} 292}
165 293
166int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 294int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
167{ 295{
168 struct iwl_powertable_cmd cmd = {}; 296 struct iwl_powertable_cmd cmd = {};
297 struct iwl_mvm_vif *mvmvif __maybe_unused =
298 iwl_mvm_vif_from_mac80211(vif);
169 299
170 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 300 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
171 return 0; 301 return 0;
@@ -173,8 +303,82 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
173 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) 303 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
174 cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); 304 cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
175 305
306#ifdef CONFIG_IWLWIFI_DEBUGFS
307 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
308 mvmvif->dbgfs_pm.disable_power_off)
309 cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
310#endif
176 iwl_mvm_power_log(mvm, &cmd); 311 iwl_mvm_power_log(mvm, &cmd);
177 312
178 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC, 313 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
179 sizeof(cmd), &cmd); 314 sizeof(cmd), &cmd);
180} 315}
316
317#ifdef CONFIG_IWLWIFI_DEBUGFS
318void
319iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
320 struct iwl_beacon_filter_cmd *cmd)
321{
322 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
323 struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
324
325 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
326 cmd->bf_energy_delta = dbgfs_bf->bf_energy_delta;
327 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
328 cmd->bf_roaming_energy_delta =
329 dbgfs_bf->bf_roaming_energy_delta;
330 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
331 cmd->bf_roaming_state = dbgfs_bf->bf_roaming_state;
332 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMPERATURE_DELTA)
333 cmd->bf_temperature_delta = dbgfs_bf->bf_temperature_delta;
334 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
335 cmd->bf_debug_flag = dbgfs_bf->bf_debug_flag;
336 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
337 cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
338 if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
339 cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
340 if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
341 cmd->ba_enable_beacon_abort = dbgfs_bf->ba_enable_beacon_abort;
342}
343#endif
344
345int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
346 struct ieee80211_vif *vif)
347{
348 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
349 struct iwl_beacon_filter_cmd cmd = {
350 IWL_BF_CMD_CONFIG_DEFAULTS,
351 .bf_enable_beacon_filter = 1,
352 };
353 int ret;
354
355 if (mvmvif != mvm->bf_allowed_vif ||
356 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
357 return 0;
358
359 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
360 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
361
362 if (!ret)
363 mvmvif->bf_enabled = true;
364
365 return ret;
366}
367
368int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
369 struct ieee80211_vif *vif)
370{
371 struct iwl_beacon_filter_cmd cmd = {};
372 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
373 int ret;
374
375 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
376 return 0;
377
378 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
379
380 if (!ret)
381 mvmvif->bf_enabled = false;
382
383 return ret;
384}
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index a1e3e923ea3e..29d49cf0fdb2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -169,27 +169,34 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
169 num_active_bindings++; 169 num_active_bindings++;
170 } 170 }
171 171
172 if (!num_active_bindings) 172 quota = 0;
173 goto send_cmd; 173 quota_rem = 0;
174 174 if (num_active_bindings) {
175 quota = IWL_MVM_MAX_QUOTA / num_active_bindings; 175 quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
176 quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings; 176 quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
177 }
177 178
178 for (idx = 0, i = 0; i < MAX_BINDINGS; i++) { 179 for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
179 if (data.n_interfaces[i] <= 0) 180 if (data.colors[i] < 0)
180 continue; 181 continue;
181 182
182 cmd.quotas[idx].id_and_color = 183 cmd.quotas[idx].id_and_color =
183 cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i])); 184 cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
184 cmd.quotas[idx].quota = cpu_to_le32(quota); 185
185 cmd.quotas[idx].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); 186 if (data.n_interfaces[i] <= 0) {
187 cmd.quotas[idx].quota = cpu_to_le32(0);
188 cmd.quotas[idx].max_duration = cpu_to_le32(0);
189 } else {
190 cmd.quotas[idx].quota = cpu_to_le32(quota);
191 cmd.quotas[idx].max_duration =
192 cpu_to_le32(IWL_MVM_MAX_QUOTA);
193 }
186 idx++; 194 idx++;
187 } 195 }
188 196
189 /* Give the remainder of the session to the first binding */ 197 /* Give the remainder of the session to the first binding */
190 le32_add_cpu(&cmd.quotas[0].quota, quota_rem); 198 le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
191 199
192send_cmd:
193 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC, 200 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
194 sizeof(cmd), &cmd); 201 sizeof(cmd), &cmd);
195 if (ret) 202 if (ret)
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index b99fe3163866..b328a988c130 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -401,24 +401,29 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
401 401
402 load = rs_tl_get_load(lq_data, tid); 402 load = rs_tl_get_load(lq_data, tid);
403 403
404 if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) { 404 /*
405 IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", 405 * Don't create TX aggregation sessions when in high
406 sta->addr, tid); 406 * BT traffic, as they would just be disrupted by BT.
407 ret = ieee80211_start_tx_ba_session(sta, tid, 5000); 407 */
408 if (ret == -EAGAIN) { 408 if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) {
409 /* 409 IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n",
410 * driver and mac80211 is out of sync 410 BT_MBOX_MSG(&mvm->last_bt_notif,
411 * this might be cause by reloading firmware 411 3, TRAFFIC_LOAD));
412 * stop the tx ba session here 412 return ret;
413 */ 413 }
414 IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n", 414
415 tid); 415 IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
416 ieee80211_stop_tx_ba_session(sta, tid); 416 sta->addr, tid);
417 } 417 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
418 } else { 418 if (ret == -EAGAIN) {
419 IWL_DEBUG_HT(mvm, 419 /*
420 "Aggregation not enabled for tid %d because load = %u\n", 420 * driver and mac80211 is out of sync
421 tid, load); 421 * this might be cause by reloading firmware
422 * stop the tx ba session here
423 */
424 IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
425 tid);
426 ieee80211_stop_tx_ba_session(sta, tid);
422 } 427 }
423 return ret; 428 return ret;
424} 429}
@@ -1519,6 +1524,29 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
1519 u8 update_search_tbl_counter = 0; 1524 u8 update_search_tbl_counter = 0;
1520 int ret; 1525 int ret;
1521 1526
1527 switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
1528 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1529 /* nothing */
1530 break;
1531 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1532 /* avoid antenna B unless MIMO */
1533 if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
1534 tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
1535 break;
1536 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1537 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1538 /* avoid antenna B and MIMO */
1539 valid_tx_ant =
1540 first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
1541 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1542 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1543 break;
1544 default:
1545 IWL_ERR(mvm, "Invalid BT load %d",
1546 BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
1547 break;
1548 }
1549
1522 start_action = tbl->action; 1550 start_action = tbl->action;
1523 while (1) { 1551 while (1) {
1524 lq_sta->action_counter++; 1552 lq_sta->action_counter++;
@@ -1532,7 +1560,9 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
1532 tx_chains_num <= 2)) 1560 tx_chains_num <= 2))
1533 break; 1561 break;
1534 1562
1535 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1563 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
1564 BT_MBOX_MSG(&mvm->last_bt_notif, 3,
1565 TRAFFIC_LOAD) == 0)
1536 break; 1566 break;
1537 1567
1538 memcpy(search_tbl, tbl, sz); 1568 memcpy(search_tbl, tbl, sz);
@@ -1654,6 +1684,28 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
1654 u8 update_search_tbl_counter = 0; 1684 u8 update_search_tbl_counter = 0;
1655 int ret; 1685 int ret;
1656 1686
1687 switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
1688 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1689 /* nothing */
1690 break;
1691 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1692 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1693 /* avoid antenna B and MIMO */
1694 if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
1695 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1696 break;
1697 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1698 /* avoid antenna B unless MIMO */
1699 if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
1700 tbl->action == IWL_MIMO2_SWITCH_SISO_C)
1701 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1702 break;
1703 default:
1704 IWL_ERR(mvm, "Invalid BT load %d",
1705 BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
1706 break;
1707 }
1708
1657 start_action = tbl->action; 1709 start_action = tbl->action;
1658 while (1) { 1710 while (1) {
1659 lq_sta->action_counter++; 1711 lq_sta->action_counter++;
@@ -1791,6 +1843,28 @@ static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
1791 int ret; 1843 int ret;
1792 u8 update_search_tbl_counter = 0; 1844 u8 update_search_tbl_counter = 0;
1793 1845
1846 switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
1847 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1848 /* nothing */
1849 break;
1850 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1851 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1852 /* avoid antenna B and MIMO */
1853 if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
1854 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1855 break;
1856 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1857 /* avoid antenna B unless MIMO */
1858 if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
1859 tbl->action == IWL_MIMO3_SWITCH_SISO_C)
1860 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1861 break;
1862 default:
1863 IWL_ERR(mvm, "Invalid BT load %d",
1864 BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
1865 break;
1866 }
1867
1794 start_action = tbl->action; 1868 start_action = tbl->action;
1795 while (1) { 1869 while (1) {
1796 lq_sta->action_counter++; 1870 lq_sta->action_counter++;
@@ -2302,6 +2376,32 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
2302 (current_tpt > (100 * tbl->expected_tpt[low])))) 2376 (current_tpt > (100 * tbl->expected_tpt[low]))))
2303 scale_action = 0; 2377 scale_action = 0;
2304 2378
2379 if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
2380 IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2381 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2382 if (lq_sta->last_bt_traffic >
2383 BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
2384 /*
2385 * don't set scale_action, don't want to scale up if
2386 * the rate scale doesn't otherwise think that is a
2387 * good idea.
2388 */
2389 } else if (lq_sta->last_bt_traffic <=
2390 BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
2391 scale_action = -1;
2392 }
2393 }
2394 lq_sta->last_bt_traffic =
2395 BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
2396
2397 if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
2398 IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2399 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2400 /* search for a new modulation */
2401 rs_stay_in_table(lq_sta, true);
2402 goto lq_update;
2403 }
2404
2305 switch (scale_action) { 2405 switch (scale_action) {
2306 case -1: 2406 case -1:
2307 /* Decrease starting rate, update uCode's rate table */ 2407 /* Decrease starting rate, update uCode's rate table */
@@ -2783,6 +2883,13 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
2783 2883
2784 lq_cmd->agg_time_limit = 2884 lq_cmd->agg_time_limit =
2785 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); 2885 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2886
2887 /*
2888 * overwrite if needed, pass aggregation time limit
2889 * to uCode in uSec - This is racy - but heh, at least it helps...
2890 */
2891 if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2)
2892 lq_cmd->agg_time_limit = cpu_to_le16(1200);
2786} 2893}
2787 2894
2788static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 2895static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -3081,3 +3188,29 @@ void iwl_mvm_rate_control_unregister(void)
3081{ 3188{
3082 ieee80211_rate_control_unregister(&rs_mvm_ops); 3189 ieee80211_rate_control_unregister(&rs_mvm_ops);
3083} 3190}
3191
3192/**
3193 * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable
3194 * Tx protection, according to this rquest and previous requests,
3195 * and send the LQ command.
3196 * @lq: The LQ command
3197 * @mvmsta: The station
3198 * @enable: Enable Tx protection?
3199 */
3200int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
3201 struct iwl_mvm_sta *mvmsta, bool enable)
3202{
3203 lockdep_assert_held(&mvm->mutex);
3204
3205 if (enable) {
3206 if (mvmsta->tx_protection == 0)
3207 lq->flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
3208 mvmsta->tx_protection++;
3209 } else {
3210 mvmsta->tx_protection--;
3211 if (mvmsta->tx_protection == 0)
3212 lq->flags &= ~LQ_FLAG_SET_STA_TLC_RTS_MSK;
3213 }
3214
3215 return iwl_mvm_send_lq_cmd(mvm, lq, CMD_ASYNC, false);
3216}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 219c6857cc0f..cff4f6da7733 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -358,6 +358,18 @@ struct iwl_lq_sta {
358 u8 last_bt_traffic; 358 u8 last_bt_traffic;
359}; 359};
360 360
361enum iwl_bt_coex_profile_traffic_load {
362 IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0,
363 IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1,
364 IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2,
365 IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3,
366/*
367 * There are no more even though below is a u8, the
368 * indication from the BT device only has two bits.
369 */
370};
371
372
361static inline u8 num_of_ant(u8 mask) 373static inline u8 num_of_ant(u8 mask)
362{ 374{
363 return !!((mask) & ANT_A) + 375 return !!((mask) & ANT_A) +
@@ -390,4 +402,9 @@ extern int iwl_mvm_rate_control_register(void);
390 */ 402 */
391extern void iwl_mvm_rate_control_unregister(void); 403extern void iwl_mvm_rate_control_unregister(void);
392 404
405struct iwl_mvm_sta;
406
407int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
408 struct iwl_mvm_sta *mvmsta, bool enable);
409
393#endif /* __rs__ */ 410#endif /* __rs__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 4dfc21a3e83e..e4930d5027d2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -363,3 +363,25 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
363 rxb, &rx_status); 363 rxb, &rx_status);
364 return 0; 364 return 0;
365} 365}
366
367/*
368 * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
369 *
370 * TODO: This handler is implemented partially.
371 * It only gets the NIC's temperature.
372 */
373int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
374 struct iwl_rx_cmd_buffer *rxb,
375 struct iwl_device_cmd *cmd)
376{
377 struct iwl_rx_packet *pkt = rxb_addr(rxb);
378 struct iwl_notif_statistics *stats = (void *)&pkt->data;
379 struct mvm_statistics_general_common *common = &stats->general.common;
380
381 if (mvm->temperature != le32_to_cpu(common->temperature)) {
382 mvm->temperature = le32_to_cpu(common->temperature);
383 iwl_mvm_tt_handler(mvm);
384 }
385
386 return 0;
387}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 2476e43799d5..2157b0f8ced5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -298,12 +298,6 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
298 else 298 else
299 cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); 299 cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
300 300
301 /*
302 * TODO: This is a WA due to a bug in the FW AUX framework that does not
303 * properly handle time events that fail to be scheduled
304 */
305 cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
306
307 cmd->repeats = cpu_to_le32(1); 301 cmd->repeats = cpu_to_le32(1);
308 302
309 /* 303 /*
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 5c664ed54400..62fe5209093b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -64,6 +64,7 @@
64 64
65#include "mvm.h" 65#include "mvm.h"
66#include "sta.h" 66#include "sta.h"
67#include "rs.h"
67 68
68static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm) 69static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
69{ 70{
@@ -217,6 +218,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
217 mvmvif->color); 218 mvmvif->color);
218 mvm_sta->vif = vif; 219 mvm_sta->vif = vif;
219 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 220 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
221 mvm_sta->tx_protection = 0;
222 mvm_sta->tt_tx_protection = false;
220 223
221 /* HW restart, don't assume the memory has been zeroed */ 224 /* HW restart, don't assume the memory has been zeroed */
222 atomic_set(&mvm->pending_frames[sta_id], 0); 225 atomic_set(&mvm->pending_frames[sta_id], 0);
@@ -226,9 +229,6 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
226 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) 229 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
227 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); 230 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
228 231
229 if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
230 mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue);
231
232 /* for HW restart - need to reset the seq_number etc... */ 232 /* for HW restart - need to reset the seq_number etc... */
233 memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data)); 233 memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
234 234
@@ -798,21 +798,23 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
798 min(mvmsta->max_agg_bufsize, buf_size); 798 min(mvmsta->max_agg_bufsize, buf_size);
799 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; 799 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
800 800
801 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
802 sta->addr, tid);
803
801 if (mvm->cfg->ht_params->use_rts_for_aggregation) { 804 if (mvm->cfg->ht_params->use_rts_for_aggregation) {
802 /* 805 /*
803 * switch to RTS/CTS if it is the prefer protection 806 * switch to RTS/CTS if it is the prefer protection
804 * method for HT traffic 807 * method for HT traffic
808 * this function also sends the LQ command
805 */ 809 */
806 mvmsta->lq_sta.lq.flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK; 810 return iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
811 mvmsta, true);
807 /* 812 /*
808 * TODO: remove the TLC_RTS flag when we tear down the last 813 * TODO: remove the TLC_RTS flag when we tear down the last
809 * AGG session (agg_tids_count in DVM) 814 * AGG session (agg_tids_count in DVM)
810 */ 815 */
811 } 816 }
812 817
813 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
814 sta->addr, tid);
815
816 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false); 818 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false);
817} 819}
818 820
@@ -1287,17 +1289,11 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1287 struct iwl_mvm_add_sta_cmd cmd = { 1289 struct iwl_mvm_add_sta_cmd cmd = {
1288 .add_modify = STA_MODE_MODIFY, 1290 .add_modify = STA_MODE_MODIFY,
1289 .sta_id = mvmsta->sta_id, 1291 .sta_id = mvmsta->sta_id,
1290 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, 1292 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
1291 .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE),
1292 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 1293 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1293 }; 1294 };
1294 int ret; 1295 int ret;
1295 1296
1296 /*
1297 * Same modify mask for sleep_tx_count and sleep_state_flags but this
1298 * should be fine since if we set the STA as "awake", then
1299 * sleep_tx_count is not relevant.
1300 */
1301 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); 1297 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1302 if (ret) 1298 if (ret)
1303 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1299 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index a4ddce77aaae..94b265eb32b8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -250,7 +250,6 @@ enum iwl_mvm_agg_state {
250 * the first packet to be sent in legacy HW queue in Tx AGG stop flow. 250 * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
251 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that 251 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
252 * we are ready to finish the Tx AGG stop / start flow. 252 * we are ready to finish the Tx AGG stop / start flow.
253 * @wait_for_ba: Expect block-ack before next Tx reply
254 */ 253 */
255struct iwl_mvm_tid_data { 254struct iwl_mvm_tid_data {
256 u16 seq_number; 255 u16 seq_number;
@@ -260,7 +259,6 @@ struct iwl_mvm_tid_data {
260 enum iwl_mvm_agg_state state; 259 enum iwl_mvm_agg_state state;
261 u16 txq_id; 260 u16 txq_id;
262 u16 ssn; 261 u16 ssn;
263 bool wait_for_ba;
264}; 262};
265 263
266/** 264/**
@@ -275,6 +273,8 @@ struct iwl_mvm_tid_data {
275 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx 273 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
276 * and from Tx response flow, it needs a spinlock. 274 * and from Tx response flow, it needs a spinlock.
277 * @tid_data: per tid data. Look at %iwl_mvm_tid_data. 275 * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
276 * @tx_protection: reference counter for controlling the Tx protection.
277 * @tt_tx_protection: is thermal throttling enable Tx protection?
278 * 278 *
279 * When mac80211 creates a station it reserves some space (hw->sta_data_size) 279 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
280 * in the structure for use by driver. This structure is placed in that 280 * in the structure for use by driver. This structure is placed in that
@@ -296,6 +296,10 @@ struct iwl_mvm_sta {
296#ifdef CONFIG_PM_SLEEP 296#ifdef CONFIG_PM_SLEEP
297 u16 last_seq_ctl; 297 u16 last_seq_ctl;
298#endif 298#endif
299
300 /* Temporary, until the new TLC will control the Tx protection */
301 s8 tx_protection;
302 bool tt_tx_protection;
299}; 303};
300 304
301/** 305/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
new file mode 100644
index 000000000000..d6ae7f16ac11
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -0,0 +1,530 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include "mvm.h"
65#include "iwl-config.h"
66#include "iwl-io.h"
67#include "iwl-csr.h"
68#include "iwl-prph.h"
69
70#define OTP_DTS_DIODE_DEVIATION 96 /*in words*/
71/* VBG - Voltage Band Gap error data (temperature offset) */
72#define OTP_WP_DTS_VBG (OTP_DTS_DIODE_DEVIATION + 2)
73#define MEAS_VBG_MIN_VAL 2300
74#define MEAS_VBG_MAX_VAL 3000
75#define MEAS_VBG_DEFAULT_VAL 2700
76#define DTS_DIODE_VALID(flags) (flags & DTS_DIODE_REG_FLAGS_PASS_ONCE)
77#define MIN_TEMPERATURE 0
78#define MAX_TEMPERATURE 125
79#define TEMPERATURE_ERROR (MAX_TEMPERATURE + 1)
80#define PTAT_DIGITAL_VALUE_MIN_VALUE 0
81#define PTAT_DIGITAL_VALUE_MAX_VALUE 0xFF
82#define DTS_VREFS_NUM 5
83static inline u32 DTS_DIODE_GET_VREFS_ID(u32 flags)
84{
85 return (flags & DTS_DIODE_REG_FLAGS_VREFS_ID) >>
86 DTS_DIODE_REG_FLAGS_VREFS_ID_POS;
87}
88
89#define CALC_VREFS_MIN_DIFF 43
90#define CALC_VREFS_MAX_DIFF 51
91#define CALC_LUT_SIZE (1 + CALC_VREFS_MAX_DIFF - CALC_VREFS_MIN_DIFF)
92#define CALC_LUT_INDEX_OFFSET CALC_VREFS_MIN_DIFF
93#define CALC_TEMPERATURE_RESULT_SHIFT_OFFSET 23
94
95/*
96 * @digital_value: The diode's digital-value sampled (temperature/voltage)
97 * @vref_low: The lower voltage-reference (the vref just below the diode's
98 * sampled digital-value)
99 * @vref_high: The higher voltage-reference (the vref just above the diode's
100 * sampled digital-value)
101 * @flags: bits[1:0]: The ID of the Vrefs pair (lowVref,highVref)
102 * bits[6:2]: Reserved.
103 * bits[7:7]: Indicates completion of at least 1 successful sample
104 * since last DTS reset.
105 */
106struct iwl_mvm_dts_diode_bits {
107 u8 digital_value;
108 u8 vref_low;
109 u8 vref_high;
110 u8 flags;
111} __packed;
112
113union dts_diode_results {
114 u32 reg_value;
115 struct iwl_mvm_dts_diode_bits bits;
116} __packed;
117
118static s16 iwl_mvm_dts_get_volt_band_gap(struct iwl_mvm *mvm)
119{
120 struct iwl_nvm_section calib_sec;
121 const __le16 *calib;
122 u16 vbg;
123
124 /* TODO: move parsing to NVM code */
125 calib_sec = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION];
126 calib = (__le16 *)calib_sec.data;
127
128 vbg = le16_to_cpu(calib[OTP_WP_DTS_VBG]);
129
130 if (vbg < MEAS_VBG_MIN_VAL || vbg > MEAS_VBG_MAX_VAL)
131 vbg = MEAS_VBG_DEFAULT_VAL;
132
133 return vbg;
134}
135
136static u16 iwl_mvm_dts_get_ptat_deviation_offset(struct iwl_mvm *mvm)
137{
138 const u8 *calib;
139 u8 ptat, pa1, pa2, median;
140
141 /* TODO: move parsing to NVM code */
142 calib = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION].data;
143 ptat = calib[OTP_DTS_DIODE_DEVIATION];
144 pa1 = calib[OTP_DTS_DIODE_DEVIATION + 1];
145 pa2 = calib[OTP_DTS_DIODE_DEVIATION + 2];
146
147 /* get the median: */
148 if (ptat > pa1) {
149 if (ptat > pa2)
150 median = (pa1 > pa2) ? pa1 : pa2;
151 else
152 median = ptat;
153 } else {
154 if (pa1 > pa2)
155 median = (ptat > pa2) ? ptat : pa2;
156 else
157 median = pa1;
158 }
159
160 return ptat - median;
161}
162
163static u8 iwl_mvm_dts_calibrate_ptat_deviation(struct iwl_mvm *mvm, u8 value)
164{
165 /* Calibrate the PTAT digital value, based on PTAT deviation data: */
166 s16 new_val = value - iwl_mvm_dts_get_ptat_deviation_offset(mvm);
167
168 if (new_val > PTAT_DIGITAL_VALUE_MAX_VALUE)
169 new_val = PTAT_DIGITAL_VALUE_MAX_VALUE;
170 else if (new_val < PTAT_DIGITAL_VALUE_MIN_VALUE)
171 new_val = PTAT_DIGITAL_VALUE_MIN_VALUE;
172
173 return new_val;
174}
175
176static bool dts_get_adjacent_vrefs(struct iwl_mvm *mvm,
177 union dts_diode_results *avg_ptat)
178{
179 u8 vrefs_results[DTS_VREFS_NUM];
180 u8 low_vref_index = 0, flags;
181 u32 reg;
182
183 reg = iwl_read_prph(mvm->trans, DTSC_VREF_AVG);
184 memcpy(vrefs_results, &reg, sizeof(reg));
185 reg = iwl_read_prph(mvm->trans, DTSC_VREF5_AVG);
186 vrefs_results[4] = reg & 0xff;
187
188 if (avg_ptat->bits.digital_value < vrefs_results[0] ||
189 avg_ptat->bits.digital_value > vrefs_results[4])
190 return false;
191
192 if (avg_ptat->bits.digital_value > vrefs_results[3])
193 low_vref_index = 3;
194 else if (avg_ptat->bits.digital_value > vrefs_results[2])
195 low_vref_index = 2;
196 else if (avg_ptat->bits.digital_value > vrefs_results[1])
197 low_vref_index = 1;
198
199 avg_ptat->bits.vref_low = vrefs_results[low_vref_index];
200 avg_ptat->bits.vref_high = vrefs_results[low_vref_index + 1];
201 flags = avg_ptat->bits.flags;
202 avg_ptat->bits.flags =
203 (flags & ~DTS_DIODE_REG_FLAGS_VREFS_ID) |
204 (low_vref_index & DTS_DIODE_REG_FLAGS_VREFS_ID);
205 return true;
206}
207
208/*
209 * return true it the results are valid, and false otherwise.
210 */
211static bool dts_read_ptat_avg_results(struct iwl_mvm *mvm,
212 union dts_diode_results *avg_ptat)
213{
214 u32 reg;
215 u8 tmp;
216
217 /* fill the diode value and pass_once with avg-reg results */
218 reg = iwl_read_prph(mvm->trans, DTSC_PTAT_AVG);
219 reg &= DTS_DIODE_REG_DIG_VAL | DTS_DIODE_REG_PASS_ONCE;
220 avg_ptat->reg_value = reg;
221
222 /* calibrate the PTAT digital value */
223 tmp = avg_ptat->bits.digital_value;
224 tmp = iwl_mvm_dts_calibrate_ptat_deviation(mvm, tmp);
225 avg_ptat->bits.digital_value = tmp;
226
227 /*
228 * fill vrefs fields, based on the avgVrefs results
229 * and the diode value
230 */
231 return dts_get_adjacent_vrefs(mvm, avg_ptat) &&
232 DTS_DIODE_VALID(avg_ptat->bits.flags);
233}
234
/*
 * Convert an averaged, calibrated PTAT reading into a NIC temperature.
 *
 * @avg_ptat: diode results with digital_value, vref_low/high and the
 *	VREFS id in the flags already filled in
 * @volt_band_gap: band-gap (voltage-references source) error data
 *
 * Returns the temperature (same units as the throttling thresholds), or
 * TEMPERATURE_ERROR when the voltage-reference difference is outside
 * the supported [CALC_VREFS_MIN_DIFF, CALC_VREFS_MAX_DIFF] range.
 */
static s32 calculate_nic_temperature(union dts_diode_results avg_ptat,
				     u16 volt_band_gap)
{
	u32 tmp_result;
	u8 vrefs_diff;
	/*
	 * For temperature calculation (at the end, shift right by 23)
	 * LUT[(D2-D1)] = ROUND{ 2^23 / ((D2-D1)*9*10) }
	 * (D2-D1) == 43 44 45 46 47 48 49 50 51
	 */
	static const u16 calc_lut[CALC_LUT_SIZE] = {
		2168, 2118, 2071, 2026, 1983, 1942, 1902, 1864, 1828,
	};

	/*
	 * The diff between the high and low voltage-references is assumed
	 * to be strictly within [CALC_VREFS_MIN_DIFF, CALC_VREFS_MAX_DIFF];
	 * anything else means a bad measurement.
	 */
	vrefs_diff = avg_ptat.bits.vref_high - avg_ptat.bits.vref_low;

	if (vrefs_diff < CALC_VREFS_MIN_DIFF ||
	    vrefs_diff > CALC_VREFS_MAX_DIFF)
		return TEMPERATURE_ERROR;

	/* calculate the result: */
	tmp_result =
		vrefs_diff * (DTS_DIODE_GET_VREFS_ID(avg_ptat.bits.flags) + 9);
	tmp_result += avg_ptat.bits.digital_value;
	tmp_result -= avg_ptat.bits.vref_high;

	/* multiply by the LUT value (based on the diff) */
	tmp_result *= calc_lut[vrefs_diff - CALC_LUT_INDEX_OFFSET];

	/*
	 * Get the BandGap (the voltage refereces source) error data
	 * (temperature offset)
	 */
	tmp_result *= volt_band_gap;

	/*
	 * here, tmp_result value can be up to 32-bits. We want to right-shift
	 * it *without* sign-extend.
	 */
	tmp_result = tmp_result >> CALC_TEMPERATURE_RESULT_SHIFT_OFFSET;

	/*
	 * at this point, tmp_result should be in the range:
	 * 200 <= tmp_result <= 365
	 */
	return (s16)tmp_result - 240;
}
286
/*
 * Run one DTS (digital temperature sensor) measurement cycle and return
 * the NIC temperature, or TEMPERATURE_ERROR on failure.
 *
 * The register write sequence (disable, enable, configure periodic mode,
 * settle, sample, disable) is order-sensitive; do not reorder.
 */
static s32 check_nic_temperature(struct iwl_mvm *mvm)
{
	u16 volt_band_gap;
	union dts_diode_results avg_ptat;

	volt_band_gap = iwl_mvm_dts_get_volt_band_gap(mvm);

	/* disable DTS */
	iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0);

	/* SV initialization */
	iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 1);
	iwl_write_prph(mvm->trans, DTSC_CFG_MODE,
		       DTSC_CFG_MODE_PERIODIC);

	/* wait for the averaged results to accumulate */
	msleep(100);
	if (!dts_read_ptat_avg_results(mvm, &avg_ptat))
		return TEMPERATURE_ERROR;

	/* disable DTS again before leaving */
	iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0);

	return calculate_nic_temperature(avg_ptat, volt_band_gap);
}
312
313static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
314{
315 u32 duration = mvm->thermal_throttle.params->ct_kill_duration;
316
317 IWL_ERR(mvm, "Enter CT Kill\n");
318 iwl_mvm_set_hw_ctkill_state(mvm, true);
319 schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
320 round_jiffies_relative(duration * HZ));
321}
322
/* Leave CT kill: turn the radio back on via the rfkill state machine. */
static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
{
	IWL_ERR(mvm, "Exit CT Kill\n");
	iwl_mvm_set_hw_ctkill_state(mvm, false);
}
328
329static void check_exit_ctkill(struct work_struct *work)
330{
331 struct iwl_mvm_tt_mgmt *tt;
332 struct iwl_mvm *mvm;
333 u32 duration;
334 s32 temp;
335
336 tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
337 mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
338
339 duration = tt->params->ct_kill_duration;
340
341 iwl_trans_start_hw(mvm->trans);
342 temp = check_nic_temperature(mvm);
343 iwl_trans_stop_hw(mvm->trans, false);
344
345 if (temp < MIN_TEMPERATURE || temp > MAX_TEMPERATURE) {
346 IWL_DEBUG_TEMP(mvm, "Failed to measure NIC temperature\n");
347 goto reschedule;
348 }
349 IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
350
351 if (temp <= tt->params->ct_kill_exit) {
352 iwl_mvm_exit_ctkill(mvm);
353 return;
354 }
355
356reschedule:
357 schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
358 round_jiffies(duration * HZ));
359}
360
361static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
362 struct ieee80211_vif *vif)
363{
364 struct iwl_mvm *mvm = _data;
365 enum ieee80211_smps_mode smps_mode;
366
367 lockdep_assert_held(&mvm->mutex);
368
369 if (mvm->thermal_throttle.dynamic_smps)
370 smps_mode = IEEE80211_SMPS_DYNAMIC;
371 else
372 smps_mode = IEEE80211_SMPS_AUTOMATIC;
373
374 if (vif->type != NL80211_IFTYPE_STATION)
375 return;
376
377 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode);
378}
379
380static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
381{
382 struct ieee80211_sta *sta;
383 struct iwl_mvm_sta *mvmsta;
384 int i, err;
385
386 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
387 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
388 lockdep_is_held(&mvm->mutex));
389 if (IS_ERR_OR_NULL(sta))
390 continue;
391 mvmsta = (void *)sta->drv_priv;
392 if (enable == mvmsta->tt_tx_protection)
393 continue;
394 err = iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
395 mvmsta, enable);
396 if (err) {
397 IWL_ERR(mvm, "Failed to %s Tx protection\n",
398 enable ? "enable" : "disable");
399 } else {
400 IWL_DEBUG_TEMP(mvm, "%s Tx protection\n",
401 enable ? "Enable" : "Disable");
402 mvmsta->tt_tx_protection = enable;
403 }
404 }
405}
406
407static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
408{
409 struct iwl_host_cmd cmd = {
410 .id = REPLY_THERMAL_MNG_BACKOFF,
411 .len = { sizeof(u32), },
412 .data = { &backoff, },
413 .flags = CMD_SYNC,
414 };
415
416 if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
417 IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
418 backoff);
419 mvm->thermal_throttle.tx_backoff = backoff;
420 } else {
421 IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n");
422 }
423}
424
/*
 * Main thermal-throttling state machine, driven by mvm->temperature.
 *
 * In order of severity: enter CT kill (radio off) above the CT-kill
 * threshold, then apply/remove dynamic SMPS, Tx protection and Tx
 * backoff according to the per-NIC thresholds in the params table.
 * Every mechanism has separate entry/exit thresholds (hysteresis) so
 * state does not flap around a single temperature value.
 */
void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
{
	const struct iwl_tt_params *params = mvm->thermal_throttle.params;
	struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
	s32 temperature = mvm->temperature;
	bool throttle_enable = false;
	int i;
	u32 tx_backoff;

	IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature);

	/* CT kill overrides everything else; the exit worker takes over */
	if (params->support_ct_kill && temperature >= params->ct_kill_entry) {
		iwl_mvm_enter_ctkill(mvm);
		return;
	}

	if (params->support_dynamic_smps) {
		if (!tt->dynamic_smps &&
		    temperature >= params->dynamic_smps_entry) {
			IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n");
			tt->dynamic_smps = true;
			ieee80211_iterate_active_interfaces_atomic(
					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
					iwl_mvm_tt_smps_iterator, mvm);
			throttle_enable = true;
		} else if (tt->dynamic_smps &&
			   temperature <= params->dynamic_smps_exit) {
			IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n");
			tt->dynamic_smps = false;
			ieee80211_iterate_active_interfaces_atomic(
					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
					iwl_mvm_tt_smps_iterator, mvm);
		}
	}

	if (params->support_tx_protection) {
		if (temperature >= params->tx_protection_entry) {
			iwl_mvm_tt_tx_protection(mvm, true);
			throttle_enable = true;
		} else if (temperature <= params->tx_protection_exit) {
			iwl_mvm_tt_tx_protection(mvm, false);
		}
	}

	if (params->support_tx_backoff) {
		tx_backoff = 0;
		/* pick the backoff of the highest threshold we crossed */
		for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
			if (temperature < params->tx_backoff[i].temperature)
				break;
			tx_backoff = params->tx_backoff[i].backoff;
		}
		if (tx_backoff != 0)
			throttle_enable = true;
		/* only talk to the firmware when the value changes */
		if (tt->tx_backoff != tx_backoff)
			iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
	}

	/*
	 * NOTE(review): the "back to normal" condition reuses
	 * tx_protection_exit as the overall exit threshold, even when
	 * tx protection is unsupported — presumably intentional; confirm.
	 */
	if (!tt->throttle && throttle_enable) {
		IWL_WARN(mvm,
			 "Due to high temperature thermal throttling initiated\n");
		tt->throttle = true;
	} else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 &&
		   temperature <= params->tx_protection_exit) {
		IWL_WARN(mvm,
			 "Temperature is back to normal thermal throttling stopped\n");
		tt->throttle = false;
	}
}
493
/*
 * Thermal-throttling parameters for the 7000 family.
 *
 * Temperature thresholds are compared directly against mvm->temperature
 * (presumably degrees Celsius — TODO confirm against firmware docs).
 * ct_kill_duration is in seconds (multiplied by HZ before scheduling).
 * tx_backoff entries must stay sorted by ascending temperature; the
 * handler picks the last entry whose threshold was reached.  Backoff
 * values are passed verbatim to REPLY_THERMAL_MNG_BACKOFF; their units
 * are defined by the firmware interface.
 */
static const struct iwl_tt_params iwl7000_tt_params = {
	.ct_kill_entry = 118,
	.ct_kill_exit = 96,
	.ct_kill_duration = 5,
	.dynamic_smps_entry = 114,
	.dynamic_smps_exit = 110,
	.tx_protection_entry = 114,
	.tx_protection_exit = 108,
	.tx_backoff = {
		{.temperature = 112, .backoff = 200},
		{.temperature = 113, .backoff = 600},
		{.temperature = 114, .backoff = 1200},
		{.temperature = 115, .backoff = 2000},
		{.temperature = 116, .backoff = 4000},
		{.temperature = 117, .backoff = 10000},
	},
	.support_ct_kill = true,
	.support_dynamic_smps = true,
	.support_tx_protection = true,
	.support_tx_backoff = true,
};
515
516void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
517{
518 struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
519
520 IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
521 tt->params = &iwl7000_tt_params;
522 tt->throttle = false;
523 INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
524}
525
/*
 * Tear down thermal throttling: make sure the CT-kill exit work is not
 * running (and cannot re-run) before the mvm object goes away.
 */
void iwl_mvm_tt_exit(struct iwl_mvm *mvm)
{
	cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
	IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 48c1891e3df6..f0e96a927407 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -175,7 +175,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
175 * table is controlled by LINK_QUALITY commands 175 * table is controlled by LINK_QUALITY commands
176 */ 176 */
177 177
178 if (ieee80211_is_data(fc)) { 178 if (ieee80211_is_data(fc) && sta) {
179 tx_cmd->initial_rate_index = 0; 179 tx_cmd->initial_rate_index = 0;
180 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); 180 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
181 return; 181 return;
@@ -408,7 +408,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
408 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, 408 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
409 tid, txq_id, seq_number); 409 tid, txq_id, seq_number);
410 410
411 /* NOTE: aggregation will need changes here (for txq id) */
412 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) 411 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
413 goto drop_unlock_sta; 412 goto drop_unlock_sta;
414 413
@@ -610,8 +609,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
610 !(info->flags & IEEE80211_TX_STAT_ACK)) 609 !(info->flags & IEEE80211_TX_STAT_ACK))
611 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 610 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
612 611
613 /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ 612 /* W/A FW bug: seq_ctl is wrong when the status isn't success */
614 if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { 613 if (status != TX_STATUS_SUCCESS) {
615 struct ieee80211_hdr *hdr = (void *)skb->data; 614 struct ieee80211_hdr *hdr = (void *)skb->data;
616 seq_ctl = le16_to_cpu(hdr->seq_ctrl); 615 seq_ctl = le16_to_cpu(hdr->seq_ctrl);
617 } 616 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 687b34e387ac..1e1332839e4a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -76,6 +76,11 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
76{ 76{
77 int ret; 77 int ret;
78 78
79#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
80 if (WARN_ON(mvm->d3_test_active))
81 return -EIO;
82#endif
83
79 /* 84 /*
80 * Synchronous commands from this op-mode must hold 85 * Synchronous commands from this op-mode must hold
81 * the mutex, this ensures we don't try to send two 86 * the mutex, this ensures we don't try to send two
@@ -125,6 +130,11 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
125 130
126 lockdep_assert_held(&mvm->mutex); 131 lockdep_assert_held(&mvm->mutex);
127 132
133#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
134 if (WARN_ON(mvm->d3_test_active))
135 return -EIO;
136#endif
137
128 /* 138 /*
129 * Only synchronous commands can wait for status, 139 * Only synchronous commands can wait for status,
130 * we use WANT_SKB so the caller can't. 140 * we use WANT_SKB so the caller can't.
@@ -471,3 +481,34 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
471 481
472 return iwl_mvm_send_cmd(mvm, &cmd); 482 return iwl_mvm_send_cmd(mvm, &cmd);
473} 483}
484
485/**
486 * iwl_mvm_update_smps - Get a requst to change the SMPS mode
487 * @req_type: The part of the driver who call for a change.
488 * @smps_requests: The request to change the SMPS mode.
489 *
490 * Get a requst to change the SMPS mode,
491 * and change it according to all other requests in the driver.
492 */
493void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
494 enum iwl_mvm_smps_type_request req_type,
495 enum ieee80211_smps_mode smps_request)
496{
497 struct iwl_mvm_vif *mvmvif;
498 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
499 int i;
500
501 lockdep_assert_held(&mvm->mutex);
502 mvmvif = iwl_mvm_vif_from_mac80211(vif);
503 mvmvif->smps_requests[req_type] = smps_request;
504 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
505 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
506 smps_mode = IEEE80211_SMPS_STATIC;
507 break;
508 }
509 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
510 smps_mode = IEEE80211_SMPS_DYNAMIC;
511 }
512
513 ieee80211_request_smps(vif, smps_mode);
514}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 8cb53ec2b77b..81f3ea5b09a4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -78,6 +78,7 @@
78 78
79/* Hardware specific file defines the PCI IDs table for that hardware module */ 79/* Hardware specific file defines the PCI IDs table for that hardware module */
80static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { 80static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
81#if IS_ENABLED(CONFIG_IWLDVM)
81 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ 82 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
82 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ 83 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
83 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ 84 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
@@ -253,13 +254,60 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
253 {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, 254 {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
254 {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, 255 {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
255 {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, 256 {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
257#endif /* CONFIG_IWLDVM */
256 258
259#if IS_ENABLED(CONFIG_IWLMVM)
257/* 7000 Series */ 260/* 7000 Series */
258 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, 261 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
259 {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_2ac_cfg)}, 262 {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
263 {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
264 {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
265 {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
266 {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
267 {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
268 {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
269 {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
270 {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
271 {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
272 {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
273 {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
274 {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
275 {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)},
276 {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)},
277 {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)},
278 {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
279 {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
280 {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
260 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, 281 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
261 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)}, 282 {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
262 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)}, 283 {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
284 {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
285 {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
286 {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
287 {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
288 {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
289 {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
290 {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
291 {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
292 {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
293 {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
294 {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
295 {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
296
297/* 3160 Series */
298 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
299 {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
300 {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
301 {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
302 {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
303 {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
304 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
305 {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
306 {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
307 {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
308 {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
309 {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
310#endif /* CONFIG_IWLMVM */
263 311
264 {0} 312 {0}
265}; 313};
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 148843e7f34f..b654dcdd048a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -217,6 +217,7 @@ struct iwl_pcie_txq_scratch_buf {
217 * @trans_pcie: pointer back to transport (for timer) 217 * @trans_pcie: pointer back to transport (for timer)
218 * @need_update: indicates need to update read/write index 218 * @need_update: indicates need to update read/write index
219 * @active: stores if queue is active 219 * @active: stores if queue is active
220 * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
220 * 221 *
221 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 222 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
222 * descriptors) and required locking structures. 223 * descriptors) and required locking structures.
@@ -232,6 +233,7 @@ struct iwl_txq {
232 struct iwl_trans_pcie *trans_pcie; 233 struct iwl_trans_pcie *trans_pcie;
233 u8 need_update; 234 u8 need_update;
234 u8 active; 235 u8 active;
236 bool ampdu;
235}; 237};
236 238
237static inline dma_addr_t 239static inline dma_addr_t
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 567e67ad1f61..fd848cd1583e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -110,9 +110,10 @@
110/* 110/*
111 * iwl_rxq_space - Return number of free slots available in queue. 111 * iwl_rxq_space - Return number of free slots available in queue.
112 */ 112 */
113static int iwl_rxq_space(const struct iwl_rxq *q) 113static int iwl_rxq_space(const struct iwl_rxq *rxq)
114{ 114{
115 int s = q->read - q->write; 115 int s = rxq->read - rxq->write;
116
116 if (s <= 0) 117 if (s <= 0)
117 s += RX_QUEUE_SIZE; 118 s += RX_QUEUE_SIZE;
118 /* keep some buffer to not confuse full and empty queue */ 119 /* keep some buffer to not confuse full and empty queue */
@@ -143,21 +144,22 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
143/* 144/*
144 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue 145 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
145 */ 146 */
146static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q) 147static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
148 struct iwl_rxq *rxq)
147{ 149{
148 unsigned long flags; 150 unsigned long flags;
149 u32 reg; 151 u32 reg;
150 152
151 spin_lock_irqsave(&q->lock, flags); 153 spin_lock_irqsave(&rxq->lock, flags);
152 154
153 if (q->need_update == 0) 155 if (rxq->need_update == 0)
154 goto exit_unlock; 156 goto exit_unlock;
155 157
156 if (trans->cfg->base_params->shadow_reg_enable) { 158 if (trans->cfg->base_params->shadow_reg_enable) {
157 /* shadow register enabled */ 159 /* shadow register enabled */
158 /* Device expects a multiple of 8 */ 160 /* Device expects a multiple of 8 */
159 q->write_actual = (q->write & ~0x7); 161 rxq->write_actual = (rxq->write & ~0x7);
160 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual); 162 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
161 } else { 163 } else {
162 struct iwl_trans_pcie *trans_pcie = 164 struct iwl_trans_pcie *trans_pcie =
163 IWL_TRANS_GET_PCIE_TRANS(trans); 165 IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -175,22 +177,22 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
175 goto exit_unlock; 177 goto exit_unlock;
176 } 178 }
177 179
178 q->write_actual = (q->write & ~0x7); 180 rxq->write_actual = (rxq->write & ~0x7);
179 iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR, 181 iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
180 q->write_actual); 182 rxq->write_actual);
181 183
182 /* Else device is assumed to be awake */ 184 /* Else device is assumed to be awake */
183 } else { 185 } else {
184 /* Device expects a multiple of 8 */ 186 /* Device expects a multiple of 8 */
185 q->write_actual = (q->write & ~0x7); 187 rxq->write_actual = (rxq->write & ~0x7);
186 iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR, 188 iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
187 q->write_actual); 189 rxq->write_actual);
188 } 190 }
189 } 191 }
190 q->need_update = 0; 192 rxq->need_update = 0;
191 193
192 exit_unlock: 194 exit_unlock:
193 spin_unlock_irqrestore(&q->lock, flags); 195 spin_unlock_irqrestore(&rxq->lock, flags);
194} 196}
195 197
196/* 198/*
@@ -355,19 +357,16 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
355 struct iwl_rxq *rxq = &trans_pcie->rxq; 357 struct iwl_rxq *rxq = &trans_pcie->rxq;
356 int i; 358 int i;
357 359
358 /* Fill the rx_used queue with _all_ of the Rx buffers */ 360 lockdep_assert_held(&rxq->lock);
361
359 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 362 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
360 /* In the reset function, these buffers may have been allocated 363 if (!rxq->pool[i].page)
361 * to an SKB, so we need to unmap and free potential storage */ 364 continue;
362 if (rxq->pool[i].page != NULL) { 365 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
363 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, 366 PAGE_SIZE << trans_pcie->rx_page_order,
364 PAGE_SIZE << trans_pcie->rx_page_order, 367 DMA_FROM_DEVICE);
365 DMA_FROM_DEVICE); 368 __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
366 __free_pages(rxq->pool[i].page, 369 rxq->pool[i].page = NULL;
367 trans_pcie->rx_page_order);
368 rxq->pool[i].page = NULL;
369 }
370 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
371 } 370 }
372} 371}
373 372
@@ -491,6 +490,20 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
491 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 490 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
492} 491}
493 492
493static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
494{
495 int i;
496
497 lockdep_assert_held(&rxq->lock);
498
499 INIT_LIST_HEAD(&rxq->rx_free);
500 INIT_LIST_HEAD(&rxq->rx_used);
501 rxq->free_count = 0;
502
503 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
504 list_add(&rxq->pool[i].list, &rxq->rx_used);
505}
506
494int iwl_pcie_rx_init(struct iwl_trans *trans) 507int iwl_pcie_rx_init(struct iwl_trans *trans)
495{ 508{
496 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 509 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -505,13 +518,12 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
505 } 518 }
506 519
507 spin_lock_irqsave(&rxq->lock, flags); 520 spin_lock_irqsave(&rxq->lock, flags);
508 INIT_LIST_HEAD(&rxq->rx_free);
509 INIT_LIST_HEAD(&rxq->rx_used);
510 521
511 INIT_WORK(&trans_pcie->rx_replenish, 522 INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
512 iwl_pcie_rx_replenish_work);
513 523
524 /* free all first - we might be reconfigured for a different size */
514 iwl_pcie_rxq_free_rbs(trans); 525 iwl_pcie_rxq_free_rbs(trans);
526 iwl_pcie_rx_init_rxb_lists(rxq);
515 527
516 for (i = 0; i < RX_QUEUE_SIZE; i++) 528 for (i = 0; i < RX_QUEUE_SIZE; i++)
517 rxq->queue[i] = NULL; 529 rxq->queue[i] = NULL;
@@ -520,7 +532,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
520 * not restocked the Rx queue with fresh buffers */ 532 * not restocked the Rx queue with fresh buffers */
521 rxq->read = rxq->write = 0; 533 rxq->read = rxq->write = 0;
522 rxq->write_actual = 0; 534 rxq->write_actual = 0;
523 rxq->free_count = 0;
524 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 535 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
525 spin_unlock_irqrestore(&rxq->lock, flags); 536 spin_unlock_irqrestore(&rxq->lock, flags);
526 537
@@ -802,9 +813,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
802 u32 handled = 0; 813 u32 handled = 0;
803 unsigned long flags; 814 unsigned long flags;
804 u32 i; 815 u32 i;
805#ifdef CONFIG_IWLWIFI_DEBUG
806 u32 inta_mask;
807#endif
808 816
809 lock_map_acquire(&trans->sync_cmd_lockdep_map); 817 lock_map_acquire(&trans->sync_cmd_lockdep_map);
810 818
@@ -826,14 +834,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
826 834
827 inta = trans_pcie->inta; 835 inta = trans_pcie->inta;
828 836
829#ifdef CONFIG_IWLWIFI_DEBUG 837 if (iwl_have_debug_level(IWL_DL_ISR))
830 if (iwl_have_debug_level(IWL_DL_ISR)) {
831 /* just for debug */
832 inta_mask = iwl_read32(trans, CSR_INT_MASK);
833 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", 838 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
834 inta, inta_mask); 839 inta, iwl_read32(trans, CSR_INT_MASK));
835 }
836#endif
837 840
838 /* saved interrupt in inta variable now we can reset trans_pcie->inta */ 841 /* saved interrupt in inta variable now we can reset trans_pcie->inta */
839 trans_pcie->inta = 0; 842 trans_pcie->inta = 0;
@@ -855,12 +858,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
855 goto out; 858 goto out;
856 } 859 }
857 860
858#ifdef CONFIG_IWLWIFI_DEBUG
859 if (iwl_have_debug_level(IWL_DL_ISR)) { 861 if (iwl_have_debug_level(IWL_DL_ISR)) {
860 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 862 /* NIC fires this, but we don't use it, redundant with WAKEUP */
861 if (inta & CSR_INT_BIT_SCD) { 863 if (inta & CSR_INT_BIT_SCD) {
862 IWL_DEBUG_ISR(trans, "Scheduler finished to transmit " 864 IWL_DEBUG_ISR(trans,
863 "the frame/frames.\n"); 865 "Scheduler finished to transmit the frame/frames.\n");
864 isr_stats->sch++; 866 isr_stats->sch++;
865 } 867 }
866 868
@@ -870,7 +872,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
870 isr_stats->alive++; 872 isr_stats->alive++;
871 } 873 }
872 } 874 }
873#endif 875
874 /* Safely ignore these bits for debug checks below */ 876 /* Safely ignore these bits for debug checks below */
875 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 877 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
876 878
@@ -1118,9 +1120,6 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
1118 struct iwl_trans *trans = data; 1120 struct iwl_trans *trans = data;
1119 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1121 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1120 u32 inta, inta_mask; 1122 u32 inta, inta_mask;
1121#ifdef CONFIG_IWLWIFI_DEBUG
1122 u32 inta_fh;
1123#endif
1124 1123
1125 lockdep_assert_held(&trans_pcie->irq_lock); 1124 lockdep_assert_held(&trans_pcie->irq_lock);
1126 1125
@@ -1159,13 +1158,11 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
1159 return IRQ_HANDLED; 1158 return IRQ_HANDLED;
1160 } 1159 }
1161 1160
1162#ifdef CONFIG_IWLWIFI_DEBUG 1161 if (iwl_have_debug_level(IWL_DL_ISR))
1163 if (iwl_have_debug_level(IWL_DL_ISR)) { 1162 IWL_DEBUG_ISR(trans,
1164 inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS); 1163 "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1165 IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, " 1164 inta, inta_mask,
1166 "fh 0x%08x\n", inta, inta_mask, inta_fh); 1165 iwl_read32(trans, CSR_FH_INT_STATUS));
1167 }
1168#endif
1169 1166
1170 trans_pcie->inta |= inta; 1167 trans_pcie->inta |= inta;
1171 /* the thread will service interrupts and re-enable them */ 1168 /* the thread will service interrupts and re-enable them */
@@ -1198,7 +1195,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
1198{ 1195{
1199 struct iwl_trans *trans = data; 1196 struct iwl_trans *trans = data;
1200 struct iwl_trans_pcie *trans_pcie; 1197 struct iwl_trans_pcie *trans_pcie;
1201 u32 inta, inta_mask; 1198 u32 inta;
1202 u32 val = 0; 1199 u32 val = 0;
1203 u32 read; 1200 u32 read;
1204 unsigned long flags; 1201 unsigned long flags;
@@ -1226,7 +1223,6 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
1226 * If we have something to service, the tasklet will re-enable ints. 1223 * If we have something to service, the tasklet will re-enable ints.
1227 * If we *don't* have something, we'll re-enable before leaving here. 1224 * If we *don't* have something, we'll re-enable before leaving here.
1228 */ 1225 */
1229 inta_mask = iwl_read32(trans, CSR_INT_MASK);
1230 iwl_write32(trans, CSR_INT_MASK, 0x00000000); 1226 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
1231 1227
1232 /* Ignore interrupt if there's nothing in NIC to service. 1228 /* Ignore interrupt if there's nothing in NIC to service.
@@ -1271,8 +1267,11 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
1271 val |= 0x8000; 1267 val |= 0x8000;
1272 1268
1273 inta = (0xff & val) | ((0xff00 & val) << 16); 1269 inta = (0xff & val) | ((0xff00 & val) << 16);
1274 IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n", 1270 IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
1275 inta, inta_mask, val); 1271 inta, trans_pcie->inta_mask, val);
1272 if (iwl_have_debug_level(IWL_DL_ISR))
1273 IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
1274 iwl_read32(trans, CSR_INT_MASK));
1276 1275
1277 inta &= trans_pcie->inta_mask; 1276 inta &= trans_pcie->inta_mask;
1278 trans_pcie->inta |= inta; 1277 trans_pcie->inta |= inta;
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 50ba0a468f94..826c15602c46 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -405,20 +405,27 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
405{ 405{
406 u8 *v_addr; 406 u8 *v_addr;
407 dma_addr_t p_addr; 407 dma_addr_t p_addr;
408 u32 offset; 408 u32 offset, chunk_sz = section->len;
409 int ret = 0; 409 int ret = 0;
410 410
411 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", 411 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
412 section_num); 412 section_num);
413 413
414 v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL); 414 v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
415 if (!v_addr) 415 GFP_KERNEL | __GFP_NOWARN);
416 return -ENOMEM; 416 if (!v_addr) {
417 IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
418 chunk_sz = PAGE_SIZE;
419 v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
420 &p_addr, GFP_KERNEL);
421 if (!v_addr)
422 return -ENOMEM;
423 }
417 424
418 for (offset = 0; offset < section->len; offset += PAGE_SIZE) { 425 for (offset = 0; offset < section->len; offset += chunk_sz) {
419 u32 copy_size; 426 u32 copy_size;
420 427
421 copy_size = min_t(u32, PAGE_SIZE, section->len - offset); 428 copy_size = min_t(u32, chunk_sz, section->len - offset);
422 429
423 memcpy(v_addr, (u8 *)section->data + offset, copy_size); 430 memcpy(v_addr, (u8 *)section->data + offset, copy_size);
424 ret = iwl_pcie_load_firmware_chunk(trans, 431 ret = iwl_pcie_load_firmware_chunk(trans,
@@ -432,7 +439,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
432 } 439 }
433 } 440 }
434 441
435 dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr); 442 dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
436 return ret; 443 return ret;
437} 444}
438 445
@@ -571,13 +578,17 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
571 clear_bit(STATUS_RFKILL, &trans_pcie->status); 578 clear_bit(STATUS_RFKILL, &trans_pcie->status);
572} 579}
573 580
574static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans) 581static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
575{ 582{
576 /* let the ucode operate on its own */
577 iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
578 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
579
580 iwl_disable_interrupts(trans); 583 iwl_disable_interrupts(trans);
584
585 /*
586 * in testing mode, the host stays awake and the
587 * hardware won't be reset (not even partially)
588 */
589 if (test)
590 return;
591
581 iwl_pcie_disable_ict(trans); 592 iwl_pcie_disable_ict(trans);
582 593
583 iwl_clear_bit(trans, CSR_GP_CNTRL, 594 iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -596,11 +607,18 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans)
596} 607}
597 608
598static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, 609static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
599 enum iwl_d3_status *status) 610 enum iwl_d3_status *status,
611 bool test)
600{ 612{
601 u32 val; 613 u32 val;
602 int ret; 614 int ret;
603 615
616 if (test) {
617 iwl_enable_interrupts(trans);
618 *status = IWL_D3_STATUS_ALIVE;
619 return 0;
620 }
621
604 iwl_pcie_set_pwr(trans, false); 622 iwl_pcie_set_pwr(trans, false);
605 623
606 val = iwl_read32(trans, CSR_RESET); 624 val = iwl_read32(trans, CSR_RESET);
@@ -636,9 +654,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
636 return ret; 654 return ret;
637 } 655 }
638 656
639 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
640 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
641
642 *status = IWL_D3_STATUS_ALIVE; 657 *status = IWL_D3_STATUS_ALIVE;
643 return 0; 658 return 0;
644} 659}
@@ -823,8 +838,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
823 unsigned long *flags) 838 unsigned long *flags)
824{ 839{
825 int ret; 840 int ret;
826 struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans); 841 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
827 spin_lock_irqsave(&pcie_trans->reg_lock, *flags); 842
843 spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
828 844
829 /* this bit wakes up the NIC */ 845 /* this bit wakes up the NIC */
830 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 846 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
@@ -860,7 +876,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
860 WARN_ONCE(1, 876 WARN_ONCE(1,
861 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", 877 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
862 val); 878 val);
863 spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags); 879 spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
864 return false; 880 return false;
865 } 881 }
866 } 882 }
@@ -869,22 +885,22 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
869 * Fool sparse by faking we release the lock - sparse will 885 * Fool sparse by faking we release the lock - sparse will
870 * track nic_access anyway. 886 * track nic_access anyway.
871 */ 887 */
872 __release(&pcie_trans->reg_lock); 888 __release(&trans_pcie->reg_lock);
873 return true; 889 return true;
874} 890}
875 891
876static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, 892static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
877 unsigned long *flags) 893 unsigned long *flags)
878{ 894{
879 struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans); 895 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
880 896
881 lockdep_assert_held(&pcie_trans->reg_lock); 897 lockdep_assert_held(&trans_pcie->reg_lock);
882 898
883 /* 899 /*
884 * Fool sparse by faking we acquiring the lock - sparse will 900 * Fool sparse by faking we acquiring the lock - sparse will
885 * track nic_access anyway. 901 * track nic_access anyway.
886 */ 902 */
887 __acquire(&pcie_trans->reg_lock); 903 __acquire(&trans_pcie->reg_lock);
888 904
889 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 905 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
890 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 906 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -895,7 +911,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
895 * scheduled on different CPUs (after we drop reg_lock). 911 * scheduled on different CPUs (after we drop reg_lock).
896 */ 912 */
897 mmiowb(); 913 mmiowb();
898 spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags); 914 spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
899} 915}
900 916
901static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, 917static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
@@ -917,11 +933,11 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
917} 933}
918 934
919static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, 935static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
920 void *buf, int dwords) 936 const void *buf, int dwords)
921{ 937{
922 unsigned long flags; 938 unsigned long flags;
923 int offs, ret = 0; 939 int offs, ret = 0;
924 u32 *vals = buf; 940 const u32 *vals = buf;
925 941
926 if (iwl_trans_grab_nic_access(trans, false, &flags)) { 942 if (iwl_trans_grab_nic_access(trans, false, &flags)) {
927 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); 943 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index c5e30294c5ac..c47c92165aba 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -224,13 +224,13 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
224 224
225 switch (sec_ctl & TX_CMD_SEC_MSK) { 225 switch (sec_ctl & TX_CMD_SEC_MSK) {
226 case TX_CMD_SEC_CCM: 226 case TX_CMD_SEC_CCM:
227 len += CCMP_MIC_LEN; 227 len += IEEE80211_CCMP_MIC_LEN;
228 break; 228 break;
229 case TX_CMD_SEC_TKIP: 229 case TX_CMD_SEC_TKIP:
230 len += TKIP_ICV_LEN; 230 len += IEEE80211_TKIP_ICV_LEN;
231 break; 231 break;
232 case TX_CMD_SEC_WEP: 232 case TX_CMD_SEC_WEP:
233 len += WEP_IV_LEN + WEP_ICV_LEN; 233 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
234 break; 234 break;
235 } 235 }
236 236
@@ -576,10 +576,16 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
576 576
577 spin_lock_bh(&txq->lock); 577 spin_lock_bh(&txq->lock);
578 while (q->write_ptr != q->read_ptr) { 578 while (q->write_ptr != q->read_ptr) {
579 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
580 txq_id, q->read_ptr);
579 iwl_pcie_txq_free_tfd(trans, txq); 581 iwl_pcie_txq_free_tfd(trans, txq);
580 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 582 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
581 } 583 }
584 txq->active = false;
582 spin_unlock_bh(&txq->lock); 585 spin_unlock_bh(&txq->lock);
586
587 /* just in case - this queue may have been stopped */
588 iwl_wake_queue(trans, txq);
583} 589}
584 590
585/* 591/*
@@ -927,6 +933,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
927 933
928 spin_lock_bh(&txq->lock); 934 spin_lock_bh(&txq->lock);
929 935
936 if (!txq->active) {
937 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
938 txq_id, ssn);
939 goto out;
940 }
941
930 if (txq->q.read_ptr == tfd_num) 942 if (txq->q.read_ptr == tfd_num)
931 goto out; 943 goto out;
932 944
@@ -1045,6 +1057,10 @@ static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
1045 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 1057 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1046} 1058}
1047 1059
1060/* Receiver address (actually, Rx station's index into station table),
1061 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
1062#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
1063
1048void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, 1064void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
1049 int sta_id, int tid, int frame_limit, u16 ssn) 1065 int sta_id, int tid, int frame_limit, u16 ssn)
1050{ 1066{
@@ -1069,6 +1085,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
1069 1085
1070 /* enable aggregations for the queue */ 1086 /* enable aggregations for the queue */
1071 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); 1087 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
1088 trans_pcie->txq[txq_id].ampdu = true;
1072 } else { 1089 } else {
1073 /* 1090 /*
1074 * disable aggregations for the queue, this will also make the 1091 * disable aggregations for the queue, this will also make the
@@ -1103,6 +1120,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
1103 (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | 1120 (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
1104 (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 1121 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
1105 SCD_QUEUE_STTS_REG_MSK); 1122 SCD_QUEUE_STTS_REG_MSK);
1123 trans_pcie->txq[txq_id].active = true;
1106 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", 1124 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
1107 txq_id, fifo, ssn & 0xff); 1125 txq_id, fifo, ssn & 0xff);
1108} 1126}
@@ -1125,6 +1143,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
1125 ARRAY_SIZE(zero_val)); 1143 ARRAY_SIZE(zero_val));
1126 1144
1127 iwl_pcie_txq_unmap(trans, txq_id); 1145 iwl_pcie_txq_unmap(trans, txq_id);
1146 trans_pcie->txq[txq_id].ampdu = false;
1128 1147
1129 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 1148 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1130} 1149}
@@ -1518,11 +1537,13 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1518 if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) { 1537 if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
1519 IWL_ERR(trans, "FW error in SYNC CMD %s\n", 1538 IWL_ERR(trans, "FW error in SYNC CMD %s\n",
1520 get_cmd_string(trans_pcie, cmd->id)); 1539 get_cmd_string(trans_pcie, cmd->id));
1540 dump_stack();
1521 ret = -EIO; 1541 ret = -EIO;
1522 goto cancel; 1542 goto cancel;
1523 } 1543 }
1524 1544
1525 if (test_bit(STATUS_RFKILL, &trans_pcie->status)) { 1545 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1546 test_bit(STATUS_RFKILL, &trans_pcie->status)) {
1526 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); 1547 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1527 ret = -ERFKILL; 1548 ret = -ERFKILL;
1528 goto cancel; 1549 goto cancel;
@@ -1564,7 +1585,8 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1564 if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) 1585 if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
1565 return -EIO; 1586 return -EIO;
1566 1587
1567 if (test_bit(STATUS_RFKILL, &trans_pcie->status)) { 1588 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1589 test_bit(STATUS_RFKILL, &trans_pcie->status)) {
1568 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", 1590 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1569 cmd->id); 1591 cmd->id);
1570 return -ERFKILL; 1592 return -ERFKILL;
@@ -1592,7 +1614,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1592 u8 wait_write_ptr = 0; 1614 u8 wait_write_ptr = 0;
1593 __le16 fc = hdr->frame_control; 1615 __le16 fc = hdr->frame_control;
1594 u8 hdr_len = ieee80211_hdrlen(fc); 1616 u8 hdr_len = ieee80211_hdrlen(fc);
1595 u16 __maybe_unused wifi_seq; 1617 u16 wifi_seq;
1596 1618
1597 txq = &trans_pcie->txq[txq_id]; 1619 txq = &trans_pcie->txq[txq_id];
1598 q = &txq->q; 1620 q = &txq->q;
@@ -1609,13 +1631,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1609 * the BA. 1631 * the BA.
1610 * Check here that the packets are in the right place on the ring. 1632 * Check here that the packets are in the right place on the ring.
1611 */ 1633 */
1612#ifdef CONFIG_IWLWIFI_DEBUG
1613 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 1634 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1614 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && 1635 WARN_ONCE(trans_pcie->txq[txq_id].ampdu &&
1615 ((wifi_seq & 0xff) != q->write_ptr), 1636 (wifi_seq & 0xff) != q->write_ptr,
1616 "Q: %d WiFi Seq %d tfdNum %d", 1637 "Q: %d WiFi Seq %d tfdNum %d",
1617 txq_id, wifi_seq, q->write_ptr); 1638 txq_id, wifi_seq, q->write_ptr);
1618#endif
1619 1639
1620 /* Set up driver data for this TFD */ 1640 /* Set up driver data for this TFD */
1621 txq->entries[q->write_ptr].skb = skb; 1641 txq->entries[q->write_ptr].skb = skb;
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 3e81264db81e..efae07e05c80 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -240,7 +240,7 @@ static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
240 memset(&mesh_access, 0, sizeof(mesh_access)); 240 memset(&mesh_access, 0, sizeof(mesh_access));
241 mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET); 241 mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET);
242 242
243 if (!strict_strtoul(buf, 10, &retry_limit)) 243 if (!kstrtoul(buf, 10, &retry_limit))
244 return -ENOTSUPP; 244 return -ENOTSUPP;
245 if (retry_limit > 15) 245 if (retry_limit > 15)
246 return -ENOTSUPP; 246 return -ENOTSUPP;
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c
new file mode 100644
index 000000000000..8d683070bdb3
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11h.c
@@ -0,0 +1,101 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11h
3 *
4 * Copyright (C) 2013, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "main.h"
21#include "fw.h"
22
23
24/* This function appends 11h info to a buffer while joining an
25 * infrastructure BSS
26 */
27static void
28mwifiex_11h_process_infra_join(struct mwifiex_private *priv, u8 **buffer,
29 struct mwifiex_bssdescriptor *bss_desc)
30{
31 struct mwifiex_ie_types_header *ie_header;
32 struct mwifiex_ie_types_pwr_capability *cap;
33 struct mwifiex_ie_types_local_pwr_constraint *constraint;
34 struct ieee80211_supported_band *sband;
35 u8 radio_type;
36 int i;
37
38 if (!buffer || !(*buffer))
39 return;
40
41 radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
42 sband = priv->wdev->wiphy->bands[radio_type];
43
44 cap = (struct mwifiex_ie_types_pwr_capability *)*buffer;
45 cap->header.type = cpu_to_le16(WLAN_EID_PWR_CAPABILITY);
46 cap->header.len = cpu_to_le16(2);
47 cap->min_pwr = 0;
48 cap->max_pwr = 0;
49 *buffer += sizeof(*cap);
50
51 constraint = (struct mwifiex_ie_types_local_pwr_constraint *)*buffer;
52 constraint->header.type = cpu_to_le16(WLAN_EID_PWR_CONSTRAINT);
53 constraint->header.len = cpu_to_le16(2);
54 constraint->chan = bss_desc->channel;
55 constraint->constraint = bss_desc->local_constraint;
56 *buffer += sizeof(*constraint);
57
58 ie_header = (struct mwifiex_ie_types_header *)*buffer;
59 ie_header->type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
60 ie_header->len = cpu_to_le16(2 * sband->n_channels + 2);
61 *buffer += sizeof(*ie_header);
62 *(*buffer)++ = WLAN_EID_SUPPORTED_CHANNELS;
63 *(*buffer)++ = 2 * sband->n_channels;
64 for (i = 0; i < sband->n_channels; i++) {
65 *(*buffer)++ = ieee80211_frequency_to_channel(
66 sband->channels[i].center_freq);
67 *(*buffer)++ = 1; /* one channel in the subband */
68 }
69}
70
71/* Enable or disable the 11h extensions in the firmware */
72static int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag)
73{
74 u32 enable = flag;
75
76 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
77 HostCmd_ACT_GEN_SET, DOT11H_I, &enable);
78}
79
80/* This functions processes TLV buffer for a pending BSS Join command.
81 *
82 * Activate 11h functionality in the firmware if the spectrum management
83 * capability bit is found in the network we are joining. Also, necessary
84 * TLVs are set based on requested network's 11h capability.
85 */
86void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
87 struct mwifiex_bssdescriptor *bss_desc)
88{
89 if (bss_desc->sensed_11h) {
90 /* Activate 11h functions in firmware, turns on capability
91 * bit
92 */
93 mwifiex_11h_activate(priv, true);
94 bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_SPECTRUM_MGMT;
95 mwifiex_11h_process_infra_join(priv, buffer, bss_desc);
96 } else {
97 /* Deactivate 11h functions in the firmware */
98 mwifiex_11h_activate(priv, false);
99 bss_desc->cap_info_bitmap &= ~WLAN_CAPABILITY_SPECTRUM_MGMT;
100 }
101}
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 4f614aad9ded..f7ff4725506a 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -3,13 +3,13 @@ config MWIFIEX
3 depends on CFG80211 3 depends on CFG80211
4 ---help--- 4 ---help---
5 This adds support for wireless adapters based on Marvell 5 This adds support for wireless adapters based on Marvell
6 802.11n chipsets. 6 802.11n/ac chipsets.
7 7
8 If you choose to build it as a module, it will be called 8 If you choose to build it as a module, it will be called
9 mwifiex. 9 mwifiex.
10 10
11config MWIFIEX_SDIO 11config MWIFIEX_SDIO
12 tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797" 12 tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8897"
13 depends on MWIFIEX && MMC 13 depends on MWIFIEX && MMC
14 select FW_LOADER 14 select FW_LOADER
15 ---help--- 15 ---help---
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index ecf28464367f..a42a506fd32b 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -40,6 +40,7 @@ mwifiex-y += sta_rx.o
40mwifiex-y += uap_txrx.o 40mwifiex-y += uap_txrx.o
41mwifiex-y += cfg80211.o 41mwifiex-y += cfg80211.o
42mwifiex-y += ethtool.o 42mwifiex-y += ethtool.o
43mwifiex-y += 11h.o
43mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o 44mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
44obj-$(CONFIG_MWIFIEX) += mwifiex.o 45obj-$(CONFIG_MWIFIEX) += mwifiex.o
45 46
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index e42b266a023a..ef5fa890a286 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -20,6 +20,9 @@
20#include "cfg80211.h" 20#include "cfg80211.h"
21#include "main.h" 21#include "main.h"
22 22
23static char *reg_alpha2;
24module_param(reg_alpha2, charp, 0);
25
23static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = { 26static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
24 { 27 {
25 .max = 2, .types = BIT(NL80211_IFTYPE_STATION), 28 .max = 2, .types = BIT(NL80211_IFTYPE_STATION),
@@ -1231,6 +1234,51 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
1231 return 0; 1234 return 0;
1232} 1235}
1233 1236
1237/* cfg80211 operation handler for del_station.
1238 * Function deauthenticates station which value is provided in mac parameter.
1239 * If mac is NULL/broadcast, all stations in associated station list are
1240 * deauthenticated. If bss is not started or there are no stations in
1241 * associated stations list, no action is taken.
1242 */
1243static int
1244mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
1245 u8 *mac)
1246{
1247 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1248 struct mwifiex_sta_node *sta_node;
1249 unsigned long flags;
1250
1251 if (list_empty(&priv->sta_list) || !priv->bss_started)
1252 return 0;
1253
1254 if (!mac || is_broadcast_ether_addr(mac)) {
1255 wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__);
1256 list_for_each_entry(sta_node, &priv->sta_list, list) {
1257 if (mwifiex_send_cmd_sync(priv,
1258 HostCmd_CMD_UAP_STA_DEAUTH,
1259 HostCmd_ACT_GEN_SET, 0,
1260 sta_node->mac_addr))
1261 return -1;
1262 mwifiex_uap_del_sta_data(priv, sta_node);
1263 }
1264 } else {
1265 wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, mac);
1266 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
1267 sta_node = mwifiex_get_sta_entry(priv, mac);
1268 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
1269 if (sta_node) {
1270 if (mwifiex_send_cmd_sync(priv,
1271 HostCmd_CMD_UAP_STA_DEAUTH,
1272 HostCmd_ACT_GEN_SET, 0,
1273 sta_node->mac_addr))
1274 return -1;
1275 mwifiex_uap_del_sta_data(priv, sta_node);
1276 }
1277 }
1278
1279 return 0;
1280}
1281
1234static int 1282static int
1235mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) 1283mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
1236{ 1284{
@@ -1859,6 +1907,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1859 int i, offset, ret; 1907 int i, offset, ret;
1860 struct ieee80211_channel *chan; 1908 struct ieee80211_channel *chan;
1861 struct ieee_types_header *ie; 1909 struct ieee_types_header *ie;
1910 struct mwifiex_user_scan_cfg *user_scan_cfg;
1862 1911
1863 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); 1912 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
1864 1913
@@ -1869,20 +1918,22 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1869 return -EBUSY; 1918 return -EBUSY;
1870 } 1919 }
1871 1920
1872 if (priv->user_scan_cfg) { 1921 /* Block scan request if scan operation or scan cleanup when interface
1922 * is disabled is in process
1923 */
1924 if (priv->scan_request || priv->scan_aborting) {
1873 dev_err(priv->adapter->dev, "cmd: Scan already in process..\n"); 1925 dev_err(priv->adapter->dev, "cmd: Scan already in process..\n");
1874 return -EBUSY; 1926 return -EBUSY;
1875 } 1927 }
1876 1928
1877 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), 1929 user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
1878 GFP_KERNEL); 1930 if (!user_scan_cfg)
1879 if (!priv->user_scan_cfg)
1880 return -ENOMEM; 1931 return -ENOMEM;
1881 1932
1882 priv->scan_request = request; 1933 priv->scan_request = request;
1883 1934
1884 priv->user_scan_cfg->num_ssids = request->n_ssids; 1935 user_scan_cfg->num_ssids = request->n_ssids;
1885 priv->user_scan_cfg->ssid_list = request->ssids; 1936 user_scan_cfg->ssid_list = request->ssids;
1886 1937
1887 if (request->ie && request->ie_len) { 1938 if (request->ie && request->ie_len) {
1888 offset = 0; 1939 offset = 0;
@@ -1902,25 +1953,25 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1902 for (i = 0; i < min_t(u32, request->n_channels, 1953 for (i = 0; i < min_t(u32, request->n_channels,
1903 MWIFIEX_USER_SCAN_CHAN_MAX); i++) { 1954 MWIFIEX_USER_SCAN_CHAN_MAX); i++) {
1904 chan = request->channels[i]; 1955 chan = request->channels[i];
1905 priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; 1956 user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
1906 priv->user_scan_cfg->chan_list[i].radio_type = chan->band; 1957 user_scan_cfg->chan_list[i].radio_type = chan->band;
1907 1958
1908 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) 1959 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
1909 priv->user_scan_cfg->chan_list[i].scan_type = 1960 user_scan_cfg->chan_list[i].scan_type =
1910 MWIFIEX_SCAN_TYPE_PASSIVE; 1961 MWIFIEX_SCAN_TYPE_PASSIVE;
1911 else 1962 else
1912 priv->user_scan_cfg->chan_list[i].scan_type = 1963 user_scan_cfg->chan_list[i].scan_type =
1913 MWIFIEX_SCAN_TYPE_ACTIVE; 1964 MWIFIEX_SCAN_TYPE_ACTIVE;
1914 1965
1915 priv->user_scan_cfg->chan_list[i].scan_time = 0; 1966 user_scan_cfg->chan_list[i].scan_time = 0;
1916 } 1967 }
1917 1968
1918 ret = mwifiex_scan_networks(priv, priv->user_scan_cfg); 1969 ret = mwifiex_scan_networks(priv, user_scan_cfg);
1970 kfree(user_scan_cfg);
1919 if (ret) { 1971 if (ret) {
1920 dev_err(priv->adapter->dev, "scan failed: %d\n", ret); 1972 dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
1973 priv->scan_aborting = false;
1921 priv->scan_request = NULL; 1974 priv->scan_request = NULL;
1922 kfree(priv->user_scan_cfg);
1923 priv->user_scan_cfg = NULL;
1924 return ret; 1975 return ret;
1925 } 1976 }
1926 1977
@@ -2419,6 +2470,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
2419 .change_beacon = mwifiex_cfg80211_change_beacon, 2470 .change_beacon = mwifiex_cfg80211_change_beacon,
2420 .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config, 2471 .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
2421 .set_antenna = mwifiex_cfg80211_set_antenna, 2472 .set_antenna = mwifiex_cfg80211_set_antenna,
2473 .del_station = mwifiex_cfg80211_del_station,
2422#ifdef CONFIG_PM 2474#ifdef CONFIG_PM
2423 .suspend = mwifiex_cfg80211_suspend, 2475 .suspend = mwifiex_cfg80211_suspend,
2424 .resume = mwifiex_cfg80211_resume, 2476 .resume = mwifiex_cfg80211_resume,
@@ -2426,6 +2478,27 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
2426#endif 2478#endif
2427}; 2479};
2428 2480
2481#ifdef CONFIG_PM
2482static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
2483 .flags = WIPHY_WOWLAN_MAGIC_PKT,
2484 .n_patterns = MWIFIEX_MAX_FILTERS,
2485 .pattern_min_len = 1,
2486 .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
2487 .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
2488};
2489#endif
2490
2491static bool mwifiex_is_valid_alpha2(const char *alpha2)
2492{
2493 if (!alpha2 || strlen(alpha2) != 2)
2494 return false;
2495
2496 if (isalpha(alpha2[0]) && isalpha(alpha2[1]))
2497 return true;
2498
2499 return false;
2500}
2501
2429/* 2502/*
2430 * This function registers the device with CFG802.11 subsystem. 2503 * This function registers the device with CFG802.11 subsystem.
2431 * 2504 *
@@ -2478,16 +2551,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
2478 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | 2551 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
2479 WIPHY_FLAG_AP_UAPSD | 2552 WIPHY_FLAG_AP_UAPSD |
2480 WIPHY_FLAG_CUSTOM_REGULATORY | 2553 WIPHY_FLAG_CUSTOM_REGULATORY |
2554 WIPHY_FLAG_STRICT_REGULATORY |
2481 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 2555 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
2482 2556
2483 wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom); 2557 wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
2484 2558
2485#ifdef CONFIG_PM 2559#ifdef CONFIG_PM
2486 wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT; 2560 wiphy->wowlan = &mwifiex_wowlan_support;
2487 wiphy->wowlan.n_patterns = MWIFIEX_MAX_FILTERS;
2488 wiphy->wowlan.pattern_min_len = 1;
2489 wiphy->wowlan.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN;
2490 wiphy->wowlan.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN;
2491#endif 2561#endif
2492 2562
2493 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 2563 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
@@ -2519,10 +2589,16 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
2519 wiphy_free(wiphy); 2589 wiphy_free(wiphy);
2520 return ret; 2590 return ret;
2521 } 2591 }
2522 country_code = mwifiex_11d_code_2_region(priv->adapter->region_code); 2592
2523 if (country_code) 2593 if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
2524 dev_info(adapter->dev, 2594 wiphy_info(wiphy, "driver hint alpha2: %2.2s\n", reg_alpha2);
2525 "ignoring F/W country code %2.2s\n", country_code); 2595 regulatory_hint(wiphy, reg_alpha2);
2596 } else {
2597 country_code = mwifiex_11d_code_2_region(adapter->region_code);
2598 if (country_code)
2599 wiphy_info(wiphy, "ignoring F/W country code %2.2s\n",
2600 country_code);
2601 }
2526 2602
2527 adapter->wiphy = wiphy; 2603 adapter->wiphy = wiphy;
2528 return ret; 2604 return ret;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 26755d9acb55..2d761477d15e 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -570,6 +570,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
570 case HostCmd_CMD_UAP_SYS_CONFIG: 570 case HostCmd_CMD_UAP_SYS_CONFIG:
571 case HostCmd_CMD_UAP_BSS_START: 571 case HostCmd_CMD_UAP_BSS_START:
572 case HostCmd_CMD_UAP_BSS_STOP: 572 case HostCmd_CMD_UAP_BSS_STOP:
573 case HostCmd_CMD_UAP_STA_DEAUTH:
573 ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action, 574 ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action,
574 cmd_oid, data_buf, 575 cmd_oid, data_buf,
575 cmd_ptr); 576 cmd_ptr);
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 1f7578d553ec..1b45aa533300 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -245,6 +245,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
245#define HT_BW_20 0 245#define HT_BW_20 0
246#define HT_BW_40 1 246#define HT_BW_40 1
247 247
248#define DFS_CHAN_MOVE_TIME 10000
249
248#define HostCmd_CMD_GET_HW_SPEC 0x0003 250#define HostCmd_CMD_GET_HW_SPEC 0x0003
249#define HostCmd_CMD_802_11_SCAN 0x0006 251#define HostCmd_CMD_802_11_SCAN 0x0006
250#define HostCmd_CMD_802_11_GET_LOG 0x000b 252#define HostCmd_CMD_802_11_GET_LOG 0x000b
@@ -271,6 +273,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
271#define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075 273#define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075
272#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f 274#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f
273#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083 275#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083
276#define HostCmd_CMD_CFG_DATA 0x008f
274#define HostCmd_CMD_VERSION_EXT 0x0097 277#define HostCmd_CMD_VERSION_EXT 0x0097
275#define HostCmd_CMD_MEF_CFG 0x009a 278#define HostCmd_CMD_MEF_CFG 0x009a
276#define HostCmd_CMD_RSSI_INFO 0x00a4 279#define HostCmd_CMD_RSSI_INFO 0x00a4
@@ -279,6 +282,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
279#define HostCmd_CMD_UAP_SYS_CONFIG 0x00b0 282#define HostCmd_CMD_UAP_SYS_CONFIG 0x00b0
280#define HostCmd_CMD_UAP_BSS_START 0x00b1 283#define HostCmd_CMD_UAP_BSS_START 0x00b1
281#define HostCmd_CMD_UAP_BSS_STOP 0x00b2 284#define HostCmd_CMD_UAP_BSS_STOP 0x00b2
285#define HostCmd_CMD_UAP_STA_DEAUTH 0x00b5
282#define HostCmd_CMD_11N_CFG 0x00cd 286#define HostCmd_CMD_11N_CFG 0x00cd
283#define HostCmd_CMD_11N_ADDBA_REQ 0x00ce 287#define HostCmd_CMD_11N_ADDBA_REQ 0x00ce
284#define HostCmd_CMD_11N_ADDBA_RSP 0x00cf 288#define HostCmd_CMD_11N_ADDBA_RSP 0x00cf
@@ -436,6 +440,7 @@ enum P2P_MODES {
436#define EVENT_BW_CHANGE 0x00000048 440#define EVENT_BW_CHANGE 0x00000048
437#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c 441#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
438#define EVENT_HOSTWAKE_STAIE 0x0000004d 442#define EVENT_HOSTWAKE_STAIE 0x0000004d
443#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
439#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f 444#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
440 445
441#define EVENT_ID_MASK 0xffff 446#define EVENT_ID_MASK 0xffff
@@ -464,6 +469,8 @@ enum P2P_MODES {
464#define MWIFIEX_CRITERIA_UNICAST BIT(1) 469#define MWIFIEX_CRITERIA_UNICAST BIT(1)
465#define MWIFIEX_CRITERIA_MULTICAST BIT(3) 470#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
466 471
472#define CFG_DATA_TYPE_CAL 2
473
467struct mwifiex_ie_types_header { 474struct mwifiex_ie_types_header {
468 __le16 type; 475 __le16 type;
469 __le16 len; 476 __le16 len;
@@ -971,6 +978,7 @@ enum SNMP_MIB_INDEX {
971 LONG_RETRY_LIM_I = 7, 978 LONG_RETRY_LIM_I = 7,
972 FRAG_THRESH_I = 8, 979 FRAG_THRESH_I = 8,
973 DOT11D_I = 9, 980 DOT11D_I = 9,
981 DOT11H_I = 10,
974}; 982};
975 983
976#define MAX_SNMP_BUF_SIZE 128 984#define MAX_SNMP_BUF_SIZE 128
@@ -1197,6 +1205,23 @@ struct host_cmd_ds_amsdu_aggr_ctrl {
1197 __le16 curr_buf_size; 1205 __le16 curr_buf_size;
1198} __packed; 1206} __packed;
1199 1207
1208struct host_cmd_ds_sta_deauth {
1209 u8 mac[ETH_ALEN];
1210 __le16 reason;
1211} __packed;
1212
1213struct mwifiex_ie_types_pwr_capability {
1214 struct mwifiex_ie_types_header header;
1215 s8 min_pwr;
1216 s8 max_pwr;
1217};
1218
1219struct mwifiex_ie_types_local_pwr_constraint {
1220 struct mwifiex_ie_types_header header;
1221 u8 chan;
1222 u8 constraint;
1223};
1224
1200struct mwifiex_ie_types_wmm_param_set { 1225struct mwifiex_ie_types_wmm_param_set {
1201 struct mwifiex_ie_types_header header; 1226 struct mwifiex_ie_types_header header;
1202 u8 wmm_ie[1]; 1227 u8 wmm_ie[1];
@@ -1573,6 +1598,12 @@ struct mwifiex_ie_list {
1573 struct mwifiex_ie ie_list[MAX_MGMT_IE_INDEX]; 1598 struct mwifiex_ie ie_list[MAX_MGMT_IE_INDEX];
1574} __packed; 1599} __packed;
1575 1600
1601struct host_cmd_ds_802_11_cfg_data {
1602 __le16 action;
1603 __le16 type;
1604 __le16 data_len;
1605} __packed;
1606
1576struct host_cmd_ds_command { 1607struct host_cmd_ds_command {
1577 __le16 command; 1608 __le16 command;
1578 __le16 size; 1609 __le16 size;
@@ -1630,7 +1661,9 @@ struct host_cmd_ds_command {
1630 struct host_cmd_ds_802_11_eeprom_access eeprom; 1661 struct host_cmd_ds_802_11_eeprom_access eeprom;
1631 struct host_cmd_ds_802_11_subsc_evt subsc_evt; 1662 struct host_cmd_ds_802_11_subsc_evt subsc_evt;
1632 struct host_cmd_ds_sys_config uap_sys_config; 1663 struct host_cmd_ds_sys_config uap_sys_config;
1664 struct host_cmd_ds_sta_deauth sta_deauth;
1633 struct host_cmd_11ac_vht_cfg vht_cfg; 1665 struct host_cmd_11ac_vht_cfg vht_cfg;
1666 struct host_cmd_ds_802_11_cfg_data cfg_data;
1634 } params; 1667 } params;
1635} __packed; 1668} __packed;
1636 1669
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 9f44fda19db9..caaf4bd56b30 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -52,87 +52,6 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
52 return 0; 52 return 0;
53} 53}
54 54
55static void scan_delay_timer_fn(unsigned long data)
56{
57 struct mwifiex_private *priv = (struct mwifiex_private *)data;
58 struct mwifiex_adapter *adapter = priv->adapter;
59 struct cmd_ctrl_node *cmd_node, *tmp_node;
60 unsigned long flags;
61
62 if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
63 /*
64 * Abort scan operation by cancelling all pending scan
65 * commands
66 */
67 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
68 list_for_each_entry_safe(cmd_node, tmp_node,
69 &adapter->scan_pending_q, list) {
70 list_del(&cmd_node->list);
71 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
72 }
73 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
74
75 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
76 adapter->scan_processing = false;
77 adapter->scan_delay_cnt = 0;
78 adapter->empty_tx_q_cnt = 0;
79 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
80
81 if (priv->user_scan_cfg) {
82 if (priv->scan_request) {
83 dev_dbg(priv->adapter->dev,
84 "info: aborting scan\n");
85 cfg80211_scan_done(priv->scan_request, 1);
86 priv->scan_request = NULL;
87 } else {
88 dev_dbg(priv->adapter->dev,
89 "info: scan already aborted\n");
90 }
91
92 kfree(priv->user_scan_cfg);
93 priv->user_scan_cfg = NULL;
94 }
95 goto done;
96 }
97
98 if (!atomic_read(&priv->adapter->is_tx_received)) {
99 adapter->empty_tx_q_cnt++;
100 if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
101 /*
102 * No Tx traffic for 200msec. Get scan command from
103 * scan pending queue and put to cmd pending queue to
104 * resume scan operation
105 */
106 adapter->scan_delay_cnt = 0;
107 adapter->empty_tx_q_cnt = 0;
108 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
109 cmd_node = list_first_entry(&adapter->scan_pending_q,
110 struct cmd_ctrl_node, list);
111 list_del(&cmd_node->list);
112 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
113 flags);
114
115 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
116 true);
117 queue_work(adapter->workqueue, &adapter->main_work);
118 goto done;
119 }
120 } else {
121 adapter->empty_tx_q_cnt = 0;
122 }
123
124 /* Delay scan operation further by 20msec */
125 mod_timer(&priv->scan_delay_timer, jiffies +
126 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
127 adapter->scan_delay_cnt++;
128
129done:
130 if (atomic_read(&priv->adapter->is_tx_received))
131 atomic_set(&priv->adapter->is_tx_received, false);
132
133 return;
134}
135
136/* 55/*
137 * This function initializes the private structure and sets default 56 * This function initializes the private structure and sets default
138 * values to the members. 57 * values to the members.
@@ -214,8 +133,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
214 133
215 priv->scan_block = false; 134 priv->scan_block = false;
216 135
217 setup_timer(&priv->scan_delay_timer, scan_delay_timer_fn, 136 priv->csa_chan = 0;
218 (unsigned long)priv); 137 priv->csa_expire_time = 0;
219 138
220 return mwifiex_add_bss_prio_tbl(priv); 139 return mwifiex_add_bss_prio_tbl(priv);
221} 140}
@@ -447,23 +366,29 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
447} 366}
448 367
449/* 368/*
450 * This function frees the adapter structure. 369 * This function performs cleanup for adapter structure.
451 * 370 *
452 * The freeing operation is done recursively, by canceling all 371 * The cleanup is done recursively, by canceling all pending
453 * pending commands, freeing the member buffers previously 372 * commands, freeing the member buffers previously allocated
454 * allocated (command buffers, scan table buffer, sleep confirm 373 * (command buffers, scan table buffer, sleep confirm command
455 * command buffer), stopping the timers and calling the cleanup 374 * buffer), stopping the timers and calling the cleanup routines
456 * routines for every interface, before the actual adapter 375 * for every interface.
457 * structure is freed.
458 */ 376 */
459static void 377static void
460mwifiex_free_adapter(struct mwifiex_adapter *adapter) 378mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
461{ 379{
380 int i;
381
462 if (!adapter) { 382 if (!adapter) {
463 pr_err("%s: adapter is NULL\n", __func__); 383 pr_err("%s: adapter is NULL\n", __func__);
464 return; 384 return;
465 } 385 }
466 386
387 for (i = 0; i < adapter->priv_num; i++) {
388 if (adapter->priv[i])
389 del_timer_sync(&adapter->priv[i]->scan_delay_timer);
390 }
391
467 mwifiex_cancel_all_pending_cmd(adapter); 392 mwifiex_cancel_all_pending_cmd(adapter);
468 393
469 /* Free lock variables */ 394 /* Free lock variables */
@@ -684,7 +609,6 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
684 int ret = -EINPROGRESS; 609 int ret = -EINPROGRESS;
685 struct mwifiex_private *priv; 610 struct mwifiex_private *priv;
686 s32 i; 611 s32 i;
687 unsigned long flags;
688 struct sk_buff *skb; 612 struct sk_buff *skb;
689 613
690 /* mwifiex already shutdown */ 614 /* mwifiex already shutdown */
@@ -719,7 +643,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
719 } 643 }
720 } 644 }
721 645
722 spin_lock_irqsave(&adapter->mwifiex_lock, flags); 646 spin_lock(&adapter->mwifiex_lock);
723 647
724 if (adapter->if_ops.data_complete) { 648 if (adapter->if_ops.data_complete) {
725 while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) { 649 while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) {
@@ -733,10 +657,9 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
733 } 657 }
734 } 658 }
735 659
736 /* Free adapter structure */ 660 mwifiex_adapter_cleanup(adapter);
737 mwifiex_free_adapter(adapter);
738 661
739 spin_unlock_irqrestore(&adapter->mwifiex_lock, flags); 662 spin_unlock(&adapter->mwifiex_lock);
740 663
741 /* Notify completion */ 664 /* Notify completion */
742 ret = mwifiex_shutdown_fw_complete(adapter); 665 ret = mwifiex_shutdown_fw_complete(adapter);
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 6bcb66e6e97c..1c8a771e8e81 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -534,6 +534,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
534 534
535 mwifiex_cmd_append_tsf_tlv(priv, &pos, bss_desc); 535 mwifiex_cmd_append_tsf_tlv(priv, &pos, bss_desc);
536 536
537 mwifiex_11h_process_join(priv, &pos, bss_desc);
538
537 cmd->size = cpu_to_le16((u16) (pos - (u8 *) assoc) + S_DS_GEN); 539 cmd->size = cpu_to_le16((u16) (pos - (u8 *) assoc) + S_DS_GEN);
538 540
539 /* Set the Capability info at last */ 541 /* Set the Capability info at last */
@@ -919,9 +921,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
919 memcpy(&priv->curr_bss_params.data_rates, 921 memcpy(&priv->curr_bss_params.data_rates,
920 &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates); 922 &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
921 923
922 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n", 924 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%4ph\n",
923 adhoc_start->data_rate[0], adhoc_start->data_rate[1], 925 adhoc_start->data_rate);
924 adhoc_start->data_rate[2], adhoc_start->data_rate[3]);
925 926
926 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n"); 927 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
927 928
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 2eb88ea9acf7..e15ab72fb03d 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -25,6 +25,86 @@
25#define VERSION "1.0" 25#define VERSION "1.0"
26 26
27const char driver_version[] = "mwifiex " VERSION " (%s) "; 27const char driver_version[] = "mwifiex " VERSION " (%s) ";
28static char *cal_data_cfg;
29module_param(cal_data_cfg, charp, 0);
30
31static void scan_delay_timer_fn(unsigned long data)
32{
33 struct mwifiex_private *priv = (struct mwifiex_private *)data;
34 struct mwifiex_adapter *adapter = priv->adapter;
35 struct cmd_ctrl_node *cmd_node, *tmp_node;
36 unsigned long flags;
37
38 if (adapter->surprise_removed)
39 return;
40
41 if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
42 /*
43 * Abort scan operation by cancelling all pending scan
44 * commands
45 */
46 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
47 list_for_each_entry_safe(cmd_node, tmp_node,
48 &adapter->scan_pending_q, list) {
49 list_del(&cmd_node->list);
50 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
51 }
52 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
53
54 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
55 adapter->scan_processing = false;
56 adapter->scan_delay_cnt = 0;
57 adapter->empty_tx_q_cnt = 0;
58 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
59
60 if (priv->scan_request) {
61 dev_dbg(adapter->dev, "info: aborting scan\n");
62 cfg80211_scan_done(priv->scan_request, 1);
63 priv->scan_request = NULL;
64 } else {
65 priv->scan_aborting = false;
66 dev_dbg(adapter->dev, "info: scan already aborted\n");
67 }
68 goto done;
69 }
70
71 if (!atomic_read(&priv->adapter->is_tx_received)) {
72 adapter->empty_tx_q_cnt++;
73 if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
74 /*
75 * No Tx traffic for 200msec. Get scan command from
76 * scan pending queue and put to cmd pending queue to
77 * resume scan operation
78 */
79 adapter->scan_delay_cnt = 0;
80 adapter->empty_tx_q_cnt = 0;
81 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
82 cmd_node = list_first_entry(&adapter->scan_pending_q,
83 struct cmd_ctrl_node, list);
84 list_del(&cmd_node->list);
85 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
86 flags);
87
88 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
89 true);
90 queue_work(adapter->workqueue, &adapter->main_work);
91 goto done;
92 }
93 } else {
94 adapter->empty_tx_q_cnt = 0;
95 }
96
97 /* Delay scan operation further by 20msec */
98 mod_timer(&priv->scan_delay_timer, jiffies +
99 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
100 adapter->scan_delay_cnt++;
101
102done:
103 if (atomic_read(&priv->adapter->is_tx_received))
104 atomic_set(&priv->adapter->is_tx_received, false);
105
106 return;
107}
28 108
29/* 109/*
30 * This function registers the device and performs all the necessary 110 * This function registers the device and performs all the necessary
@@ -73,6 +153,10 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
73 153
74 adapter->priv[i]->adapter = adapter; 154 adapter->priv[i]->adapter = adapter;
75 adapter->priv_num++; 155 adapter->priv_num++;
156
157 setup_timer(&adapter->priv[i]->scan_delay_timer,
158 scan_delay_timer_fn,
159 (unsigned long)adapter->priv[i]);
76 } 160 }
77 mwifiex_init_lock_list(adapter); 161 mwifiex_init_lock_list(adapter);
78 162
@@ -336,6 +420,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
336 420
337 dev_notice(adapter->dev, "WLAN FW is active\n"); 421 dev_notice(adapter->dev, "WLAN FW is active\n");
338 422
423 if (cal_data_cfg) {
424 if ((request_firmware(&adapter->cal_data, cal_data_cfg,
425 adapter->dev)) < 0)
426 dev_err(adapter->dev,
427 "Cal data request_firmware() failed\n");
428 }
429
339 adapter->init_wait_q_woken = false; 430 adapter->init_wait_q_woken = false;
340 ret = mwifiex_init_fw(adapter); 431 ret = mwifiex_init_fw(adapter);
341 if (ret == -1) { 432 if (ret == -1) {
@@ -390,6 +481,10 @@ err_init_fw:
390 pr_debug("info: %s: unregister device\n", __func__); 481 pr_debug("info: %s: unregister device\n", __func__);
391 adapter->if_ops.unregister_dev(adapter); 482 adapter->if_ops.unregister_dev(adapter);
392done: 483done:
484 if (adapter->cal_data) {
485 release_firmware(adapter->cal_data);
486 adapter->cal_data = NULL;
487 }
393 release_firmware(adapter->firmware); 488 release_firmware(adapter->firmware);
394 complete(&adapter->fw_load); 489 complete(&adapter->fw_load);
395 return; 490 return;
@@ -436,6 +531,7 @@ mwifiex_close(struct net_device *dev)
436 dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n"); 531 dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n");
437 cfg80211_scan_done(priv->scan_request, 1); 532 cfg80211_scan_done(priv->scan_request, 1);
438 priv->scan_request = NULL; 533 priv->scan_request = NULL;
534 priv->scan_aborting = true;
439 } 535 }
440 536
441 return 0; 537 return 0;
@@ -573,9 +669,8 @@ static void mwifiex_set_multicast_list(struct net_device *dev)
573 mcast_list.mode = MWIFIEX_ALL_MULTI_MODE; 669 mcast_list.mode = MWIFIEX_ALL_MULTI_MODE;
574 } else { 670 } else {
575 mcast_list.mode = MWIFIEX_MULTICAST_MODE; 671 mcast_list.mode = MWIFIEX_MULTICAST_MODE;
576 if (netdev_mc_count(dev)) 672 mcast_list.num_multicast_addr =
577 mcast_list.num_multicast_addr = 673 mwifiex_copy_mcast_addr(&mcast_list, dev);
578 mwifiex_copy_mcast_addr(&mcast_list, dev);
579 } 674 }
580 mwifiex_request_set_multicast_list(priv, &mcast_list); 675 mwifiex_request_set_multicast_list(priv, &mcast_list);
581} 676}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 4ef67fca06d3..3da73d36acdf 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -309,6 +309,9 @@ struct mwifiex_bssdescriptor {
309 u16 wapi_offset; 309 u16 wapi_offset;
310 u8 *beacon_buf; 310 u8 *beacon_buf;
311 u32 beacon_buf_size; 311 u32 beacon_buf_size;
312 u8 sensed_11h;
313 u8 local_constraint;
314 u8 chan_sw_ie_present;
312}; 315};
313 316
314struct mwifiex_current_bss_params { 317struct mwifiex_current_bss_params {
@@ -492,7 +495,6 @@ struct mwifiex_private {
492 struct semaphore async_sem; 495 struct semaphore async_sem;
493 u8 report_scan_result; 496 u8 report_scan_result;
494 struct cfg80211_scan_request *scan_request; 497 struct cfg80211_scan_request *scan_request;
495 struct mwifiex_user_scan_cfg *user_scan_cfg;
496 u8 cfg_bssid[6]; 498 u8 cfg_bssid[6];
497 struct wps wps; 499 struct wps wps;
498 u8 scan_block; 500 u8 scan_block;
@@ -510,6 +512,9 @@ struct mwifiex_private {
510 u8 ap_11ac_enabled; 512 u8 ap_11ac_enabled;
511 u32 mgmt_frame_mask; 513 u32 mgmt_frame_mask;
512 struct mwifiex_roc_cfg roc_cfg; 514 struct mwifiex_roc_cfg roc_cfg;
515 bool scan_aborting;
516 u8 csa_chan;
517 unsigned long csa_expire_time;
513}; 518};
514 519
515enum mwifiex_ba_status { 520enum mwifiex_ba_status {
@@ -730,6 +735,7 @@ struct mwifiex_adapter {
730 u16 max_mgmt_ie_index; 735 u16 max_mgmt_ie_index;
731 u8 scan_delay_cnt; 736 u8 scan_delay_cnt;
732 u8 empty_tx_q_cnt; 737 u8 empty_tx_q_cnt;
738 const struct firmware *cal_data;
733 739
734 /* 11AC */ 740 /* 11AC */
735 u32 is_hw_11ac_capable; 741 u32 is_hw_11ac_capable;
@@ -1017,6 +1023,24 @@ static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
1017 return (*(u32 *)skb->data == PKT_TYPE_MGMT); 1023 return (*(u32 *)skb->data == PKT_TYPE_MGMT);
1018} 1024}
1019 1025
1026/* This function retrieves channel closed for operation by Channel
1027 * Switch Announcement.
1028 */
1029static inline u8
1030mwifiex_11h_get_csa_closed_channel(struct mwifiex_private *priv)
1031{
1032 if (!priv->csa_chan)
1033 return 0;
1034
1035 /* Clear csa channel, if DFS channel move time has passed */
1036 if (jiffies > priv->csa_expire_time) {
1037 priv->csa_chan = 0;
1038 priv->csa_expire_time = 0;
1039 }
1040
1041 return priv->csa_chan;
1042}
1043
1020int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, 1044int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
1021 u32 func_init_shutdown); 1045 u32 func_init_shutdown);
1022int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8); 1046int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -1115,6 +1139,12 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
1115 struct cfg80211_beacon_data *data); 1139 struct cfg80211_beacon_data *data);
1116int mwifiex_del_mgmt_ies(struct mwifiex_private *priv); 1140int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
1117u8 *mwifiex_11d_code_2_region(u8 code); 1141u8 *mwifiex_11d_code_2_region(u8 code);
1142void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
1143 struct mwifiex_sta_node *node);
1144
1145void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
1146 struct mwifiex_bssdescriptor *bss_desc);
1147int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv);
1118 1148
1119extern const struct ethtool_ops mwifiex_ethtool_ops; 1149extern const struct ethtool_ops mwifiex_ethtool_ops;
1120 1150
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 9cf5d8f07df8..c447d9bd1aa9 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -391,6 +391,12 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
391 return 0; 391 return 0;
392 } 392 }
393 393
394 if (bss_desc->chan_sw_ie_present) {
395 dev_err(adapter->dev,
396 "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n");
397 return -1;
398 }
399
394 if (mwifiex_is_bss_wapi(priv, bss_desc)) { 400 if (mwifiex_is_bss_wapi(priv, bss_desc)) {
395 dev_dbg(adapter->dev, "info: return success for WAPI AP\n"); 401 dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
396 return 0; 402 return 0;
@@ -569,6 +575,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
569 return -1; 575 return -1;
570 } 576 }
571 577
578 /* Check csa channel expiry before preparing scan list */
579 mwifiex_11h_get_csa_closed_channel(priv);
580
572 chan_tlv_out->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); 581 chan_tlv_out->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
573 582
574 /* Set the temp channel struct pointer to the start of the desired 583 /* Set the temp channel struct pointer to the start of the desired
@@ -598,6 +607,11 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
598 while (tlv_idx < max_chan_per_scan && 607 while (tlv_idx < max_chan_per_scan &&
599 tmp_chan_list->chan_number && !done_early) { 608 tmp_chan_list->chan_number && !done_early) {
600 609
610 if (tmp_chan_list->chan_number == priv->csa_chan) {
611 tmp_chan_list++;
612 continue;
613 }
614
601 dev_dbg(priv->adapter->dev, 615 dev_dbg(priv->adapter->dev,
602 "info: Scan: Chan(%3d), Radio(%d)," 616 "info: Scan: Chan(%3d), Radio(%d),"
603 " Mode(%d, %d), Dur(%d)\n", 617 " Mode(%d, %d), Dur(%d)\n",
@@ -1169,6 +1183,19 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1169 bss_entry->erp_flags = *(current_ptr + 2); 1183 bss_entry->erp_flags = *(current_ptr + 2);
1170 break; 1184 break;
1171 1185
1186 case WLAN_EID_PWR_CONSTRAINT:
1187 bss_entry->local_constraint = *(current_ptr + 2);
1188 bss_entry->sensed_11h = true;
1189 break;
1190
1191 case WLAN_EID_CHANNEL_SWITCH:
1192 bss_entry->chan_sw_ie_present = true;
1193 case WLAN_EID_PWR_CAPABILITY:
1194 case WLAN_EID_TPC_REPORT:
1195 case WLAN_EID_QUIET:
1196 bss_entry->sensed_11h = true;
1197 break;
1198
1172 case WLAN_EID_EXT_SUPP_RATES: 1199 case WLAN_EID_EXT_SUPP_RATES:
1173 /* 1200 /*
1174 * Only process extended supported rate 1201 * Only process extended supported rate
@@ -1575,6 +1602,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1575 goto check_next_scan; 1602 goto check_next_scan;
1576 } 1603 }
1577 1604
1605 /* Check csa channel expiry before parsing scan response */
1606 mwifiex_11h_get_csa_closed_channel(priv);
1607
1578 bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); 1608 bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
1579 dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n", 1609 dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
1580 bytes_left); 1610 bytes_left);
@@ -1727,6 +1757,13 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1727 struct ieee80211_channel *chan; 1757 struct ieee80211_channel *chan;
1728 u8 band; 1758 u8 band;
1729 1759
1760 /* Skip entry if on csa closed channel */
1761 if (channel == priv->csa_chan) {
1762 dev_dbg(adapter->dev,
1763 "Dropping entry on csa closed channel\n");
1764 continue;
1765 }
1766
1730 band = BAND_G; 1767 band = BAND_G;
1731 if (chan_band_tlv) { 1768 if (chan_band_tlv) {
1732 chan_band = 1769 chan_band =
@@ -1784,22 +1821,17 @@ check_next_scan:
1784 if (priv->report_scan_result) 1821 if (priv->report_scan_result)
1785 priv->report_scan_result = false; 1822 priv->report_scan_result = false;
1786 1823
1787 if (priv->user_scan_cfg) { 1824 if (priv->scan_request) {
1788 if (priv->scan_request) { 1825 dev_dbg(adapter->dev, "info: notifying scan done\n");
1789 dev_dbg(priv->adapter->dev, 1826 cfg80211_scan_done(priv->scan_request, 0);
1790 "info: notifying scan done\n"); 1827 priv->scan_request = NULL;
1791 cfg80211_scan_done(priv->scan_request, 0); 1828 } else {
1792 priv->scan_request = NULL; 1829 priv->scan_aborting = false;
1793 } else { 1830 dev_dbg(adapter->dev, "info: scan already aborted\n");
1794 dev_dbg(priv->adapter->dev,
1795 "info: scan already aborted\n");
1796 }
1797
1798 kfree(priv->user_scan_cfg);
1799 priv->user_scan_cfg = NULL;
1800 } 1831 }
1801 } else { 1832 } else {
1802 if (priv->user_scan_cfg && !priv->scan_request) { 1833 if ((priv->scan_aborting && !priv->scan_request) ||
1834 priv->scan_block) {
1803 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 1835 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1804 flags); 1836 flags);
1805 adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT; 1837 adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 363ba31b58bf..5ee5ed02eccd 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -77,6 +77,17 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
77 77
78 func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; 78 func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
79 79
80 if (id->driver_data) {
81 struct mwifiex_sdio_device *data = (void *)id->driver_data;
82
83 card->firmware = data->firmware;
84 card->reg = data->reg;
85 card->max_ports = data->max_ports;
86 card->mp_agg_pkt_limit = data->mp_agg_pkt_limit;
87 card->supports_sdio_new_mode = data->supports_sdio_new_mode;
88 card->has_control_mask = data->has_control_mask;
89 }
90
80 sdio_claim_host(func); 91 sdio_claim_host(func);
81 ret = sdio_enable_func(func); 92 ret = sdio_enable_func(func);
82 sdio_release_host(func); 93 sdio_release_host(func);
@@ -251,12 +262,19 @@ static int mwifiex_sdio_resume(struct device *dev)
251#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119) 262#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
252/* Device ID for SD8797 */ 263/* Device ID for SD8797 */
253#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129) 264#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129)
265/* Device ID for SD8897 */
266#define SDIO_DEVICE_ID_MARVELL_8897 (0x912d)
254 267
255/* WLAN IDs */ 268/* WLAN IDs */
256static const struct sdio_device_id mwifiex_ids[] = { 269static const struct sdio_device_id mwifiex_ids[] = {
257 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786)}, 270 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786),
258 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)}, 271 .driver_data = (unsigned long) &mwifiex_sdio_sd8786},
259 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)}, 272 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787),
273 .driver_data = (unsigned long) &mwifiex_sdio_sd8787},
274 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797),
275 .driver_data = (unsigned long) &mwifiex_sdio_sd8797},
276 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897),
277 .driver_data = (unsigned long) &mwifiex_sdio_sd8897},
260 {}, 278 {},
261}; 279};
262 280
@@ -282,13 +300,13 @@ static struct sdio_driver mwifiex_sdio = {
282 * This function writes data into SDIO card register. 300 * This function writes data into SDIO card register.
283 */ 301 */
284static int 302static int
285mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data) 303mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
286{ 304{
287 struct sdio_mmc_card *card = adapter->card; 305 struct sdio_mmc_card *card = adapter->card;
288 int ret = -1; 306 int ret = -1;
289 307
290 sdio_claim_host(card->func); 308 sdio_claim_host(card->func);
291 sdio_writeb(card->func, (u8) data, reg, &ret); 309 sdio_writeb(card->func, data, reg, &ret);
292 sdio_release_host(card->func); 310 sdio_release_host(card->func);
293 311
294 return ret; 312 return ret;
@@ -298,7 +316,7 @@ mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data)
298 * This function reads data from SDIO card register. 316 * This function reads data from SDIO card register.
299 */ 317 */
300static int 318static int
301mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u32 *data) 319mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data)
302{ 320{
303 struct sdio_mmc_card *card = adapter->card; 321 struct sdio_mmc_card *card = adapter->card;
304 int ret = -1; 322 int ret = -1;
@@ -400,7 +418,40 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
400} 418}
401 419
402/* 420/*
403 * This function initializes the IO ports. 421 * This function is used to initialize IO ports for the
422 * chipsets supporting SDIO new mode eg SD8897.
423 */
424static int mwifiex_init_sdio_new_mode(struct mwifiex_adapter *adapter)
425{
426 u8 reg;
427
428 adapter->ioport = MEM_PORT;
429
430 /* enable sdio new mode */
431 if (mwifiex_read_reg(adapter, CARD_CONFIG_2_1_REG, &reg))
432 return -1;
433 if (mwifiex_write_reg(adapter, CARD_CONFIG_2_1_REG,
434 reg | CMD53_NEW_MODE))
435 return -1;
436
437 /* Configure cmd port and enable reading rx length from the register */
438 if (mwifiex_read_reg(adapter, CMD_CONFIG_0, &reg))
439 return -1;
440 if (mwifiex_write_reg(adapter, CMD_CONFIG_0, reg | CMD_PORT_RD_LEN_EN))
441 return -1;
442
443 /* Enable Dnld/Upld ready auto reset for cmd port after cmd53 is
444 * completed
445 */
446 if (mwifiex_read_reg(adapter, CMD_CONFIG_1, &reg))
447 return -1;
448 if (mwifiex_write_reg(adapter, CMD_CONFIG_1, reg | CMD_PORT_AUTO_EN))
449 return -1;
450
451 return 0;
452}
453
454/* This function initializes the IO ports.
404 * 455 *
405 * The following operations are performed - 456 * The following operations are performed -
406 * - Read the IO ports (0, 1 and 2) 457 * - Read the IO ports (0, 1 and 2)
@@ -409,10 +460,17 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
409 */ 460 */
410static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter) 461static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
411{ 462{
412 u32 reg; 463 u8 reg;
464 struct sdio_mmc_card *card = adapter->card;
413 465
414 adapter->ioport = 0; 466 adapter->ioport = 0;
415 467
468 if (card->supports_sdio_new_mode) {
469 if (mwifiex_init_sdio_new_mode(adapter))
470 return -1;
471 goto cont;
472 }
473
416 /* Read the IO port */ 474 /* Read the IO port */
417 if (!mwifiex_read_reg(adapter, IO_PORT_0_REG, &reg)) 475 if (!mwifiex_read_reg(adapter, IO_PORT_0_REG, &reg))
418 adapter->ioport |= (reg & 0xff); 476 adapter->ioport |= (reg & 0xff);
@@ -428,19 +486,19 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
428 adapter->ioport |= ((reg & 0xff) << 16); 486 adapter->ioport |= ((reg & 0xff) << 16);
429 else 487 else
430 return -1; 488 return -1;
431 489cont:
432 pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport); 490 pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
433 491
434 /* Set Host interrupt reset to read to clear */ 492 /* Set Host interrupt reset to read to clear */
435 if (!mwifiex_read_reg(adapter, HOST_INT_RSR_REG, &reg)) 493 if (!mwifiex_read_reg(adapter, HOST_INT_RSR_REG, &reg))
436 mwifiex_write_reg(adapter, HOST_INT_RSR_REG, 494 mwifiex_write_reg(adapter, HOST_INT_RSR_REG,
437 reg | SDIO_INT_MASK); 495 reg | card->reg->sdio_int_mask);
438 else 496 else
439 return -1; 497 return -1;
440 498
441 /* Dnld/Upld ready set to auto reset */ 499 /* Dnld/Upld ready set to auto reset */
442 if (!mwifiex_read_reg(adapter, CARD_MISC_CFG_REG, &reg)) 500 if (!mwifiex_read_reg(adapter, card->reg->card_misc_cfg_reg, &reg))
443 mwifiex_write_reg(adapter, CARD_MISC_CFG_REG, 501 mwifiex_write_reg(adapter, card->reg->card_misc_cfg_reg,
444 reg | AUTO_RE_ENABLE_INT); 502 reg | AUTO_RE_ENABLE_INT);
445 else 503 else
446 return -1; 504 return -1;
@@ -486,34 +544,42 @@ static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter,
486static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port) 544static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
487{ 545{
488 struct sdio_mmc_card *card = adapter->card; 546 struct sdio_mmc_card *card = adapter->card;
489 u16 rd_bitmap = card->mp_rd_bitmap; 547 const struct mwifiex_sdio_card_reg *reg = card->reg;
548 u32 rd_bitmap = card->mp_rd_bitmap;
490 549
491 dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%04x\n", rd_bitmap); 550 dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
492 551
493 if (!(rd_bitmap & (CTRL_PORT_MASK | DATA_PORT_MASK))) 552 if (card->supports_sdio_new_mode) {
494 return -1; 553 if (!(rd_bitmap & reg->data_port_mask))
554 return -1;
555 } else {
556 if (!(rd_bitmap & (CTRL_PORT_MASK | reg->data_port_mask)))
557 return -1;
558 }
495 559
496 if (card->mp_rd_bitmap & CTRL_PORT_MASK) { 560 if ((card->has_control_mask) &&
497 card->mp_rd_bitmap &= (u16) (~CTRL_PORT_MASK); 561 (card->mp_rd_bitmap & CTRL_PORT_MASK)) {
562 card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK);
498 *port = CTRL_PORT; 563 *port = CTRL_PORT;
499 dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%04x\n", 564 dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n",
500 *port, card->mp_rd_bitmap); 565 *port, card->mp_rd_bitmap);
501 } else { 566 return 0;
502 if (card->mp_rd_bitmap & (1 << card->curr_rd_port)) { 567 }
503 card->mp_rd_bitmap &= (u16)
504 (~(1 << card->curr_rd_port));
505 *port = card->curr_rd_port;
506 568
507 if (++card->curr_rd_port == MAX_PORT) 569 if (!(card->mp_rd_bitmap & (1 << card->curr_rd_port)))
508 card->curr_rd_port = 1; 570 return -1;
509 } else { 571
510 return -1; 572 /* We are now handling the SDIO data ports */
511 } 573 card->mp_rd_bitmap &= (u32)(~(1 << card->curr_rd_port));
574 *port = card->curr_rd_port;
575
576 if (++card->curr_rd_port == card->max_ports)
577 card->curr_rd_port = reg->start_rd_port;
578
579 dev_dbg(adapter->dev,
580 "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
581 *port, rd_bitmap, card->mp_rd_bitmap);
512 582
513 dev_dbg(adapter->dev,
514 "data: port=%d mp_rd_bitmap=0x%04x -> 0x%04x\n",
515 *port, rd_bitmap, card->mp_rd_bitmap);
516 }
517 return 0; 583 return 0;
518} 584}
519 585
@@ -524,35 +590,45 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
524 * increased (provided it does not reach the maximum limit, in which 590 * increased (provided it does not reach the maximum limit, in which
525 * case it is reset to 1) 591 * case it is reset to 1)
526 */ 592 */
527static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port) 593static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port)
528{ 594{
529 struct sdio_mmc_card *card = adapter->card; 595 struct sdio_mmc_card *card = adapter->card;
530 u16 wr_bitmap = card->mp_wr_bitmap; 596 const struct mwifiex_sdio_card_reg *reg = card->reg;
597 u32 wr_bitmap = card->mp_wr_bitmap;
531 598
532 dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%04x\n", wr_bitmap); 599 dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
533 600
534 if (!(wr_bitmap & card->mp_data_port_mask)) 601 if (card->supports_sdio_new_mode &&
602 !(wr_bitmap & reg->data_port_mask)) {
603 adapter->data_sent = true;
604 return -EBUSY;
605 } else if (!card->supports_sdio_new_mode &&
606 !(wr_bitmap & card->mp_data_port_mask)) {
535 return -1; 607 return -1;
608 }
536 609
537 if (card->mp_wr_bitmap & (1 << card->curr_wr_port)) { 610 if (card->mp_wr_bitmap & (1 << card->curr_wr_port)) {
538 card->mp_wr_bitmap &= (u16) (~(1 << card->curr_wr_port)); 611 card->mp_wr_bitmap &= (u32) (~(1 << card->curr_wr_port));
539 *port = card->curr_wr_port; 612 *port = card->curr_wr_port;
540 if (++card->curr_wr_port == card->mp_end_port) 613 if (((card->supports_sdio_new_mode) &&
541 card->curr_wr_port = 1; 614 (++card->curr_wr_port == card->max_ports)) ||
615 ((!card->supports_sdio_new_mode) &&
616 (++card->curr_wr_port == card->mp_end_port)))
617 card->curr_wr_port = reg->start_wr_port;
542 } else { 618 } else {
543 adapter->data_sent = true; 619 adapter->data_sent = true;
544 return -EBUSY; 620 return -EBUSY;
545 } 621 }
546 622
547 if (*port == CTRL_PORT) { 623 if ((card->has_control_mask) && (*port == CTRL_PORT)) {
548 dev_err(adapter->dev, "invalid data port=%d cur port=%d" 624 dev_err(adapter->dev,
549 " mp_wr_bitmap=0x%04x -> 0x%04x\n", 625 "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
550 *port, card->curr_wr_port, wr_bitmap, 626 *port, card->curr_wr_port, wr_bitmap,
551 card->mp_wr_bitmap); 627 card->mp_wr_bitmap);
552 return -1; 628 return -1;
553 } 629 }
554 630
555 dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%04x -> 0x%04x\n", 631 dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
556 *port, wr_bitmap, card->mp_wr_bitmap); 632 *port, wr_bitmap, card->mp_wr_bitmap);
557 633
558 return 0; 634 return 0;
@@ -564,11 +640,12 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port)
564static int 640static int
565mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits) 641mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
566{ 642{
643 struct sdio_mmc_card *card = adapter->card;
567 u32 tries; 644 u32 tries;
568 u32 cs; 645 u8 cs;
569 646
570 for (tries = 0; tries < MAX_POLL_TRIES; tries++) { 647 for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
571 if (mwifiex_read_reg(adapter, CARD_STATUS_REG, &cs)) 648 if (mwifiex_read_reg(adapter, card->reg->poll_reg, &cs))
572 break; 649 break;
573 else if ((cs & bits) == bits) 650 else if ((cs & bits) == bits)
574 return 0; 651 return 0;
@@ -587,12 +664,14 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
587static int 664static int
588mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat) 665mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
589{ 666{
590 u32 fws0, fws1; 667 struct sdio_mmc_card *card = adapter->card;
668 const struct mwifiex_sdio_card_reg *reg = card->reg;
669 u8 fws0, fws1;
591 670
592 if (mwifiex_read_reg(adapter, CARD_FW_STATUS0_REG, &fws0)) 671 if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0))
593 return -1; 672 return -1;
594 673
595 if (mwifiex_read_reg(adapter, CARD_FW_STATUS1_REG, &fws1)) 674 if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1))
596 return -1; 675 return -1;
597 676
598 *dat = (u16) ((fws1 << 8) | fws0); 677 *dat = (u16) ((fws1 << 8) | fws0);
@@ -608,14 +687,14 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
608 */ 687 */
609static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter) 688static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
610{ 689{
611 u32 host_int_mask; 690 u8 host_int_mask, host_int_disable = HOST_INT_DISABLE;
612 691
613 /* Read back the host_int_mask register */ 692 /* Read back the host_int_mask register */
614 if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask)) 693 if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask))
615 return -1; 694 return -1;
616 695
617 /* Update with the mask and write back to the register */ 696 /* Update with the mask and write back to the register */
618 host_int_mask &= ~HOST_INT_DISABLE; 697 host_int_mask &= ~host_int_disable;
619 698
620 if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) { 699 if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) {
621 dev_err(adapter->dev, "disable host interrupt failed\n"); 700 dev_err(adapter->dev, "disable host interrupt failed\n");
@@ -633,8 +712,11 @@ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
633 */ 712 */
634static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter) 713static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
635{ 714{
715 struct sdio_mmc_card *card = adapter->card;
716
636 /* Simply write the mask to the register */ 717 /* Simply write the mask to the register */
637 if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, HOST_INT_ENABLE)) { 718 if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG,
719 card->reg->host_int_enable)) {
638 dev_err(adapter->dev, "enable host interrupt failed\n"); 720 dev_err(adapter->dev, "enable host interrupt failed\n");
639 return -1; 721 return -1;
640 } 722 }
@@ -686,11 +768,13 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
686static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, 768static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
687 struct mwifiex_fw_image *fw) 769 struct mwifiex_fw_image *fw)
688{ 770{
771 struct sdio_mmc_card *card = adapter->card;
772 const struct mwifiex_sdio_card_reg *reg = card->reg;
689 int ret; 773 int ret;
690 u8 *firmware = fw->fw_buf; 774 u8 *firmware = fw->fw_buf;
691 u32 firmware_len = fw->fw_len; 775 u32 firmware_len = fw->fw_len;
692 u32 offset = 0; 776 u32 offset = 0;
693 u32 base0, base1; 777 u8 base0, base1;
694 u8 *fwbuf; 778 u8 *fwbuf;
695 u16 len = 0; 779 u16 len = 0;
696 u32 txlen, tx_blocks = 0, tries; 780 u32 txlen, tx_blocks = 0, tries;
@@ -727,7 +811,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
727 break; 811 break;
728 812
729 for (tries = 0; tries < MAX_POLL_TRIES; tries++) { 813 for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
730 ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_0, 814 ret = mwifiex_read_reg(adapter, reg->base_0_reg,
731 &base0); 815 &base0);
732 if (ret) { 816 if (ret) {
733 dev_err(adapter->dev, 817 dev_err(adapter->dev,
@@ -736,7 +820,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
736 base0, base0); 820 base0, base0);
737 goto done; 821 goto done;
738 } 822 }
739 ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_1, 823 ret = mwifiex_read_reg(adapter, reg->base_1_reg,
740 &base1); 824 &base1);
741 if (ret) { 825 if (ret) {
742 dev_err(adapter->dev, 826 dev_err(adapter->dev,
@@ -828,10 +912,11 @@ done:
828static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, 912static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
829 u32 poll_num) 913 u32 poll_num)
830{ 914{
915 struct sdio_mmc_card *card = adapter->card;
831 int ret = 0; 916 int ret = 0;
832 u16 firmware_stat; 917 u16 firmware_stat;
833 u32 tries; 918 u32 tries;
834 u32 winner_status; 919 u8 winner_status;
835 920
836 /* Wait for firmware initialization event */ 921 /* Wait for firmware initialization event */
837 for (tries = 0; tries < poll_num; tries++) { 922 for (tries = 0; tries < poll_num; tries++) {
@@ -849,7 +934,7 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
849 934
850 if (ret) { 935 if (ret) {
851 if (mwifiex_read_reg 936 if (mwifiex_read_reg
852 (adapter, CARD_FW_STATUS0_REG, &winner_status)) 937 (adapter, card->reg->status_reg_0, &winner_status))
853 winner_status = 0; 938 winner_status = 0;
854 939
855 if (winner_status) 940 if (winner_status)
@@ -866,12 +951,12 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
866static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) 951static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
867{ 952{
868 struct sdio_mmc_card *card = adapter->card; 953 struct sdio_mmc_card *card = adapter->card;
869 u32 sdio_ireg; 954 u8 sdio_ireg;
870 unsigned long flags; 955 unsigned long flags;
871 956
872 if (mwifiex_read_data_sync(adapter, card->mp_regs, MAX_MP_REGS, 957 if (mwifiex_read_data_sync(adapter, card->mp_regs,
873 REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 958 card->reg->max_mp_regs,
874 0)) { 959 REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
875 dev_err(adapter->dev, "read mp_regs failed\n"); 960 dev_err(adapter->dev, "read mp_regs failed\n");
876 return; 961 return;
877 } 962 }
@@ -880,6 +965,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
880 if (sdio_ireg) { 965 if (sdio_ireg) {
881 /* 966 /*
882 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS 967 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
968 * For SDIO new mode CMD port interrupts
969 * DN_LD_CMD_PORT_HOST_INT_STATUS and/or
970 * UP_LD_CMD_PORT_HOST_INT_STATUS
883 * Clear the interrupt status register 971 * Clear the interrupt status register
884 */ 972 */
885 dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg); 973 dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
@@ -1003,11 +1091,11 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1003 s32 f_aggr_cur = 0; 1091 s32 f_aggr_cur = 0;
1004 struct sk_buff *skb_deaggr; 1092 struct sk_buff *skb_deaggr;
1005 u32 pind; 1093 u32 pind;
1006 u32 pkt_len, pkt_type = 0; 1094 u32 pkt_len, pkt_type, mport;
1007 u8 *curr_ptr; 1095 u8 *curr_ptr;
1008 u32 rx_len = skb->len; 1096 u32 rx_len = skb->len;
1009 1097
1010 if (port == CTRL_PORT) { 1098 if ((card->has_control_mask) && (port == CTRL_PORT)) {
1011 /* Read the command Resp without aggr */ 1099 /* Read the command Resp without aggr */
1012 dev_dbg(adapter->dev, "info: %s: no aggregation for cmd " 1100 dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
1013 "response\n", __func__); 1101 "response\n", __func__);
@@ -1024,7 +1112,10 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1024 goto rx_curr_single; 1112 goto rx_curr_single;
1025 } 1113 }
1026 1114
1027 if (card->mp_rd_bitmap & (~((u16) CTRL_PORT_MASK))) { 1115 if ((!card->has_control_mask && (card->mp_rd_bitmap &
1116 card->reg->data_port_mask)) ||
1117 (card->has_control_mask && (card->mp_rd_bitmap &
1118 (~((u32) CTRL_PORT_MASK))))) {
1028 /* Some more data RX pending */ 1119 /* Some more data RX pending */
1029 dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__); 1120 dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
1030 1121
@@ -1060,10 +1151,10 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1060 if (f_aggr_cur) { 1151 if (f_aggr_cur) {
1061 dev_dbg(adapter->dev, "info: current packet aggregation\n"); 1152 dev_dbg(adapter->dev, "info: current packet aggregation\n");
1062 /* Curr pkt can be aggregated */ 1153 /* Curr pkt can be aggregated */
1063 MP_RX_AGGR_SETUP(card, skb, port); 1154 mp_rx_aggr_setup(card, skb, port);
1064 1155
1065 if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) || 1156 if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
1066 MP_RX_AGGR_PORT_LIMIT_REACHED(card)) { 1157 mp_rx_aggr_port_limit_reached(card)) {
1067 dev_dbg(adapter->dev, "info: %s: aggregated packet " 1158 dev_dbg(adapter->dev, "info: %s: aggregated packet "
1068 "limit reached\n", __func__); 1159 "limit reached\n", __func__);
1069 /* No more pkts allowed in Aggr buf, rx it */ 1160 /* No more pkts allowed in Aggr buf, rx it */
@@ -1076,11 +1167,28 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1076 dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n", 1167 dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
1077 card->mpa_rx.pkt_cnt); 1168 card->mpa_rx.pkt_cnt);
1078 1169
1170 if (card->supports_sdio_new_mode) {
1171 int i;
1172 u32 port_count;
1173
1174 for (i = 0, port_count = 0; i < card->max_ports; i++)
1175 if (card->mpa_rx.ports & BIT(i))
1176 port_count++;
1177
1178 /* Reading data from "start_port + 0" to "start_port +
1179 * port_count -1", so decrease the count by 1
1180 */
1181 port_count--;
1182 mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
1183 (port_count << 8)) + card->mpa_rx.start_port;
1184 } else {
1185 mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
1186 (card->mpa_rx.ports << 4)) +
1187 card->mpa_rx.start_port;
1188 }
1189
1079 if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf, 1190 if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
1080 card->mpa_rx.buf_len, 1191 card->mpa_rx.buf_len, mport, 1))
1081 (adapter->ioport | 0x1000 |
1082 (card->mpa_rx.ports << 4)) +
1083 card->mpa_rx.start_port, 1))
1084 goto error; 1192 goto error;
1085 1193
1086 curr_ptr = card->mpa_rx.buf; 1194 curr_ptr = card->mpa_rx.buf;
@@ -1167,6 +1275,7 @@ error:
1167static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) 1275static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1168{ 1276{
1169 struct sdio_mmc_card *card = adapter->card; 1277 struct sdio_mmc_card *card = adapter->card;
1278 const struct mwifiex_sdio_card_reg *reg = card->reg;
1170 int ret = 0; 1279 int ret = 0;
1171 u8 sdio_ireg; 1280 u8 sdio_ireg;
1172 struct sk_buff *skb; 1281 struct sk_buff *skb;
@@ -1175,6 +1284,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1175 u32 rx_blocks; 1284 u32 rx_blocks;
1176 u16 rx_len; 1285 u16 rx_len;
1177 unsigned long flags; 1286 unsigned long flags;
1287 u32 bitmap;
1288 u8 cr;
1178 1289
1179 spin_lock_irqsave(&adapter->int_lock, flags); 1290 spin_lock_irqsave(&adapter->int_lock, flags);
1180 sdio_ireg = adapter->int_status; 1291 sdio_ireg = adapter->int_status;
@@ -1184,10 +1295,60 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1184 if (!sdio_ireg) 1295 if (!sdio_ireg)
1185 return ret; 1296 return ret;
1186 1297
1298 /* Following interrupt is only for SDIO new mode */
1299 if (sdio_ireg & DN_LD_CMD_PORT_HOST_INT_STATUS && adapter->cmd_sent)
1300 adapter->cmd_sent = false;
1301
1302 /* Following interrupt is only for SDIO new mode */
1303 if (sdio_ireg & UP_LD_CMD_PORT_HOST_INT_STATUS) {
1304 u32 pkt_type;
1305
1306 /* read the len of control packet */
1307 rx_len = card->mp_regs[CMD_RD_LEN_1] << 8;
1308 rx_len |= (u16) card->mp_regs[CMD_RD_LEN_0];
1309 rx_blocks = DIV_ROUND_UP(rx_len, MWIFIEX_SDIO_BLOCK_SIZE);
1310 if (rx_len <= INTF_HEADER_LEN ||
1311 (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
1312 MWIFIEX_RX_DATA_BUF_SIZE)
1313 return -1;
1314 rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
1315
1316 skb = dev_alloc_skb(rx_len);
1317 if (!skb)
1318 return -1;
1319
1320 skb_put(skb, rx_len);
1321
1322 if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data,
1323 skb->len, adapter->ioport |
1324 CMD_PORT_SLCT)) {
1325 dev_err(adapter->dev,
1326 "%s: failed to card_to_host", __func__);
1327 dev_kfree_skb_any(skb);
1328 goto term_cmd;
1329 }
1330
1331 if ((pkt_type != MWIFIEX_TYPE_CMD) &&
1332 (pkt_type != MWIFIEX_TYPE_EVENT))
1333 dev_err(adapter->dev,
1334 "%s:Received wrong packet on cmd port",
1335 __func__);
1336
1337 mwifiex_decode_rx_packet(adapter, skb, pkt_type);
1338 }
1339
1187 if (sdio_ireg & DN_LD_HOST_INT_STATUS) { 1340 if (sdio_ireg & DN_LD_HOST_INT_STATUS) {
1188 card->mp_wr_bitmap = ((u16) card->mp_regs[WR_BITMAP_U]) << 8; 1341 bitmap = (u32) card->mp_regs[reg->wr_bitmap_l];
1189 card->mp_wr_bitmap |= (u16) card->mp_regs[WR_BITMAP_L]; 1342 bitmap |= ((u32) card->mp_regs[reg->wr_bitmap_u]) << 8;
1190 dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%04x\n", 1343 if (card->supports_sdio_new_mode) {
1344 bitmap |=
1345 ((u32) card->mp_regs[reg->wr_bitmap_1l]) << 16;
1346 bitmap |=
1347 ((u32) card->mp_regs[reg->wr_bitmap_1u]) << 24;
1348 }
1349 card->mp_wr_bitmap = bitmap;
1350
1351 dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n",
1191 card->mp_wr_bitmap); 1352 card->mp_wr_bitmap);
1192 if (adapter->data_sent && 1353 if (adapter->data_sent &&
1193 (card->mp_wr_bitmap & card->mp_data_port_mask)) { 1354 (card->mp_wr_bitmap & card->mp_data_port_mask)) {
@@ -1200,11 +1361,11 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1200 /* As firmware will not generate download ready interrupt if the port 1361 /* As firmware will not generate download ready interrupt if the port
1201 updated is command port only, cmd_sent should be done for any SDIO 1362 updated is command port only, cmd_sent should be done for any SDIO
1202 interrupt. */ 1363 interrupt. */
1203 if (adapter->cmd_sent) { 1364 if (card->has_control_mask && adapter->cmd_sent) {
1204 /* Check if firmware has attach buffer at command port and 1365 /* Check if firmware has attach buffer at command port and
1205 update just that in wr_bit_map. */ 1366 update just that in wr_bit_map. */
1206 card->mp_wr_bitmap |= 1367 card->mp_wr_bitmap |=
1207 (u16) card->mp_regs[WR_BITMAP_L] & CTRL_PORT_MASK; 1368 (u32) card->mp_regs[reg->wr_bitmap_l] & CTRL_PORT_MASK;
1208 if (card->mp_wr_bitmap & CTRL_PORT_MASK) 1369 if (card->mp_wr_bitmap & CTRL_PORT_MASK)
1209 adapter->cmd_sent = false; 1370 adapter->cmd_sent = false;
1210 } 1371 }
@@ -1212,9 +1373,16 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1212 dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n", 1373 dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
1213 adapter->cmd_sent, adapter->data_sent); 1374 adapter->cmd_sent, adapter->data_sent);
1214 if (sdio_ireg & UP_LD_HOST_INT_STATUS) { 1375 if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
1215 card->mp_rd_bitmap = ((u16) card->mp_regs[RD_BITMAP_U]) << 8; 1376 bitmap = (u32) card->mp_regs[reg->rd_bitmap_l];
1216 card->mp_rd_bitmap |= (u16) card->mp_regs[RD_BITMAP_L]; 1377 bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8;
1217 dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%04x\n", 1378 if (card->supports_sdio_new_mode) {
1379 bitmap |=
1380 ((u32) card->mp_regs[reg->rd_bitmap_1l]) << 16;
1381 bitmap |=
1382 ((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24;
1383 }
1384 card->mp_rd_bitmap = bitmap;
1385 dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n",
1218 card->mp_rd_bitmap); 1386 card->mp_rd_bitmap);
1219 1387
1220 while (true) { 1388 while (true) {
@@ -1224,8 +1392,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1224 "info: no more rd_port available\n"); 1392 "info: no more rd_port available\n");
1225 break; 1393 break;
1226 } 1394 }
1227 len_reg_l = RD_LEN_P0_L + (port << 1); 1395 len_reg_l = reg->rd_len_p0_l + (port << 1);
1228 len_reg_u = RD_LEN_P0_U + (port << 1); 1396 len_reg_u = reg->rd_len_p0_u + (port << 1);
1229 rx_len = ((u16) card->mp_regs[len_reg_u]) << 8; 1397 rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
1230 rx_len |= (u16) card->mp_regs[len_reg_l]; 1398 rx_len |= (u16) card->mp_regs[len_reg_l];
1231 dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n", 1399 dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
@@ -1257,37 +1425,33 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1257 1425
1258 if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb, 1426 if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb,
1259 port)) { 1427 port)) {
1260 u32 cr = 0;
1261
1262 dev_err(adapter->dev, "card_to_host_mpa failed:" 1428 dev_err(adapter->dev, "card_to_host_mpa failed:"
1263 " int status=%#x\n", sdio_ireg); 1429 " int status=%#x\n", sdio_ireg);
1264 if (mwifiex_read_reg(adapter, 1430 goto term_cmd;
1265 CONFIGURATION_REG, &cr))
1266 dev_err(adapter->dev,
1267 "read CFG reg failed\n");
1268
1269 dev_dbg(adapter->dev,
1270 "info: CFG reg val = %d\n", cr);
1271 if (mwifiex_write_reg(adapter,
1272 CONFIGURATION_REG,
1273 (cr | 0x04)))
1274 dev_err(adapter->dev,
1275 "write CFG reg failed\n");
1276
1277 dev_dbg(adapter->dev, "info: write success\n");
1278 if (mwifiex_read_reg(adapter,
1279 CONFIGURATION_REG, &cr))
1280 dev_err(adapter->dev,
1281 "read CFG reg failed\n");
1282
1283 dev_dbg(adapter->dev,
1284 "info: CFG reg val =%x\n", cr);
1285 return -1;
1286 } 1431 }
1287 } 1432 }
1288 } 1433 }
1289 1434
1290 return 0; 1435 return 0;
1436
1437term_cmd:
1438 /* terminate cmd */
1439 if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
1440 dev_err(adapter->dev, "read CFG reg failed\n");
1441 else
1442 dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr);
1443
1444 if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04)))
1445 dev_err(adapter->dev, "write CFG reg failed\n");
1446 else
1447 dev_dbg(adapter->dev, "info: write success\n");
1448
1449 if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
1450 dev_err(adapter->dev, "read CFG reg failed\n");
1451 else
1452 dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr);
1453
1454 return -1;
1291} 1455}
1292 1456
1293/* 1457/*
@@ -1305,7 +1469,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1305 * and return. 1469 * and return.
1306 */ 1470 */
1307static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, 1471static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
1308 u8 *payload, u32 pkt_len, u8 port, 1472 u8 *payload, u32 pkt_len, u32 port,
1309 u32 next_pkt_len) 1473 u32 next_pkt_len)
1310{ 1474{
1311 struct sdio_mmc_card *card = adapter->card; 1475 struct sdio_mmc_card *card = adapter->card;
@@ -1314,8 +1478,11 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
1314 s32 f_send_cur_buf = 0; 1478 s32 f_send_cur_buf = 0;
1315 s32 f_precopy_cur_buf = 0; 1479 s32 f_precopy_cur_buf = 0;
1316 s32 f_postcopy_cur_buf = 0; 1480 s32 f_postcopy_cur_buf = 0;
1481 u32 mport;
1317 1482
1318 if ((!card->mpa_tx.enabled) || (port == CTRL_PORT)) { 1483 if (!card->mpa_tx.enabled ||
1484 (card->has_control_mask && (port == CTRL_PORT)) ||
1485 (card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) {
1319 dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n", 1486 dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n",
1320 __func__); 1487 __func__);
1321 1488
@@ -1329,7 +1496,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
1329 __func__); 1496 __func__);
1330 1497
1331 if (MP_TX_AGGR_IN_PROGRESS(card)) { 1498 if (MP_TX_AGGR_IN_PROGRESS(card)) {
1332 if (!MP_TX_AGGR_PORT_LIMIT_REACHED(card) && 1499 if (!mp_tx_aggr_port_limit_reached(card) &&
1333 MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) { 1500 MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) {
1334 f_precopy_cur_buf = 1; 1501 f_precopy_cur_buf = 1;
1335 1502
@@ -1342,7 +1509,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
1342 /* No room in Aggr buf, send it */ 1509 /* No room in Aggr buf, send it */
1343 f_send_aggr_buf = 1; 1510 f_send_aggr_buf = 1;
1344 1511
1345 if (MP_TX_AGGR_PORT_LIMIT_REACHED(card) || 1512 if (mp_tx_aggr_port_limit_reached(card) ||
1346 !(card->mp_wr_bitmap & 1513 !(card->mp_wr_bitmap &
1347 (1 << card->curr_wr_port))) 1514 (1 << card->curr_wr_port)))
1348 f_send_cur_buf = 1; 1515 f_send_cur_buf = 1;
@@ -1381,7 +1548,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
1381 MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port); 1548 MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
1382 1549
1383 if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) || 1550 if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) ||
1384 MP_TX_AGGR_PORT_LIMIT_REACHED(card)) 1551 mp_tx_aggr_port_limit_reached(card))
1385 /* No more pkts allowed in Aggr buf, send it */ 1552 /* No more pkts allowed in Aggr buf, send it */
1386 f_send_aggr_buf = 1; 1553 f_send_aggr_buf = 1;
1387 } 1554 }
@@ -1390,11 +1557,28 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
1390 dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n", 1557 dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n",
1391 __func__, 1558 __func__,
1392 card->mpa_tx.start_port, card->mpa_tx.ports); 1559 card->mpa_tx.start_port, card->mpa_tx.ports);
1560 if (card->supports_sdio_new_mode) {
1561 u32 port_count;
1562 int i;
1563
1564 for (i = 0, port_count = 0; i < card->max_ports; i++)
1565 if (card->mpa_tx.ports & BIT(i))
1566 port_count++;
1567
1568 /* Writing data from "start_port + 0" to "start_port +
1569 * port_count -1", so decrease the count by 1
1570 */
1571 port_count--;
1572 mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
1573 (port_count << 8)) + card->mpa_tx.start_port;
1574 } else {
1575 mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
1576 (card->mpa_tx.ports << 4)) +
1577 card->mpa_tx.start_port;
1578 }
1579
1393 ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf, 1580 ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf,
1394 card->mpa_tx.buf_len, 1581 card->mpa_tx.buf_len, mport);
1395 (adapter->ioport | 0x1000 |
1396 (card->mpa_tx.ports << 4)) +
1397 card->mpa_tx.start_port);
1398 1582
1399 MP_TX_AGGR_BUF_RESET(card); 1583 MP_TX_AGGR_BUF_RESET(card);
1400 } 1584 }
@@ -1434,7 +1618,7 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
1434 int ret; 1618 int ret;
1435 u32 buf_block_len; 1619 u32 buf_block_len;
1436 u32 blk_size; 1620 u32 blk_size;
1437 u8 port = CTRL_PORT; 1621 u32 port = CTRL_PORT;
1438 u8 *payload = (u8 *)skb->data; 1622 u8 *payload = (u8 *)skb->data;
1439 u32 pkt_len = skb->len; 1623 u32 pkt_len = skb->len;
1440 1624
@@ -1465,6 +1649,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
1465 pkt_len > MWIFIEX_UPLD_SIZE) 1649 pkt_len > MWIFIEX_UPLD_SIZE)
1466 dev_err(adapter->dev, "%s: payload=%p, nb=%d\n", 1650 dev_err(adapter->dev, "%s: payload=%p, nb=%d\n",
1467 __func__, payload, pkt_len); 1651 __func__, payload, pkt_len);
1652
1653 if (card->supports_sdio_new_mode)
1654 port = CMD_PORT_SLCT;
1468 } 1655 }
1469 1656
1470 /* Transfer data to card */ 1657 /* Transfer data to card */
@@ -1586,18 +1773,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
1586 1773
1587 adapter->dev = &func->dev; 1774 adapter->dev = &func->dev;
1588 1775
1589 switch (func->device) { 1776 strcpy(adapter->fw_name, card->firmware);
1590 case SDIO_DEVICE_ID_MARVELL_8786:
1591 strcpy(adapter->fw_name, SD8786_DEFAULT_FW_NAME);
1592 break;
1593 case SDIO_DEVICE_ID_MARVELL_8797:
1594 strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
1595 break;
1596 case SDIO_DEVICE_ID_MARVELL_8787:
1597 default:
1598 strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
1599 break;
1600 }
1601 1777
1602 return 0; 1778 return 0;
1603 1779
@@ -1626,8 +1802,9 @@ disable_func:
1626static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) 1802static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
1627{ 1803{
1628 struct sdio_mmc_card *card = adapter->card; 1804 struct sdio_mmc_card *card = adapter->card;
1805 const struct mwifiex_sdio_card_reg *reg = card->reg;
1629 int ret; 1806 int ret;
1630 u32 sdio_ireg; 1807 u8 sdio_ireg;
1631 1808
1632 /* 1809 /*
1633 * Read the HOST_INT_STATUS_REG for ACK the first interrupt got 1810 * Read the HOST_INT_STATUS_REG for ACK the first interrupt got
@@ -1645,30 +1822,35 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
1645 /* Initialize SDIO variables in card */ 1822 /* Initialize SDIO variables in card */
1646 card->mp_rd_bitmap = 0; 1823 card->mp_rd_bitmap = 0;
1647 card->mp_wr_bitmap = 0; 1824 card->mp_wr_bitmap = 0;
1648 card->curr_rd_port = 1; 1825 card->curr_rd_port = reg->start_rd_port;
1649 card->curr_wr_port = 1; 1826 card->curr_wr_port = reg->start_wr_port;
1650 1827
1651 card->mp_data_port_mask = DATA_PORT_MASK; 1828 card->mp_data_port_mask = reg->data_port_mask;
1652 1829
1653 card->mpa_tx.buf_len = 0; 1830 card->mpa_tx.buf_len = 0;
1654 card->mpa_tx.pkt_cnt = 0; 1831 card->mpa_tx.pkt_cnt = 0;
1655 card->mpa_tx.start_port = 0; 1832 card->mpa_tx.start_port = 0;
1656 1833
1657 card->mpa_tx.enabled = 1; 1834 card->mpa_tx.enabled = 1;
1658 card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT; 1835 card->mpa_tx.pkt_aggr_limit = card->mp_agg_pkt_limit;
1659 1836
1660 card->mpa_rx.buf_len = 0; 1837 card->mpa_rx.buf_len = 0;
1661 card->mpa_rx.pkt_cnt = 0; 1838 card->mpa_rx.pkt_cnt = 0;
1662 card->mpa_rx.start_port = 0; 1839 card->mpa_rx.start_port = 0;
1663 1840
1664 card->mpa_rx.enabled = 1; 1841 card->mpa_rx.enabled = 1;
1665 card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT; 1842 card->mpa_rx.pkt_aggr_limit = card->mp_agg_pkt_limit;
1666 1843
1667 /* Allocate buffers for SDIO MP-A */ 1844 /* Allocate buffers for SDIO MP-A */
1668 card->mp_regs = kzalloc(MAX_MP_REGS, GFP_KERNEL); 1845 card->mp_regs = kzalloc(reg->max_mp_regs, GFP_KERNEL);
1669 if (!card->mp_regs) 1846 if (!card->mp_regs)
1670 return -ENOMEM; 1847 return -ENOMEM;
1671 1848
1849 /* Allocate skb pointer buffers */
1850 card->mpa_rx.skb_arr = kzalloc((sizeof(void *)) *
1851 card->mp_agg_pkt_limit, GFP_KERNEL);
1852 card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
1853 card->mp_agg_pkt_limit, GFP_KERNEL);
1672 ret = mwifiex_alloc_sdio_mpa_buffers(adapter, 1854 ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
1673 SDIO_MP_TX_AGGR_DEF_BUF_SIZE, 1855 SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
1674 SDIO_MP_RX_AGGR_DEF_BUF_SIZE); 1856 SDIO_MP_RX_AGGR_DEF_BUF_SIZE);
@@ -1705,6 +1887,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter)
1705 struct sdio_mmc_card *card = adapter->card; 1887 struct sdio_mmc_card *card = adapter->card;
1706 1888
1707 kfree(card->mp_regs); 1889 kfree(card->mp_regs);
1890 kfree(card->mpa_rx.skb_arr);
1891 kfree(card->mpa_rx.len_arr);
1708 kfree(card->mpa_tx.buf); 1892 kfree(card->mpa_tx.buf);
1709 kfree(card->mpa_rx.buf); 1893 kfree(card->mpa_rx.buf);
1710} 1894}
@@ -1716,16 +1900,20 @@ static void
1716mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port) 1900mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
1717{ 1901{
1718 struct sdio_mmc_card *card = adapter->card; 1902 struct sdio_mmc_card *card = adapter->card;
1903 const struct mwifiex_sdio_card_reg *reg = card->reg;
1719 int i; 1904 int i;
1720 1905
1721 card->mp_end_port = port; 1906 card->mp_end_port = port;
1722 1907
1723 card->mp_data_port_mask = DATA_PORT_MASK; 1908 card->mp_data_port_mask = reg->data_port_mask;
1724 1909
1725 for (i = 1; i <= MAX_PORT - card->mp_end_port; i++) 1910 if (reg->start_wr_port) {
1726 card->mp_data_port_mask &= ~(1 << (MAX_PORT - i)); 1911 for (i = 1; i <= card->max_ports - card->mp_end_port; i++)
1912 card->mp_data_port_mask &=
1913 ~(1 << (card->max_ports - i));
1914 }
1727 1915
1728 card->curr_wr_port = 1; 1916 card->curr_wr_port = reg->start_wr_port;
1729 1917
1730 dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n", 1918 dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n",
1731 port, card->mp_data_port_mask); 1919 port, card->mp_data_port_mask);
@@ -1831,3 +2019,4 @@ MODULE_LICENSE("GPL v2");
1831MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME); 2019MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME);
1832MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); 2020MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
1833MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); 2021MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
2022MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 8cc5468654b4..6d51dfdd8251 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -32,30 +32,37 @@
32#define SD8786_DEFAULT_FW_NAME "mrvl/sd8786_uapsta.bin" 32#define SD8786_DEFAULT_FW_NAME "mrvl/sd8786_uapsta.bin"
33#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin" 33#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
34#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin" 34#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
35#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
35 36
36#define BLOCK_MODE 1 37#define BLOCK_MODE 1
37#define BYTE_MODE 0 38#define BYTE_MODE 0
38 39
39#define REG_PORT 0 40#define REG_PORT 0
40#define RD_BITMAP_L 0x04
41#define RD_BITMAP_U 0x05
42#define WR_BITMAP_L 0x06
43#define WR_BITMAP_U 0x07
44#define RD_LEN_P0_L 0x08
45#define RD_LEN_P0_U 0x09
46 41
47#define MWIFIEX_SDIO_IO_PORT_MASK 0xfffff 42#define MWIFIEX_SDIO_IO_PORT_MASK 0xfffff
48 43
49#define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000 44#define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000
50 45
46#define SDIO_MPA_ADDR_BASE 0x1000
51#define CTRL_PORT 0 47#define CTRL_PORT 0
52#define CTRL_PORT_MASK 0x0001 48#define CTRL_PORT_MASK 0x0001
53#define DATA_PORT_MASK 0xfffe
54 49
55#define MAX_MP_REGS 64 50#define CMD_PORT_UPLD_INT_MASK (0x1U<<6)
56#define MAX_PORT 16 51#define CMD_PORT_DNLD_INT_MASK (0x1U<<7)
57 52#define HOST_TERM_CMD53 (0x1U << 2)
58#define SDIO_MP_AGGR_DEF_PKT_LIMIT 8 53#define REG_PORT 0
54#define MEM_PORT 0x10000
55#define CMD_RD_LEN_0 0xB4
56#define CMD_RD_LEN_1 0xB5
57#define CARD_CONFIG_2_1_REG 0xCD
58#define CMD53_NEW_MODE (0x1U << 0)
59#define CMD_CONFIG_0 0xB8
60#define CMD_PORT_RD_LEN_EN (0x1U << 2)
61#define CMD_CONFIG_1 0xB9
62#define CMD_PORT_AUTO_EN (0x1U << 0)
63#define CMD_PORT_SLCT 0x8000
64#define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U)
65#define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U)
59 66
60#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (8192) /* 8K */ 67#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (8192) /* 8K */
61 68
@@ -75,14 +82,8 @@
75 82
76/* Host Control Registers : Configuration */ 83/* Host Control Registers : Configuration */
77#define CONFIGURATION_REG 0x00 84#define CONFIGURATION_REG 0x00
78/* Host Control Registers : Host without Command 53 finish host*/
79#define HOST_TO_CARD_EVENT (0x1U << 3)
80/* Host Control Registers : Host without Command 53 finish host */
81#define HOST_WO_CMD53_FINISH_HOST (0x1U << 2)
82/* Host Control Registers : Host power up */ 85/* Host Control Registers : Host power up */
83#define HOST_POWER_UP (0x1U << 1) 86#define HOST_POWER_UP (0x1U << 1)
84/* Host Control Registers : Host power down */
85#define HOST_POWER_DOWN (0x1U << 0)
86 87
87/* Host Control Registers : Host interrupt mask */ 88/* Host Control Registers : Host interrupt mask */
88#define HOST_INT_MASK_REG 0x02 89#define HOST_INT_MASK_REG 0x02
@@ -90,8 +91,7 @@
90#define UP_LD_HOST_INT_MASK (0x1U) 91#define UP_LD_HOST_INT_MASK (0x1U)
91/* Host Control Registers : Download host interrupt mask */ 92/* Host Control Registers : Download host interrupt mask */
92#define DN_LD_HOST_INT_MASK (0x2U) 93#define DN_LD_HOST_INT_MASK (0x2U)
93/* Enable Host interrupt mask */ 94
94#define HOST_INT_ENABLE (UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK)
95/* Disable Host interrupt mask */ 95/* Disable Host interrupt mask */
96#define HOST_INT_DISABLE 0xff 96#define HOST_INT_DISABLE 0xff
97 97
@@ -104,74 +104,15 @@
104 104
105/* Host Control Registers : Host interrupt RSR */ 105/* Host Control Registers : Host interrupt RSR */
106#define HOST_INT_RSR_REG 0x01 106#define HOST_INT_RSR_REG 0x01
107/* Host Control Registers : Upload host interrupt RSR */
108#define UP_LD_HOST_INT_RSR (0x1U)
109#define SDIO_INT_MASK 0x3F
110 107
111/* Host Control Registers : Host interrupt status */ 108/* Host Control Registers : Host interrupt status */
112#define HOST_INT_STATUS_REG 0x28 109#define HOST_INT_STATUS_REG 0x28
113/* Host Control Registers : Upload CRC error */ 110
114#define UP_LD_CRC_ERR (0x1U << 2)
115/* Host Control Registers : Upload restart */
116#define UP_LD_RESTART (0x1U << 1)
117/* Host Control Registers : Download restart */
118#define DN_LD_RESTART (0x1U << 0)
119
120/* Card Control Registers : Card status register */
121#define CARD_STATUS_REG 0x30
122/* Card Control Registers : Card I/O ready */ 111/* Card Control Registers : Card I/O ready */
123#define CARD_IO_READY (0x1U << 3) 112#define CARD_IO_READY (0x1U << 3)
124/* Card Control Registers : CIS card ready */
125#define CIS_CARD_RDY (0x1U << 2)
126/* Card Control Registers : Upload card ready */
127#define UP_LD_CARD_RDY (0x1U << 1)
128/* Card Control Registers : Download card ready */ 113/* Card Control Registers : Download card ready */
129#define DN_LD_CARD_RDY (0x1U << 0) 114#define DN_LD_CARD_RDY (0x1U << 0)
130 115
131/* Card Control Registers : Host interrupt mask register */
132#define HOST_INTERRUPT_MASK_REG 0x34
133/* Card Control Registers : Host power interrupt mask */
134#define HOST_POWER_INT_MASK (0x1U << 3)
135/* Card Control Registers : Abort card interrupt mask */
136#define ABORT_CARD_INT_MASK (0x1U << 2)
137/* Card Control Registers : Upload card interrupt mask */
138#define UP_LD_CARD_INT_MASK (0x1U << 1)
139/* Card Control Registers : Download card interrupt mask */
140#define DN_LD_CARD_INT_MASK (0x1U << 0)
141
142/* Card Control Registers : Card interrupt status register */
143#define CARD_INTERRUPT_STATUS_REG 0x38
144/* Card Control Registers : Power up interrupt */
145#define POWER_UP_INT (0x1U << 4)
146/* Card Control Registers : Power down interrupt */
147#define POWER_DOWN_INT (0x1U << 3)
148
149/* Card Control Registers : Card interrupt RSR register */
150#define CARD_INTERRUPT_RSR_REG 0x3c
151/* Card Control Registers : Power up RSR */
152#define POWER_UP_RSR (0x1U << 4)
153/* Card Control Registers : Power down RSR */
154#define POWER_DOWN_RSR (0x1U << 3)
155
156/* Card Control Registers : Miscellaneous Configuration Register */
157#define CARD_MISC_CFG_REG 0x6C
158
159/* Host F1 read base 0 */
160#define HOST_F1_RD_BASE_0 0x0040
161/* Host F1 read base 1 */
162#define HOST_F1_RD_BASE_1 0x0041
163/* Host F1 card ready */
164#define HOST_F1_CARD_RDY 0x0020
165
166/* Firmware status 0 register */
167#define CARD_FW_STATUS0_REG 0x60
168/* Firmware status 1 register */
169#define CARD_FW_STATUS1_REG 0x61
170/* Rx length register */
171#define CARD_RX_LEN_REG 0x62
172/* Rx unit register */
173#define CARD_RX_UNIT_REG 0x63
174
175/* Max retry number of CMD53 write */ 116/* Max retry number of CMD53 write */
176#define MAX_WRITE_IOMEM_RETRY 2 117#define MAX_WRITE_IOMEM_RETRY 2
177 118
@@ -192,7 +133,8 @@
192 if (a->mpa_tx.start_port <= port) \ 133 if (a->mpa_tx.start_port <= port) \
193 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt)); \ 134 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt)); \
194 else \ 135 else \
195 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \ 136 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+ \
137 (a->max_ports - \
196 a->mp_end_port))); \ 138 a->mp_end_port))); \
197 a->mpa_tx.pkt_cnt++; \ 139 a->mpa_tx.pkt_cnt++; \
198} while (0) 140} while (0)
@@ -201,12 +143,6 @@
201#define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \ 143#define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \
202 (a->mpa_tx.pkt_cnt == a->mpa_tx.pkt_aggr_limit) 144 (a->mpa_tx.pkt_cnt == a->mpa_tx.pkt_aggr_limit)
203 145
204/* SDIO Tx aggregation port limit ? */
205#define MP_TX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_wr_port < \
206 a->mpa_tx.start_port) && (((MAX_PORT - \
207 a->mpa_tx.start_port) + a->curr_wr_port) >= \
208 SDIO_MP_AGGR_DEF_PKT_LIMIT))
209
210/* Reset SDIO Tx aggregation buffer parameters */ 146/* Reset SDIO Tx aggregation buffer parameters */
211#define MP_TX_AGGR_BUF_RESET(a) do { \ 147#define MP_TX_AGGR_BUF_RESET(a) do { \
212 a->mpa_tx.pkt_cnt = 0; \ 148 a->mpa_tx.pkt_cnt = 0; \
@@ -219,12 +155,6 @@
219#define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \ 155#define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \
220 (a->mpa_rx.pkt_cnt == a->mpa_rx.pkt_aggr_limit) 156 (a->mpa_rx.pkt_cnt == a->mpa_rx.pkt_aggr_limit)
221 157
222/* SDIO Tx aggregation port limit ? */
223#define MP_RX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_rd_port < \
224 a->mpa_rx.start_port) && (((MAX_PORT - \
225 a->mpa_rx.start_port) + a->curr_rd_port) >= \
226 SDIO_MP_AGGR_DEF_PKT_LIMIT))
227
228/* SDIO Rx aggregation in progress ? */ 158/* SDIO Rx aggregation in progress ? */
229#define MP_RX_AGGR_IN_PROGRESS(a) (a->mpa_rx.pkt_cnt > 0) 159#define MP_RX_AGGR_IN_PROGRESS(a) (a->mpa_rx.pkt_cnt > 0)
230 160
@@ -232,20 +162,6 @@
232#define MP_RX_AGGR_BUF_HAS_ROOM(a, rx_len) \ 162#define MP_RX_AGGR_BUF_HAS_ROOM(a, rx_len) \
233 ((a->mpa_rx.buf_len+rx_len) <= a->mpa_rx.buf_size) 163 ((a->mpa_rx.buf_len+rx_len) <= a->mpa_rx.buf_size)
234 164
235/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
236#define MP_RX_AGGR_SETUP(a, skb, port) do { \
237 a->mpa_rx.buf_len += skb->len; \
238 if (!a->mpa_rx.pkt_cnt) \
239 a->mpa_rx.start_port = port; \
240 if (a->mpa_rx.start_port <= port) \
241 a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt)); \
242 else \
243 a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt+1)); \
244 a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb; \
245 a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len; \
246 a->mpa_rx.pkt_cnt++; \
247} while (0)
248
249/* Reset SDIO Rx aggregation buffer parameters */ 165/* Reset SDIO Rx aggregation buffer parameters */
250#define MP_RX_AGGR_BUF_RESET(a) do { \ 166#define MP_RX_AGGR_BUF_RESET(a) do { \
251 a->mpa_rx.pkt_cnt = 0; \ 167 a->mpa_rx.pkt_cnt = 0; \
@@ -254,14 +170,13 @@
254 a->mpa_rx.start_port = 0; \ 170 a->mpa_rx.start_port = 0; \
255} while (0) 171} while (0)
256 172
257
258/* data structure for SDIO MPA TX */ 173/* data structure for SDIO MPA TX */
259struct mwifiex_sdio_mpa_tx { 174struct mwifiex_sdio_mpa_tx {
260 /* multiport tx aggregation buffer pointer */ 175 /* multiport tx aggregation buffer pointer */
261 u8 *buf; 176 u8 *buf;
262 u32 buf_len; 177 u32 buf_len;
263 u32 pkt_cnt; 178 u32 pkt_cnt;
264 u16 ports; 179 u32 ports;
265 u16 start_port; 180 u16 start_port;
266 u8 enabled; 181 u8 enabled;
267 u32 buf_size; 182 u32 buf_size;
@@ -272,11 +187,11 @@ struct mwifiex_sdio_mpa_rx {
272 u8 *buf; 187 u8 *buf;
273 u32 buf_len; 188 u32 buf_len;
274 u32 pkt_cnt; 189 u32 pkt_cnt;
275 u16 ports; 190 u32 ports;
276 u16 start_port; 191 u16 start_port;
277 192
278 struct sk_buff *skb_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT]; 193 struct sk_buff **skb_arr;
279 u32 len_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT]; 194 u32 *len_arr;
280 195
281 u8 enabled; 196 u8 enabled;
282 u32 buf_size; 197 u32 buf_size;
@@ -286,15 +201,47 @@ struct mwifiex_sdio_mpa_rx {
286int mwifiex_bus_register(void); 201int mwifiex_bus_register(void);
287void mwifiex_bus_unregister(void); 202void mwifiex_bus_unregister(void);
288 203
204struct mwifiex_sdio_card_reg {
205 u8 start_rd_port;
206 u8 start_wr_port;
207 u8 base_0_reg;
208 u8 base_1_reg;
209 u8 poll_reg;
210 u8 host_int_enable;
211 u8 status_reg_0;
212 u8 status_reg_1;
213 u8 sdio_int_mask;
214 u32 data_port_mask;
215 u8 max_mp_regs;
216 u8 rd_bitmap_l;
217 u8 rd_bitmap_u;
218 u8 rd_bitmap_1l;
219 u8 rd_bitmap_1u;
220 u8 wr_bitmap_l;
221 u8 wr_bitmap_u;
222 u8 wr_bitmap_1l;
223 u8 wr_bitmap_1u;
224 u8 rd_len_p0_l;
225 u8 rd_len_p0_u;
226 u8 card_misc_cfg_reg;
227};
228
289struct sdio_mmc_card { 229struct sdio_mmc_card {
290 struct sdio_func *func; 230 struct sdio_func *func;
291 struct mwifiex_adapter *adapter; 231 struct mwifiex_adapter *adapter;
292 232
293 u16 mp_rd_bitmap; 233 const char *firmware;
294 u16 mp_wr_bitmap; 234 const struct mwifiex_sdio_card_reg *reg;
235 u8 max_ports;
236 u8 mp_agg_pkt_limit;
237 bool supports_sdio_new_mode;
238 bool has_control_mask;
239
240 u32 mp_rd_bitmap;
241 u32 mp_wr_bitmap;
295 242
296 u16 mp_end_port; 243 u16 mp_end_port;
297 u16 mp_data_port_mask; 244 u32 mp_data_port_mask;
298 245
299 u8 curr_rd_port; 246 u8 curr_rd_port;
300 u8 curr_wr_port; 247 u8 curr_wr_port;
@@ -305,6 +252,98 @@ struct sdio_mmc_card {
305 struct mwifiex_sdio_mpa_rx mpa_rx; 252 struct mwifiex_sdio_mpa_rx mpa_rx;
306}; 253};
307 254
255struct mwifiex_sdio_device {
256 const char *firmware;
257 const struct mwifiex_sdio_card_reg *reg;
258 u8 max_ports;
259 u8 mp_agg_pkt_limit;
260 bool supports_sdio_new_mode;
261 bool has_control_mask;
262};
263
264static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
265 .start_rd_port = 1,
266 .start_wr_port = 1,
267 .base_0_reg = 0x0040,
268 .base_1_reg = 0x0041,
269 .poll_reg = 0x30,
270 .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK,
271 .status_reg_0 = 0x60,
272 .status_reg_1 = 0x61,
273 .sdio_int_mask = 0x3f,
274 .data_port_mask = 0x0000fffe,
275 .max_mp_regs = 64,
276 .rd_bitmap_l = 0x04,
277 .rd_bitmap_u = 0x05,
278 .wr_bitmap_l = 0x06,
279 .wr_bitmap_u = 0x07,
280 .rd_len_p0_l = 0x08,
281 .rd_len_p0_u = 0x09,
282 .card_misc_cfg_reg = 0x6c,
283};
284
285static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
286 .start_rd_port = 0,
287 .start_wr_port = 0,
288 .base_0_reg = 0x60,
289 .base_1_reg = 0x61,
290 .poll_reg = 0x50,
291 .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
292 CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
293 .status_reg_0 = 0xc0,
294 .status_reg_1 = 0xc1,
295 .sdio_int_mask = 0xff,
296 .data_port_mask = 0xffffffff,
297 .max_mp_regs = 184,
298 .rd_bitmap_l = 0x04,
299 .rd_bitmap_u = 0x05,
300 .rd_bitmap_1l = 0x06,
301 .rd_bitmap_1u = 0x07,
302 .wr_bitmap_l = 0x08,
303 .wr_bitmap_u = 0x09,
304 .wr_bitmap_1l = 0x0a,
305 .wr_bitmap_1u = 0x0b,
306 .rd_len_p0_l = 0x0c,
307 .rd_len_p0_u = 0x0d,
308 .card_misc_cfg_reg = 0xcc,
309};
310
311static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
312 .firmware = SD8786_DEFAULT_FW_NAME,
313 .reg = &mwifiex_reg_sd87xx,
314 .max_ports = 16,
315 .mp_agg_pkt_limit = 8,
316 .supports_sdio_new_mode = false,
317 .has_control_mask = true,
318};
319
320static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
321 .firmware = SD8787_DEFAULT_FW_NAME,
322 .reg = &mwifiex_reg_sd87xx,
323 .max_ports = 16,
324 .mp_agg_pkt_limit = 8,
325 .supports_sdio_new_mode = false,
326 .has_control_mask = true,
327};
328
329static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
330 .firmware = SD8797_DEFAULT_FW_NAME,
331 .reg = &mwifiex_reg_sd87xx,
332 .max_ports = 16,
333 .mp_agg_pkt_limit = 8,
334 .supports_sdio_new_mode = false,
335 .has_control_mask = true,
336};
337
338static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
339 .firmware = SD8897_DEFAULT_FW_NAME,
340 .reg = &mwifiex_reg_sd8897,
341 .max_ports = 32,
342 .mp_agg_pkt_limit = 16,
343 .supports_sdio_new_mode = true,
344 .has_control_mask = false,
345};
346
308/* 347/*
309 * .cmdrsp_complete handler 348 * .cmdrsp_complete handler
310 */ 349 */
@@ -325,4 +364,77 @@ static inline int mwifiex_sdio_event_complete(struct mwifiex_adapter *adapter,
325 return 0; 364 return 0;
326} 365}
327 366
367static inline bool
368mp_rx_aggr_port_limit_reached(struct sdio_mmc_card *card)
369{
370 u8 tmp;
371
372 if (card->curr_rd_port < card->mpa_rx.start_port) {
373 if (card->supports_sdio_new_mode)
374 tmp = card->mp_end_port >> 1;
375 else
376 tmp = card->mp_agg_pkt_limit;
377
378 if (((card->max_ports - card->mpa_rx.start_port) +
379 card->curr_rd_port) >= tmp)
380 return true;
381 }
382
383 if (!card->supports_sdio_new_mode)
384 return false;
385
386 if ((card->curr_rd_port - card->mpa_rx.start_port) >=
387 (card->mp_end_port >> 1))
388 return true;
389
390 return false;
391}
392
393static inline bool
394mp_tx_aggr_port_limit_reached(struct sdio_mmc_card *card)
395{
396 u16 tmp;
397
398 if (card->curr_wr_port < card->mpa_tx.start_port) {
399 if (card->supports_sdio_new_mode)
400 tmp = card->mp_end_port >> 1;
401 else
402 tmp = card->mp_agg_pkt_limit;
403
404 if (((card->max_ports - card->mpa_tx.start_port) +
405 card->curr_wr_port) >= tmp)
406 return true;
407 }
408
409 if (!card->supports_sdio_new_mode)
410 return false;
411
412 if ((card->curr_wr_port - card->mpa_tx.start_port) >=
413 (card->mp_end_port >> 1))
414 return true;
415
416 return false;
417}
418
419/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
420static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card,
421 struct sk_buff *skb, u8 port)
422{
423 card->mpa_rx.buf_len += skb->len;
424
425 if (!card->mpa_rx.pkt_cnt)
426 card->mpa_rx.start_port = port;
427
428 if (card->supports_sdio_new_mode) {
429 card->mpa_rx.ports |= (1 << port);
430 } else {
431 if (card->mpa_rx.start_port <= port)
432 card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt);
433 else
434 card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1);
435 }
436 card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = skb;
437 card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = skb->len;
438 card->mpa_rx.pkt_cnt++;
439}
328#endif /* _MWIFIEX_SDIO_H */ 440#endif /* _MWIFIEX_SDIO_H */
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index b193e25977d2..8ece48580642 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1134,6 +1134,55 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
1134 return 0; 1134 return 0;
1135} 1135}
1136 1136
1137/* This function parse cal data from ASCII to hex */
1138static u32 mwifiex_parse_cal_cfg(u8 *src, size_t len, u8 *dst)
1139{
1140 u8 *s = src, *d = dst;
1141
1142 while (s - src < len) {
1143 if (*s && (isspace(*s) || *s == '\t')) {
1144 s++;
1145 continue;
1146 }
1147 if (isxdigit(*s)) {
1148 *d++ = simple_strtol(s, NULL, 16);
1149 s += 2;
1150 } else {
1151 s++;
1152 }
1153 }
1154
1155 return d - dst;
1156}
1157
1158/* This function prepares command of set_cfg_data. */
1159static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
1160 struct host_cmd_ds_command *cmd,
1161 u16 cmd_action)
1162{
1163 struct host_cmd_ds_802_11_cfg_data *cfg_data = &cmd->params.cfg_data;
1164 struct mwifiex_adapter *adapter = priv->adapter;
1165 u32 len, cal_data_offset;
1166 u8 *tmp_cmd = (u8 *)cmd;
1167
1168 cal_data_offset = S_DS_GEN + sizeof(*cfg_data);
1169 if ((adapter->cal_data->data) && (adapter->cal_data->size > 0))
1170 len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
1171 adapter->cal_data->size,
1172 (u8 *)(tmp_cmd + cal_data_offset));
1173 else
1174 return -1;
1175
1176 cfg_data->action = cpu_to_le16(cmd_action);
1177 cfg_data->type = cpu_to_le16(CFG_DATA_TYPE_CAL);
1178 cfg_data->data_len = cpu_to_le16(len);
1179
1180 cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA);
1181 cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*cfg_data) + len);
1182
1183 return 0;
1184}
1185
1137/* 1186/*
1138 * This function prepares the commands before sending them to the firmware. 1187 * This function prepares the commands before sending them to the firmware.
1139 * 1188 *
@@ -1152,6 +1201,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1152 case HostCmd_CMD_GET_HW_SPEC: 1201 case HostCmd_CMD_GET_HW_SPEC:
1153 ret = mwifiex_cmd_get_hw_spec(priv, cmd_ptr); 1202 ret = mwifiex_cmd_get_hw_spec(priv, cmd_ptr);
1154 break; 1203 break;
1204 case HostCmd_CMD_CFG_DATA:
1205 ret = mwifiex_cmd_cfg_data(priv, cmd_ptr, cmd_action);
1206 break;
1155 case HostCmd_CMD_MAC_CONTROL: 1207 case HostCmd_CMD_MAC_CONTROL:
1156 ret = mwifiex_cmd_mac_control(priv, cmd_ptr, cmd_action, 1208 ret = mwifiex_cmd_mac_control(priv, cmd_ptr, cmd_action,
1157 data_buf); 1209 data_buf);
@@ -1384,6 +1436,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1384 */ 1436 */
1385int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta) 1437int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1386{ 1438{
1439 struct mwifiex_adapter *adapter = priv->adapter;
1387 int ret; 1440 int ret;
1388 u16 enable = true; 1441 u16 enable = true;
1389 struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl; 1442 struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl;
@@ -1404,6 +1457,15 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1404 HostCmd_ACT_GEN_SET, 0, NULL); 1457 HostCmd_ACT_GEN_SET, 0, NULL);
1405 if (ret) 1458 if (ret)
1406 return -1; 1459 return -1;
1460
1461 /* Download calibration data to firmware */
1462 if (adapter->cal_data) {
1463 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
1464 HostCmd_ACT_GEN_SET, 0, NULL);
1465 if (ret)
1466 return -1;
1467 }
1468
1407 /* Read MAC address from HW */ 1469 /* Read MAC address from HW */
1408 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC, 1470 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC,
1409 HostCmd_ACT_GEN_GET, 0, NULL); 1471 HostCmd_ACT_GEN_GET, 0, NULL);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 9f990e14966e..d85df158cc6c 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -818,6 +818,18 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
818 return 0; 818 return 0;
819} 819}
820 820
821/* This function handles the command response of set_cfg_data */
822static int mwifiex_ret_cfg_data(struct mwifiex_private *priv,
823 struct host_cmd_ds_command *resp)
824{
825 if (resp->result != HostCmd_RESULT_OK) {
826 dev_err(priv->adapter->dev, "Cal data cmd resp failed\n");
827 return -1;
828 }
829
830 return 0;
831}
832
821/* 833/*
822 * This function handles the command responses. 834 * This function handles the command responses.
823 * 835 *
@@ -841,6 +853,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
841 case HostCmd_CMD_GET_HW_SPEC: 853 case HostCmd_CMD_GET_HW_SPEC:
842 ret = mwifiex_ret_get_hw_spec(priv, resp); 854 ret = mwifiex_ret_get_hw_spec(priv, resp);
843 break; 855 break;
856 case HostCmd_CMD_CFG_DATA:
857 ret = mwifiex_ret_cfg_data(priv, resp);
858 break;
844 case HostCmd_CMD_MAC_CONTROL: 859 case HostCmd_CMD_MAC_CONTROL:
845 break; 860 break;
846 case HostCmd_CMD_802_11_MAC_ADDRESS: 861 case HostCmd_CMD_802_11_MAC_ADDRESS:
@@ -978,6 +993,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
978 case HostCmd_CMD_UAP_BSS_STOP: 993 case HostCmd_CMD_UAP_BSS_STOP:
979 priv->bss_started = 0; 994 priv->bss_started = 0;
980 break; 995 break;
996 case HostCmd_CMD_UAP_STA_DEAUTH:
997 break;
981 case HostCmd_CMD_MEF_CFG: 998 case HostCmd_CMD_MEF_CFG:
982 break; 999 break;
983 default: 1000 default:
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 41aafc7454ed..ea265ec0e522 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -427,6 +427,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
427 427
428 break; 428 break;
429 429
430 case EVENT_CHANNEL_SWITCH_ANN:
431 dev_dbg(adapter->dev, "event: Channel Switch Announcement\n");
432 priv->csa_expire_time =
433 jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME);
434 priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel;
435 ret = mwifiex_send_cmd_async(priv,
436 HostCmd_CMD_802_11_DEAUTHENTICATE,
437 HostCmd_ACT_GEN_SET, 0,
438 priv->curr_bss_params.bss_descriptor.mac_address);
439 break;
440
430 default: 441 default:
431 dev_dbg(adapter->dev, "event: unknown event id: %#x\n", 442 dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
432 eventcause); 443 eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 1a8a19dbd635..206c3e038072 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -104,16 +104,14 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
104 } else { 104 } else {
105 priv->curr_pkt_filter &= 105 priv->curr_pkt_filter &=
106 ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; 106 ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
107 if (mcast_list->num_multicast_addr) { 107 dev_dbg(priv->adapter->dev,
108 dev_dbg(priv->adapter->dev, 108 "info: Set multicast list=%d\n",
109 "info: Set multicast list=%d\n", 109 mcast_list->num_multicast_addr);
110 mcast_list->num_multicast_addr); 110 /* Send multicast addresses to firmware */
111 /* Send multicast addresses to firmware */ 111 ret = mwifiex_send_cmd_async(priv,
112 ret = mwifiex_send_cmd_async(priv, 112 HostCmd_CMD_MAC_MULTICAST_ADR,
113 HostCmd_CMD_MAC_MULTICAST_ADR, 113 HostCmd_ACT_GEN_SET, 0,
114 HostCmd_ACT_GEN_SET, 0, 114 mcast_list);
115 mcast_list);
116 }
117 } 115 }
118 } 116 }
119 dev_dbg(priv->adapter->dev, 117 dev_dbg(priv->adapter->dev,
@@ -180,6 +178,9 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
180 */ 178 */
181 bss_desc->disable_11ac = true; 179 bss_desc->disable_11ac = true;
182 180
181 if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_SPECTRUM_MGMT)
182 bss_desc->sensed_11h = true;
183
183 return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc); 184 return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
184} 185}
185 186
@@ -257,30 +258,37 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
257 } 258 }
258 259
259 if (priv->bss_mode == NL80211_IFTYPE_STATION) { 260 if (priv->bss_mode == NL80211_IFTYPE_STATION) {
261 u8 config_bands;
262
260 /* Infra mode */ 263 /* Infra mode */
261 ret = mwifiex_deauthenticate(priv, NULL); 264 ret = mwifiex_deauthenticate(priv, NULL);
262 if (ret) 265 if (ret)
263 goto done; 266 goto done;
264 267
265 if (bss_desc) { 268 if (!bss_desc)
266 u8 config_bands = 0; 269 return -1;
267 270
268 if (mwifiex_band_to_radio_type((u8) bss_desc->bss_band) 271 if (mwifiex_band_to_radio_type(bss_desc->bss_band) ==
269 == HostCmd_SCAN_RADIO_TYPE_BG) 272 HostCmd_SCAN_RADIO_TYPE_BG)
270 config_bands = BAND_B | BAND_G | BAND_GN | 273 config_bands = BAND_B | BAND_G | BAND_GN | BAND_GAC;
271 BAND_GAC; 274 else
272 else 275 config_bands = BAND_A | BAND_AN | BAND_AAC;
273 config_bands = BAND_A | BAND_AN | BAND_AAC;
274 276
275 if (!((config_bands | adapter->fw_bands) & 277 if (!((config_bands | adapter->fw_bands) & ~adapter->fw_bands))
276 ~adapter->fw_bands)) 278 adapter->config_bands = config_bands;
277 adapter->config_bands = config_bands;
278 }
279 279
280 ret = mwifiex_check_network_compatibility(priv, bss_desc); 280 ret = mwifiex_check_network_compatibility(priv, bss_desc);
281 if (ret) 281 if (ret)
282 goto done; 282 goto done;
283 283
284 if (mwifiex_11h_get_csa_closed_channel(priv) ==
285 (u8)bss_desc->channel) {
286 dev_err(adapter->dev,
287 "Attempt to reconnect on csa closed chan(%d)\n",
288 bss_desc->channel);
289 goto done;
290 }
291
284 dev_dbg(adapter->dev, "info: SSID found in scan list ... " 292 dev_dbg(adapter->dev, "info: SSID found in scan list ... "
285 "associating...\n"); 293 "associating...\n");
286 294
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index b04b1db29100..2de882dead0f 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -689,6 +689,23 @@ mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
689 return 0; 689 return 0;
690} 690}
691 691
692/* This function prepares AP specific deauth command with mac supplied in
693 * function parameter.
694 */
695static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv,
696 struct host_cmd_ds_command *cmd, u8 *mac)
697{
698 struct host_cmd_ds_sta_deauth *sta_deauth = &cmd->params.sta_deauth;
699
700 cmd->command = cpu_to_le16(HostCmd_CMD_UAP_STA_DEAUTH);
701 memcpy(sta_deauth->mac, mac, ETH_ALEN);
702 sta_deauth->reason = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
703
704 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_sta_deauth) +
705 S_DS_GEN);
706 return 0;
707}
708
692/* This function prepares the AP specific commands before sending them 709/* This function prepares the AP specific commands before sending them
693 * to the firmware. 710 * to the firmware.
694 * This is a generic function which calls specific command preparation 711 * This is a generic function which calls specific command preparation
@@ -710,6 +727,10 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
710 cmd->command = cpu_to_le16(cmd_no); 727 cmd->command = cpu_to_le16(cmd_no);
711 cmd->size = cpu_to_le16(S_DS_GEN); 728 cmd->size = cpu_to_le16(S_DS_GEN);
712 break; 729 break;
730 case HostCmd_CMD_UAP_STA_DEAUTH:
731 if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf))
732 return -1;
733 break;
713 default: 734 default:
714 dev_err(priv->adapter->dev, 735 dev_err(priv->adapter->dev,
715 "PREP_CMD: unknown cmd %#x\n", cmd_no); 736 "PREP_CMD: unknown cmd %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 21c640d3b579..718066577c6c 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -107,18 +107,15 @@ mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
107 */ 107 */
108static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac) 108static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
109{ 109{
110 struct mwifiex_sta_node *node, *tmp; 110 struct mwifiex_sta_node *node;
111 unsigned long flags; 111 unsigned long flags;
112 112
113 spin_lock_irqsave(&priv->sta_list_spinlock, flags); 113 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
114 114
115 node = mwifiex_get_sta_entry(priv, mac); 115 node = mwifiex_get_sta_entry(priv, mac);
116 if (node) { 116 if (node) {
117 list_for_each_entry_safe(node, tmp, &priv->sta_list, 117 list_del(&node->list);
118 list) { 118 kfree(node);
119 list_del(&node->list);
120 kfree(node);
121 }
122 } 119 }
123 120
124 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); 121 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
@@ -295,3 +292,19 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
295 292
296 return 0; 293 return 0;
297} 294}
295
296/* This function deletes station entry from associated station list.
297 * Also if both AP and STA are 11n enabled, RxReorder tables and TxBA stream
298 * tables created for this station are deleted.
299 */
300void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
301 struct mwifiex_sta_node *node)
302{
303 if (priv->ap_11n_enabled && node->is_11n_enabled) {
304 mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, node->mac_addr);
305 mwifiex_del_tx_ba_stream_tbl_by_ra(priv, node->mac_addr);
306 }
307 mwifiex_del_sta_entry(priv, node->mac_addr);
308
309 return;
310}
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 4be3d33ceae8..944e8846f6fc 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -37,6 +37,9 @@
37/* Offset for TOS field in the IP header */ 37/* Offset for TOS field in the IP header */
38#define IPTOS_OFFSET 5 38#define IPTOS_OFFSET 5
39 39
40static bool enable_tx_amsdu;
41module_param(enable_tx_amsdu, bool, 0644);
42
40/* WMM information IE */ 43/* WMM information IE */
41static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07, 44static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
42 0x00, 0x50, 0xf2, 0x02, 45 0x00, 0x50, 0xf2, 0x02,
@@ -1233,7 +1236,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1233 mwifiex_send_delba(priv, tid_del, ra, 1); 1236 mwifiex_send_delba(priv, tid_del, ra, 1);
1234 } 1237 }
1235 } 1238 }
1236 if (mwifiex_is_amsdu_allowed(priv, tid) && 1239 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
1237 mwifiex_is_11n_aggragation_possible(priv, ptr, 1240 mwifiex_is_11n_aggragation_possible(priv, ptr,
1238 adapter->tx_buf_size)) 1241 adapter->tx_buf_size))
1239 mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, 1242 mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 6820fce4016b..a3707fd4ef62 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1548,7 +1548,7 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1548 if (!priv->pending_tx_pkts) 1548 if (!priv->pending_tx_pkts)
1549 return 0; 1549 return 0;
1550 1550
1551 retry = 0; 1551 retry = 1;
1552 rc = 0; 1552 rc = 0;
1553 1553
1554 spin_lock_bh(&priv->tx_lock); 1554 spin_lock_bh(&priv->tx_lock);
@@ -1572,13 +1572,19 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1572 1572
1573 spin_lock_bh(&priv->tx_lock); 1573 spin_lock_bh(&priv->tx_lock);
1574 1574
1575 if (timeout) { 1575 if (timeout || !priv->pending_tx_pkts) {
1576 WARN_ON(priv->pending_tx_pkts); 1576 WARN_ON(priv->pending_tx_pkts);
1577 if (retry) 1577 if (retry)
1578 wiphy_notice(hw->wiphy, "tx rings drained\n"); 1578 wiphy_notice(hw->wiphy, "tx rings drained\n");
1579 break; 1579 break;
1580 } 1580 }
1581 1581
1582 if (retry) {
1583 mwl8k_tx_start(priv);
1584 retry = 0;
1585 continue;
1586 }
1587
1582 if (priv->pending_tx_pkts < oldcount) { 1588 if (priv->pending_tx_pkts < oldcount) {
1583 wiphy_notice(hw->wiphy, 1589 wiphy_notice(hw->wiphy,
1584 "waiting for tx rings to drain (%d -> %d pkts)\n", 1590 "waiting for tx rings to drain (%d -> %d pkts)\n",
@@ -2055,6 +2061,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
2055 mwl8k_remove_stream(hw, stream); 2061 mwl8k_remove_stream(hw, stream);
2056 spin_unlock(&priv->stream_lock); 2062 spin_unlock(&priv->stream_lock);
2057 } 2063 }
2064 mwl8k_tx_start(priv);
2058 spin_unlock_bh(&priv->tx_lock); 2065 spin_unlock_bh(&priv->tx_lock);
2059 pci_unmap_single(priv->pdev, dma, skb->len, 2066 pci_unmap_single(priv->pdev, dma, skb->len,
2060 PCI_DMA_TODEVICE); 2067 PCI_DMA_TODEVICE);
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.h b/drivers/net/wireless/orinoco/orinoco_pci.h
index ea7231af40a8..43f5b9f5a0b0 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.h
+++ b/drivers/net/wireless/orinoco/orinoco_pci.h
@@ -38,7 +38,7 @@ static int orinoco_pci_resume(struct pci_dev *pdev)
38 struct net_device *dev = priv->ndev; 38 struct net_device *dev = priv->ndev;
39 int err; 39 int err;
40 40
41 pci_set_power_state(pdev, 0); 41 pci_set_power_state(pdev, PCI_D0);
42 err = pci_enable_device(pdev); 42 err = pci_enable_device(pdev);
43 if (err) { 43 if (err) {
44 printk(KERN_ERR "%s: pci_enable_device failed on resume\n", 44 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 1f9cb55c3360..bdfe637953f4 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -881,7 +881,8 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
881 881
882 if (!upriv->udev) { 882 if (!upriv->udev) {
883 dbg("Device disconnected"); 883 dbg("Device disconnected");
884 return -ENODEV; 884 retval = -ENODEV;
885 goto exit;
885 } 886 }
886 887
887 if (upriv->read_urb->status != -EINPROGRESS) 888 if (upriv->read_urb->status != -EINPROGRESS)
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 978e7eb26567..7fc46f26cf2b 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -42,8 +42,7 @@
42 42
43MODULE_FIRMWARE("3826.arm"); 43MODULE_FIRMWARE("3826.arm");
44 44
45/* 45/* gpios should be handled in board files and provided via platform data,
46 * gpios should be handled in board files and provided via platform data,
47 * but because it's currently impossible for p54spi to have a header file 46 * but because it's currently impossible for p54spi to have a header file
48 * in include/linux, let's use module paramaters for now 47 * in include/linux, let's use module paramaters for now
49 */ 48 */
@@ -191,8 +190,7 @@ static int p54spi_request_eeprom(struct ieee80211_hw *dev)
191 const struct firmware *eeprom; 190 const struct firmware *eeprom;
192 int ret; 191 int ret;
193 192
194 /* 193 /* allow users to customize their eeprom.
195 * allow users to customize their eeprom.
196 */ 194 */
197 195
198 ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev); 196 ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev);
@@ -285,8 +283,7 @@ static void p54spi_power_on(struct p54s_priv *priv)
285 gpio_set_value(p54spi_gpio_power, 1); 283 gpio_set_value(p54spi_gpio_power, 1);
286 enable_irq(gpio_to_irq(p54spi_gpio_irq)); 284 enable_irq(gpio_to_irq(p54spi_gpio_irq));
287 285
288 /* 286 /* need to wait a while before device can be accessed, the length
289 * need to wait a while before device can be accessed, the length
290 * is just a guess 287 * is just a guess
291 */ 288 */
292 msleep(10); 289 msleep(10);
@@ -365,7 +362,8 @@ static int p54spi_rx(struct p54s_priv *priv)
365 /* Firmware may insert up to 4 padding bytes after the lmac header, 362 /* Firmware may insert up to 4 padding bytes after the lmac header,
366 * but it does not amend the size of SPI data transfer. 363 * but it does not amend the size of SPI data transfer.
367 * Such packets has correct data size in header, thus referencing 364 * Such packets has correct data size in header, thus referencing
368 * past the end of allocated skb. Reserve extra 4 bytes for this case */ 365 * past the end of allocated skb. Reserve extra 4 bytes for this case
366 */
369 skb = dev_alloc_skb(len + 4); 367 skb = dev_alloc_skb(len + 4);
370 if (!skb) { 368 if (!skb) {
371 p54spi_sleep(priv); 369 p54spi_sleep(priv);
@@ -383,7 +381,8 @@ static int p54spi_rx(struct p54s_priv *priv)
383 } 381 }
384 p54spi_sleep(priv); 382 p54spi_sleep(priv);
385 /* Put additional bytes to compensate for the possible 383 /* Put additional bytes to compensate for the possible
386 * alignment-caused truncation */ 384 * alignment-caused truncation
385 */
387 skb_put(skb, 4); 386 skb_put(skb, 4);
388 387
389 if (p54_rx(priv->hw, skb) == 0) 388 if (p54_rx(priv->hw, skb) == 0)
@@ -713,27 +712,7 @@ static struct spi_driver p54spi_driver = {
713 .remove = p54spi_remove, 712 .remove = p54spi_remove,
714}; 713};
715 714
716static int __init p54spi_init(void) 715module_spi_driver(p54spi_driver);
717{
718 int ret;
719
720 ret = spi_register_driver(&p54spi_driver);
721 if (ret < 0) {
722 printk(KERN_ERR "failed to register SPI driver: %d", ret);
723 goto out;
724 }
725
726out:
727 return ret;
728}
729
730static void __exit p54spi_exit(void)
731{
732 spi_unregister_driver(&p54spi_driver);
733}
734
735module_init(p54spi_init);
736module_exit(p54spi_exit);
737 716
738MODULE_LICENSE("GPL"); 717MODULE_LICENSE("GPL");
739MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>"); 718MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index f7143733d7e9..3d53a09da5a1 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1767,33 +1767,45 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1767 .config = rt2400pci_config, 1767 .config = rt2400pci_config,
1768}; 1768};
1769 1769
1770static const struct data_queue_desc rt2400pci_queue_rx = { 1770static void rt2400pci_queue_init(struct data_queue *queue)
1771 .entry_num = 24, 1771{
1772 .data_size = DATA_FRAME_SIZE, 1772 switch (queue->qid) {
1773 .desc_size = RXD_DESC_SIZE, 1773 case QID_RX:
1774 .priv_size = sizeof(struct queue_entry_priv_mmio), 1774 queue->limit = 24;
1775}; 1775 queue->data_size = DATA_FRAME_SIZE;
1776 queue->desc_size = RXD_DESC_SIZE;
1777 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1778 break;
1776 1779
1777static const struct data_queue_desc rt2400pci_queue_tx = { 1780 case QID_AC_VO:
1778 .entry_num = 24, 1781 case QID_AC_VI:
1779 .data_size = DATA_FRAME_SIZE, 1782 case QID_AC_BE:
1780 .desc_size = TXD_DESC_SIZE, 1783 case QID_AC_BK:
1781 .priv_size = sizeof(struct queue_entry_priv_mmio), 1784 queue->limit = 24;
1782}; 1785 queue->data_size = DATA_FRAME_SIZE;
1786 queue->desc_size = TXD_DESC_SIZE;
1787 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1788 break;
1783 1789
1784static const struct data_queue_desc rt2400pci_queue_bcn = { 1790 case QID_BEACON:
1785 .entry_num = 1, 1791 queue->limit = 1;
1786 .data_size = MGMT_FRAME_SIZE, 1792 queue->data_size = MGMT_FRAME_SIZE;
1787 .desc_size = TXD_DESC_SIZE, 1793 queue->desc_size = TXD_DESC_SIZE;
1788 .priv_size = sizeof(struct queue_entry_priv_mmio), 1794 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1789}; 1795 break;
1790 1796
1791static const struct data_queue_desc rt2400pci_queue_atim = { 1797 case QID_ATIM:
1792 .entry_num = 8, 1798 queue->limit = 8;
1793 .data_size = DATA_FRAME_SIZE, 1799 queue->data_size = DATA_FRAME_SIZE;
1794 .desc_size = TXD_DESC_SIZE, 1800 queue->desc_size = TXD_DESC_SIZE;
1795 .priv_size = sizeof(struct queue_entry_priv_mmio), 1801 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1796}; 1802 break;
1803
1804 default:
1805 BUG();
1806 break;
1807 }
1808}
1797 1809
1798static const struct rt2x00_ops rt2400pci_ops = { 1810static const struct rt2x00_ops rt2400pci_ops = {
1799 .name = KBUILD_MODNAME, 1811 .name = KBUILD_MODNAME,
@@ -1801,11 +1813,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
1801 .eeprom_size = EEPROM_SIZE, 1813 .eeprom_size = EEPROM_SIZE,
1802 .rf_size = RF_SIZE, 1814 .rf_size = RF_SIZE,
1803 .tx_queues = NUM_TX_QUEUES, 1815 .tx_queues = NUM_TX_QUEUES,
1804 .extra_tx_headroom = 0, 1816 .queue_init = rt2400pci_queue_init,
1805 .rx = &rt2400pci_queue_rx,
1806 .tx = &rt2400pci_queue_tx,
1807 .bcn = &rt2400pci_queue_bcn,
1808 .atim = &rt2400pci_queue_atim,
1809 .lib = &rt2400pci_rt2x00_ops, 1817 .lib = &rt2400pci_rt2x00_ops,
1810 .hw = &rt2400pci_mac80211_ops, 1818 .hw = &rt2400pci_mac80211_ops,
1811#ifdef CONFIG_RT2X00_LIB_DEBUGFS 1819#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 77e45b223d15..0ac5c589ddce 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -2056,33 +2056,45 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
2056 .config = rt2500pci_config, 2056 .config = rt2500pci_config,
2057}; 2057};
2058 2058
2059static const struct data_queue_desc rt2500pci_queue_rx = { 2059static void rt2500pci_queue_init(struct data_queue *queue)
2060 .entry_num = 32, 2060{
2061 .data_size = DATA_FRAME_SIZE, 2061 switch (queue->qid) {
2062 .desc_size = RXD_DESC_SIZE, 2062 case QID_RX:
2063 .priv_size = sizeof(struct queue_entry_priv_mmio), 2063 queue->limit = 32;
2064}; 2064 queue->data_size = DATA_FRAME_SIZE;
2065 queue->desc_size = RXD_DESC_SIZE;
2066 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
2067 break;
2065 2068
2066static const struct data_queue_desc rt2500pci_queue_tx = { 2069 case QID_AC_VO:
2067 .entry_num = 32, 2070 case QID_AC_VI:
2068 .data_size = DATA_FRAME_SIZE, 2071 case QID_AC_BE:
2069 .desc_size = TXD_DESC_SIZE, 2072 case QID_AC_BK:
2070 .priv_size = sizeof(struct queue_entry_priv_mmio), 2073 queue->limit = 32;
2071}; 2074 queue->data_size = DATA_FRAME_SIZE;
2075 queue->desc_size = TXD_DESC_SIZE;
2076 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
2077 break;
2072 2078
2073static const struct data_queue_desc rt2500pci_queue_bcn = { 2079 case QID_BEACON:
2074 .entry_num = 1, 2080 queue->limit = 1;
2075 .data_size = MGMT_FRAME_SIZE, 2081 queue->data_size = MGMT_FRAME_SIZE;
2076 .desc_size = TXD_DESC_SIZE, 2082 queue->desc_size = TXD_DESC_SIZE;
2077 .priv_size = sizeof(struct queue_entry_priv_mmio), 2083 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
2078}; 2084 break;
2079 2085
2080static const struct data_queue_desc rt2500pci_queue_atim = { 2086 case QID_ATIM:
2081 .entry_num = 8, 2087 queue->limit = 8;
2082 .data_size = DATA_FRAME_SIZE, 2088 queue->data_size = DATA_FRAME_SIZE;
2083 .desc_size = TXD_DESC_SIZE, 2089 queue->desc_size = TXD_DESC_SIZE;
2084 .priv_size = sizeof(struct queue_entry_priv_mmio), 2090 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
2085}; 2091 break;
2092
2093 default:
2094 BUG();
2095 break;
2096 }
2097}
2086 2098
2087static const struct rt2x00_ops rt2500pci_ops = { 2099static const struct rt2x00_ops rt2500pci_ops = {
2088 .name = KBUILD_MODNAME, 2100 .name = KBUILD_MODNAME,
@@ -2090,11 +2102,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
2090 .eeprom_size = EEPROM_SIZE, 2102 .eeprom_size = EEPROM_SIZE,
2091 .rf_size = RF_SIZE, 2103 .rf_size = RF_SIZE,
2092 .tx_queues = NUM_TX_QUEUES, 2104 .tx_queues = NUM_TX_QUEUES,
2093 .extra_tx_headroom = 0, 2105 .queue_init = rt2500pci_queue_init,
2094 .rx = &rt2500pci_queue_rx,
2095 .tx = &rt2500pci_queue_tx,
2096 .bcn = &rt2500pci_queue_bcn,
2097 .atim = &rt2500pci_queue_atim,
2098 .lib = &rt2500pci_rt2x00_ops, 2106 .lib = &rt2500pci_rt2x00_ops,
2099 .hw = &rt2500pci_mac80211_ops, 2107 .hw = &rt2500pci_mac80211_ops,
2100#ifdef CONFIG_RT2X00_LIB_DEBUGFS 2108#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index a7f7b365eff4..85acc79f68b8 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1867,33 +1867,45 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1867 .config = rt2500usb_config, 1867 .config = rt2500usb_config,
1868}; 1868};
1869 1869
1870static const struct data_queue_desc rt2500usb_queue_rx = { 1870static void rt2500usb_queue_init(struct data_queue *queue)
1871 .entry_num = 32, 1871{
1872 .data_size = DATA_FRAME_SIZE, 1872 switch (queue->qid) {
1873 .desc_size = RXD_DESC_SIZE, 1873 case QID_RX:
1874 .priv_size = sizeof(struct queue_entry_priv_usb), 1874 queue->limit = 32;
1875}; 1875 queue->data_size = DATA_FRAME_SIZE;
1876 queue->desc_size = RXD_DESC_SIZE;
1877 queue->priv_size = sizeof(struct queue_entry_priv_usb);
1878 break;
1876 1879
1877static const struct data_queue_desc rt2500usb_queue_tx = { 1880 case QID_AC_VO:
1878 .entry_num = 32, 1881 case QID_AC_VI:
1879 .data_size = DATA_FRAME_SIZE, 1882 case QID_AC_BE:
1880 .desc_size = TXD_DESC_SIZE, 1883 case QID_AC_BK:
1881 .priv_size = sizeof(struct queue_entry_priv_usb), 1884 queue->limit = 32;
1882}; 1885 queue->data_size = DATA_FRAME_SIZE;
1886 queue->desc_size = TXD_DESC_SIZE;
1887 queue->priv_size = sizeof(struct queue_entry_priv_usb);
1888 break;
1883 1889
1884static const struct data_queue_desc rt2500usb_queue_bcn = { 1890 case QID_BEACON:
1885 .entry_num = 1, 1891 queue->limit = 1;
1886 .data_size = MGMT_FRAME_SIZE, 1892 queue->data_size = MGMT_FRAME_SIZE;
1887 .desc_size = TXD_DESC_SIZE, 1893 queue->desc_size = TXD_DESC_SIZE;
1888 .priv_size = sizeof(struct queue_entry_priv_usb_bcn), 1894 queue->priv_size = sizeof(struct queue_entry_priv_usb_bcn);
1889}; 1895 break;
1890 1896
1891static const struct data_queue_desc rt2500usb_queue_atim = { 1897 case QID_ATIM:
1892 .entry_num = 8, 1898 queue->limit = 8;
1893 .data_size = DATA_FRAME_SIZE, 1899 queue->data_size = DATA_FRAME_SIZE;
1894 .desc_size = TXD_DESC_SIZE, 1900 queue->desc_size = TXD_DESC_SIZE;
1895 .priv_size = sizeof(struct queue_entry_priv_usb), 1901 queue->priv_size = sizeof(struct queue_entry_priv_usb);
1896}; 1902 break;
1903
1904 default:
1905 BUG();
1906 break;
1907 }
1908}
1897 1909
1898static const struct rt2x00_ops rt2500usb_ops = { 1910static const struct rt2x00_ops rt2500usb_ops = {
1899 .name = KBUILD_MODNAME, 1911 .name = KBUILD_MODNAME,
@@ -1901,11 +1913,7 @@ static const struct rt2x00_ops rt2500usb_ops = {
1901 .eeprom_size = EEPROM_SIZE, 1913 .eeprom_size = EEPROM_SIZE,
1902 .rf_size = RF_SIZE, 1914 .rf_size = RF_SIZE,
1903 .tx_queues = NUM_TX_QUEUES, 1915 .tx_queues = NUM_TX_QUEUES,
1904 .extra_tx_headroom = TXD_DESC_SIZE, 1916 .queue_init = rt2500usb_queue_init,
1905 .rx = &rt2500usb_queue_rx,
1906 .tx = &rt2500usb_queue_tx,
1907 .bcn = &rt2500usb_queue_bcn,
1908 .atim = &rt2500usb_queue_atim,
1909 .lib = &rt2500usb_rt2x00_ops, 1917 .lib = &rt2500usb_rt2x00_ops,
1910 .hw = &rt2500usb_mac80211_ops, 1918 .hw = &rt2500usb_mac80211_ops,
1911#ifdef CONFIG_RT2X00_LIB_DEBUGFS 1919#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index a7630d5ec892..d78c495a86a0 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -100,7 +100,7 @@
100#define CSR_REG_BASE 0x1000 100#define CSR_REG_BASE 0x1000
101#define CSR_REG_SIZE 0x0800 101#define CSR_REG_SIZE 0x0800
102#define EEPROM_BASE 0x0000 102#define EEPROM_BASE 0x0000
103#define EEPROM_SIZE 0x0110 103#define EEPROM_SIZE 0x0200
104#define BBP_BASE 0x0000 104#define BBP_BASE 0x0000
105#define BBP_SIZE 0x00ff 105#define BBP_SIZE 0x00ff
106#define RF_BASE 0x0004 106#define RF_BASE 0x0004
@@ -2625,11 +2625,13 @@ struct mac_iveiv_entry {
2625/* 2625/*
2626 * DMA descriptor defines. 2626 * DMA descriptor defines.
2627 */ 2627 */
2628#define TXWI_DESC_SIZE (4 * sizeof(__le32))
2629#define RXWI_DESC_SIZE (4 * sizeof(__le32))
2630 2628
2631#define TXWI_DESC_SIZE_5592 (5 * sizeof(__le32)) 2629#define TXWI_DESC_SIZE_4WORDS (4 * sizeof(__le32))
2632#define RXWI_DESC_SIZE_5592 (6 * sizeof(__le32)) 2630#define TXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32))
2631
2632#define RXWI_DESC_SIZE_4WORDS (4 * sizeof(__le32))
2633#define RXWI_DESC_SIZE_6WORDS (6 * sizeof(__le32))
2634
2633/* 2635/*
2634 * TX WI structure 2636 * TX WI structure
2635 */ 2637 */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 72f32e5caa4d..1f80ea5e29dd 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -840,7 +840,7 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
840 unsigned int beacon_base) 840 unsigned int beacon_base)
841{ 841{
842 int i; 842 int i;
843 const int txwi_desc_size = rt2x00dev->ops->bcn->winfo_size; 843 const int txwi_desc_size = rt2x00dev->bcn->winfo_size;
844 844
845 /* 845 /*
846 * For the Beacon base registers we only need to clear 846 * For the Beacon base registers we only need to clear
@@ -2392,7 +2392,7 @@ static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
2392 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); 2392 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
2393 2393
2394 rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr); 2394 rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
2395 if (info->default_power1 > power_bound) 2395 if (info->default_power2 > power_bound)
2396 rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound); 2396 rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound);
2397 else 2397 else
2398 rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2); 2398 rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2);
@@ -2678,30 +2678,53 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2678 2678
2679 tx_pin = 0; 2679 tx_pin = 0;
2680 2680
2681 /* Turn on unused PA or LNA when not using 1T or 1R */ 2681 switch (rt2x00dev->default_ant.tx_chain_num) {
2682 if (rt2x00dev->default_ant.tx_chain_num == 2) { 2682 case 3:
2683 /* Turn on tertiary PAs */
2684 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A2_EN,
2685 rf->channel > 14);
2686 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN,
2687 rf->channel <= 14);
2688 /* fall-through */
2689 case 2:
2690 /* Turn on secondary PAs */
2683 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 2691 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN,
2684 rf->channel > 14); 2692 rf->channel > 14);
2685 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 2693 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN,
2686 rf->channel <= 14); 2694 rf->channel <= 14);
2695 /* fall-through */
2696 case 1:
2697 /* Turn on primary PAs */
2698 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
2699 rf->channel > 14);
2700 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
2701 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
2702 else
2703 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
2704 rf->channel <= 14);
2705 break;
2687 } 2706 }
2688 2707
2689 /* Turn on unused PA or LNA when not using 1T or 1R */ 2708 switch (rt2x00dev->default_ant.rx_chain_num) {
2690 if (rt2x00dev->default_ant.rx_chain_num == 2) { 2709 case 3:
2710 /* Turn on tertiary LNAs */
2711 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
2712 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
2713 /* fall-through */
2714 case 2:
2715 /* Turn on secondary LNAs */
2691 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1); 2716 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
2692 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1); 2717 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
2718 /* fall-through */
2719 case 1:
2720 /* Turn on primary LNAs */
2721 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
2722 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
2723 break;
2693 } 2724 }
2694 2725
2695 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
2696 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
2697 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1); 2726 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
2698 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1); 2727 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
2699 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
2700 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
2701 else
2702 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
2703 rf->channel <= 14);
2704 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);
2705 2728
2706 rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin); 2729 rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
2707 2730
@@ -3960,379 +3983,577 @@ static void rt2800_init_bbp_early(struct rt2x00_dev *rt2x00dev)
3960 rt2800_bbp_write(rt2x00dev, 106, 0x35); 3983 rt2800_bbp_write(rt2x00dev, 106, 0x35);
3961} 3984}
3962 3985
3963static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) 3986static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev)
3964{ 3987{
3965 int ant, div_mode;
3966 u16 eeprom; 3988 u16 eeprom;
3967 u8 value; 3989 u8 value;
3968 3990
3969 rt2800_init_bbp_early(rt2x00dev); 3991 rt2800_bbp_read(rt2x00dev, 138, &value);
3992 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
3993 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
3994 value |= 0x20;
3995 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
3996 value &= ~0x02;
3997 rt2800_bbp_write(rt2x00dev, 138, value);
3998}
3970 3999
3971 rt2800_bbp_read(rt2x00dev, 105, &value); 4000static void rt2800_init_bbp_305x_soc(struct rt2x00_dev *rt2x00dev)
3972 rt2x00_set_field8(&value, BBP105_MLD, 4001{
3973 rt2x00dev->default_ant.rx_chain_num == 2); 4002 rt2800_bbp_write(rt2x00dev, 31, 0x08);
3974 rt2800_bbp_write(rt2x00dev, 105, value); 4003
4004 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
4005 rt2800_bbp_write(rt2x00dev, 66, 0x38);
4006
4007 rt2800_bbp_write(rt2x00dev, 69, 0x12);
4008 rt2800_bbp_write(rt2x00dev, 73, 0x10);
4009
4010 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
4011
4012 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
4013 rt2800_bbp_write(rt2x00dev, 80, 0x08);
4014
4015 rt2800_bbp_write(rt2x00dev, 82, 0x62);
4016
4017 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
4018
4019 rt2800_bbp_write(rt2x00dev, 84, 0x99);
4020
4021 rt2800_bbp_write(rt2x00dev, 86, 0x00);
4022
4023 rt2800_bbp_write(rt2x00dev, 91, 0x04);
4024
4025 rt2800_bbp_write(rt2x00dev, 92, 0x00);
4026
4027 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4028
4029 rt2800_bbp_write(rt2x00dev, 105, 0x01);
4030
4031 rt2800_bbp_write(rt2x00dev, 106, 0x35);
4032}
4033
4034static void rt2800_init_bbp_28xx(struct rt2x00_dev *rt2x00dev)
4035{
4036 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
4037 rt2800_bbp_write(rt2x00dev, 66, 0x38);
4038
4039 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
4040 rt2800_bbp_write(rt2x00dev, 69, 0x16);
4041 rt2800_bbp_write(rt2x00dev, 73, 0x12);
4042 } else {
4043 rt2800_bbp_write(rt2x00dev, 69, 0x12);
4044 rt2800_bbp_write(rt2x00dev, 73, 0x10);
4045 }
4046
4047 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
4048
4049 rt2800_bbp_write(rt2x00dev, 81, 0x37);
4050
4051 rt2800_bbp_write(rt2x00dev, 82, 0x62);
4052
4053 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
4054
4055 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
4056 rt2800_bbp_write(rt2x00dev, 84, 0x19);
4057 else
4058 rt2800_bbp_write(rt2x00dev, 84, 0x99);
4059
4060 rt2800_bbp_write(rt2x00dev, 86, 0x00);
4061
4062 rt2800_bbp_write(rt2x00dev, 91, 0x04);
4063
4064 rt2800_bbp_write(rt2x00dev, 92, 0x00);
4065
4066 rt2800_bbp_write(rt2x00dev, 103, 0x00);
4067
4068 rt2800_bbp_write(rt2x00dev, 105, 0x05);
4069
4070 rt2800_bbp_write(rt2x00dev, 106, 0x35);
4071}
4072
4073static void rt2800_init_bbp_30xx(struct rt2x00_dev *rt2x00dev)
4074{
4075 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
4076 rt2800_bbp_write(rt2x00dev, 66, 0x38);
4077
4078 rt2800_bbp_write(rt2x00dev, 69, 0x12);
4079 rt2800_bbp_write(rt2x00dev, 73, 0x10);
4080
4081 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
4082
4083 rt2800_bbp_write(rt2x00dev, 79, 0x13);
4084 rt2800_bbp_write(rt2x00dev, 80, 0x05);
4085 rt2800_bbp_write(rt2x00dev, 81, 0x33);
4086
4087 rt2800_bbp_write(rt2x00dev, 82, 0x62);
4088
4089 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
4090
4091 rt2800_bbp_write(rt2x00dev, 84, 0x99);
4092
4093 rt2800_bbp_write(rt2x00dev, 86, 0x00);
4094
4095 rt2800_bbp_write(rt2x00dev, 91, 0x04);
4096
4097 rt2800_bbp_write(rt2x00dev, 92, 0x00);
4098
4099 if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
4100 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
4101 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E))
4102 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4103 else
4104 rt2800_bbp_write(rt2x00dev, 103, 0x00);
4105
4106 rt2800_bbp_write(rt2x00dev, 105, 0x05);
4107
4108 rt2800_bbp_write(rt2x00dev, 106, 0x35);
4109
4110 if (rt2x00_rt(rt2x00dev, RT3071) ||
4111 rt2x00_rt(rt2x00dev, RT3090))
4112 rt2800_disable_unused_dac_adc(rt2x00dev);
4113}
4114
4115static void rt2800_init_bbp_3290(struct rt2x00_dev *rt2x00dev)
4116{
4117 u8 value;
3975 4118
3976 rt2800_bbp4_mac_if_ctrl(rt2x00dev); 4119 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
3977 4120
3978 rt2800_bbp_write(rt2x00dev, 20, 0x06);
3979 rt2800_bbp_write(rt2x00dev, 31, 0x08); 4121 rt2800_bbp_write(rt2x00dev, 31, 0x08);
3980 rt2800_bbp_write(rt2x00dev, 65, 0x2C); 4122
3981 rt2800_bbp_write(rt2x00dev, 68, 0xDD); 4123 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
3982 rt2800_bbp_write(rt2x00dev, 69, 0x1A); 4124 rt2800_bbp_write(rt2x00dev, 66, 0x38);
3983 rt2800_bbp_write(rt2x00dev, 70, 0x05); 4125
4126 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
4127
4128 rt2800_bbp_write(rt2x00dev, 69, 0x12);
3984 rt2800_bbp_write(rt2x00dev, 73, 0x13); 4129 rt2800_bbp_write(rt2x00dev, 73, 0x13);
3985 rt2800_bbp_write(rt2x00dev, 74, 0x0F); 4130 rt2800_bbp_write(rt2x00dev, 75, 0x46);
3986 rt2800_bbp_write(rt2x00dev, 75, 0x4F);
3987 rt2800_bbp_write(rt2x00dev, 76, 0x28); 4131 rt2800_bbp_write(rt2x00dev, 76, 0x28);
4132
4133 rt2800_bbp_write(rt2x00dev, 77, 0x58);
4134
4135 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
4136
4137 rt2800_bbp_write(rt2x00dev, 74, 0x0b);
4138 rt2800_bbp_write(rt2x00dev, 79, 0x18);
4139 rt2800_bbp_write(rt2x00dev, 80, 0x09);
4140 rt2800_bbp_write(rt2x00dev, 81, 0x33);
4141
4142 rt2800_bbp_write(rt2x00dev, 82, 0x62);
4143
4144 rt2800_bbp_write(rt2x00dev, 83, 0x7a);
4145
4146 rt2800_bbp_write(rt2x00dev, 84, 0x9a);
4147
4148 rt2800_bbp_write(rt2x00dev, 86, 0x38);
4149
4150 rt2800_bbp_write(rt2x00dev, 91, 0x04);
4151
4152 rt2800_bbp_write(rt2x00dev, 92, 0x02);
4153
4154 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4155
4156 rt2800_bbp_write(rt2x00dev, 104, 0x92);
4157
4158 rt2800_bbp_write(rt2x00dev, 105, 0x1c);
4159
4160 rt2800_bbp_write(rt2x00dev, 106, 0x03);
4161
4162 rt2800_bbp_write(rt2x00dev, 128, 0x12);
4163
4164 rt2800_bbp_write(rt2x00dev, 67, 0x24);
4165 rt2800_bbp_write(rt2x00dev, 143, 0x04);
4166 rt2800_bbp_write(rt2x00dev, 142, 0x99);
4167 rt2800_bbp_write(rt2x00dev, 150, 0x30);
4168 rt2800_bbp_write(rt2x00dev, 151, 0x2e);
4169 rt2800_bbp_write(rt2x00dev, 152, 0x20);
4170 rt2800_bbp_write(rt2x00dev, 153, 0x34);
4171 rt2800_bbp_write(rt2x00dev, 154, 0x40);
4172 rt2800_bbp_write(rt2x00dev, 155, 0x3b);
4173 rt2800_bbp_write(rt2x00dev, 253, 0x04);
4174
4175 rt2800_bbp_read(rt2x00dev, 47, &value);
4176 rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
4177 rt2800_bbp_write(rt2x00dev, 47, value);
4178
4179 /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
4180 rt2800_bbp_read(rt2x00dev, 3, &value);
4181 rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
4182 rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
4183 rt2800_bbp_write(rt2x00dev, 3, value);
4184}
4185
4186static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev)
4187{
4188 rt2800_bbp_write(rt2x00dev, 3, 0x00);
4189 rt2800_bbp_write(rt2x00dev, 4, 0x50);
4190
4191 rt2800_bbp_write(rt2x00dev, 31, 0x08);
4192
4193 rt2800_bbp_write(rt2x00dev, 47, 0x48);
4194
4195 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
4196 rt2800_bbp_write(rt2x00dev, 66, 0x38);
4197
4198 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
4199
4200 rt2800_bbp_write(rt2x00dev, 69, 0x12);
4201 rt2800_bbp_write(rt2x00dev, 73, 0x13);
4202 rt2800_bbp_write(rt2x00dev, 75, 0x46);
4203 rt2800_bbp_write(rt2x00dev, 76, 0x28);
4204
3988 rt2800_bbp_write(rt2x00dev, 77, 0x59); 4205 rt2800_bbp_write(rt2x00dev, 77, 0x59);
3989 rt2800_bbp_write(rt2x00dev, 84, 0x9A); 4206
4207 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
4208
4209 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
4210 rt2800_bbp_write(rt2x00dev, 80, 0x08);
4211 rt2800_bbp_write(rt2x00dev, 81, 0x37);
4212
4213 rt2800_bbp_write(rt2x00dev, 82, 0x62);
4214
4215 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
4216
4217 rt2800_bbp_write(rt2x00dev, 84, 0x99);
4218
3990 rt2800_bbp_write(rt2x00dev, 86, 0x38); 4219 rt2800_bbp_write(rt2x00dev, 86, 0x38);
4220
3991 rt2800_bbp_write(rt2x00dev, 88, 0x90); 4221 rt2800_bbp_write(rt2x00dev, 88, 0x90);
4222
3992 rt2800_bbp_write(rt2x00dev, 91, 0x04); 4223 rt2800_bbp_write(rt2x00dev, 91, 0x04);
4224
3993 rt2800_bbp_write(rt2x00dev, 92, 0x02); 4225 rt2800_bbp_write(rt2x00dev, 92, 0x02);
3994 rt2800_bbp_write(rt2x00dev, 95, 0x9a); 4226
3995 rt2800_bbp_write(rt2x00dev, 98, 0x12); 4227 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
3996 rt2800_bbp_write(rt2x00dev, 103, 0xC0); 4228
3997 rt2800_bbp_write(rt2x00dev, 104, 0x92); 4229 rt2800_bbp_write(rt2x00dev, 104, 0x92);
3998 /* FIXME BBP105 owerwrite */
3999 rt2800_bbp_write(rt2x00dev, 105, 0x3C);
4000 rt2800_bbp_write(rt2x00dev, 106, 0x35);
4001 rt2800_bbp_write(rt2x00dev, 128, 0x12);
4002 rt2800_bbp_write(rt2x00dev, 134, 0xD0);
4003 rt2800_bbp_write(rt2x00dev, 135, 0xF6);
4004 rt2800_bbp_write(rt2x00dev, 137, 0x0F);
4005 4230
4006 /* Initialize GLRT (Generalized Likehood Radio Test) */ 4231 rt2800_bbp_write(rt2x00dev, 105, 0x34);
4007 rt2800_init_bbp_5592_glrt(rt2x00dev); 4232
4233 rt2800_bbp_write(rt2x00dev, 106, 0x05);
4234
4235 rt2800_bbp_write(rt2x00dev, 120, 0x50);
4236
4237 rt2800_bbp_write(rt2x00dev, 137, 0x0f);
4238
4239 rt2800_bbp_write(rt2x00dev, 163, 0xbd);
4240 /* Set ITxBF timeout to 0x9c40=1000msec */
4241 rt2800_bbp_write(rt2x00dev, 179, 0x02);
4242 rt2800_bbp_write(rt2x00dev, 180, 0x00);
4243 rt2800_bbp_write(rt2x00dev, 182, 0x40);
4244 rt2800_bbp_write(rt2x00dev, 180, 0x01);
4245 rt2800_bbp_write(rt2x00dev, 182, 0x9c);
4246 rt2800_bbp_write(rt2x00dev, 179, 0x00);
4247 /* Reprogram the inband interface to put right values in RXWI */
4248 rt2800_bbp_write(rt2x00dev, 142, 0x04);
4249 rt2800_bbp_write(rt2x00dev, 143, 0x3b);
4250 rt2800_bbp_write(rt2x00dev, 142, 0x06);
4251 rt2800_bbp_write(rt2x00dev, 143, 0xa0);
4252 rt2800_bbp_write(rt2x00dev, 142, 0x07);
4253 rt2800_bbp_write(rt2x00dev, 143, 0xa1);
4254 rt2800_bbp_write(rt2x00dev, 142, 0x08);
4255 rt2800_bbp_write(rt2x00dev, 143, 0xa2);
4256
4257 rt2800_bbp_write(rt2x00dev, 148, 0xc8);
4258}
4008 4259
4009 rt2800_bbp4_mac_if_ctrl(rt2x00dev); 4260static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev)
4261{
4262 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
4263 rt2800_bbp_write(rt2x00dev, 66, 0x38);
4010 4264
4011 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 4265 rt2800_bbp_write(rt2x00dev, 69, 0x12);
4012 div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); 4266 rt2800_bbp_write(rt2x00dev, 73, 0x10);
4013 ant = (div_mode == 3) ? 1 : 0;
4014 rt2800_bbp_read(rt2x00dev, 152, &value);
4015 if (ant == 0) {
4016 /* Main antenna */
4017 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
4018 } else {
4019 /* Auxiliary antenna */
4020 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
4021 }
4022 rt2800_bbp_write(rt2x00dev, 152, value);
4023 4267
4024 if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) { 4268 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
4025 rt2800_bbp_read(rt2x00dev, 254, &value);
4026 rt2x00_set_field8(&value, BBP254_BIT7, 1);
4027 rt2800_bbp_write(rt2x00dev, 254, value);
4028 }
4029 4269
4030 rt2800_init_freq_calibration(rt2x00dev); 4270 rt2800_bbp_write(rt2x00dev, 79, 0x13);
4271 rt2800_bbp_write(rt2x00dev, 80, 0x05);
4272 rt2800_bbp_write(rt2x00dev, 81, 0x33);
4031 4273
4032 rt2800_bbp_write(rt2x00dev, 84, 0x19); 4274 rt2800_bbp_write(rt2x00dev, 82, 0x62);
4033 if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) 4275
4276 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
4277
4278 rt2800_bbp_write(rt2x00dev, 84, 0x99);
4279
4280 rt2800_bbp_write(rt2x00dev, 86, 0x00);
4281
4282 rt2800_bbp_write(rt2x00dev, 91, 0x04);
4283
4284 rt2800_bbp_write(rt2x00dev, 92, 0x00);
4285
4286 if (rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E))
4034 rt2800_bbp_write(rt2x00dev, 103, 0xc0); 4287 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4288 else
4289 rt2800_bbp_write(rt2x00dev, 103, 0x00);
4290
4291 rt2800_bbp_write(rt2x00dev, 105, 0x05);
4292
4293 rt2800_bbp_write(rt2x00dev, 106, 0x35);
4294
4295 rt2800_disable_unused_dac_adc(rt2x00dev);
4035} 4296}
4036 4297
4037static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) 4298static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev)
4038{ 4299{
4039 unsigned int i; 4300 rt2800_bbp_write(rt2x00dev, 31, 0x08);
4040 u16 eeprom;
4041 u8 reg_id;
4042 u8 value;
4043 4301
4044 if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || 4302 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
4045 rt2800_wait_bbp_ready(rt2x00dev))) 4303 rt2800_bbp_write(rt2x00dev, 66, 0x38);
4046 return -EACCES;
4047 4304
4048 if (rt2x00_rt(rt2x00dev, RT5592)) { 4305 rt2800_bbp_write(rt2x00dev, 69, 0x12);
4049 rt2800_init_bbp_5592(rt2x00dev); 4306 rt2800_bbp_write(rt2x00dev, 73, 0x10);
4050 return 0;
4051 }
4052 4307
4053 if (rt2x00_rt(rt2x00dev, RT3352)) { 4308 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
4054 rt2800_bbp_write(rt2x00dev, 3, 0x00);
4055 rt2800_bbp_write(rt2x00dev, 4, 0x50);
4056 }
4057 4309
4058 if (rt2x00_rt(rt2x00dev, RT3290) || 4310 rt2800_bbp_write(rt2x00dev, 79, 0x13);
4059 rt2x00_rt(rt2x00dev, RT5390) || 4311 rt2800_bbp_write(rt2x00dev, 80, 0x05);
4060 rt2x00_rt(rt2x00dev, RT5392)) 4312 rt2800_bbp_write(rt2x00dev, 81, 0x33);
4061 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
4062 4313
4063 if (rt2800_is_305x_soc(rt2x00dev) || 4314 rt2800_bbp_write(rt2x00dev, 82, 0x62);
4064 rt2x00_rt(rt2x00dev, RT3290) ||
4065 rt2x00_rt(rt2x00dev, RT3352) ||
4066 rt2x00_rt(rt2x00dev, RT3572) ||
4067 rt2x00_rt(rt2x00dev, RT5390) ||
4068 rt2x00_rt(rt2x00dev, RT5392))
4069 rt2800_bbp_write(rt2x00dev, 31, 0x08);
4070 4315
4071 if (rt2x00_rt(rt2x00dev, RT3352)) 4316 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
4072 rt2800_bbp_write(rt2x00dev, 47, 0x48); 4317
4318 rt2800_bbp_write(rt2x00dev, 84, 0x99);
4319
4320 rt2800_bbp_write(rt2x00dev, 86, 0x00);
4321
4322 rt2800_bbp_write(rt2x00dev, 91, 0x04);
4323
4324 rt2800_bbp_write(rt2x00dev, 92, 0x00);
4325
4326 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4327
4328 rt2800_bbp_write(rt2x00dev, 105, 0x05);
4329
4330 rt2800_bbp_write(rt2x00dev, 106, 0x35);
4331
4332 rt2800_disable_unused_dac_adc(rt2x00dev);
4333}
4334
4335static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
4336{
4337 int ant, div_mode;
4338 u16 eeprom;
4339 u8 value;
4340
4341 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
4342
4343 rt2800_bbp_write(rt2x00dev, 31, 0x08);
4073 4344
4074 rt2800_bbp_write(rt2x00dev, 65, 0x2c); 4345 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
4075 rt2800_bbp_write(rt2x00dev, 66, 0x38); 4346 rt2800_bbp_write(rt2x00dev, 66, 0x38);
4076 4347
4077 if (rt2x00_rt(rt2x00dev, RT3290) || 4348 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
4078 rt2x00_rt(rt2x00dev, RT3352) ||
4079 rt2x00_rt(rt2x00dev, RT5390) ||
4080 rt2x00_rt(rt2x00dev, RT5392))
4081 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
4082 4349
4083 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { 4350 rt2800_bbp_write(rt2x00dev, 69, 0x12);
4084 rt2800_bbp_write(rt2x00dev, 69, 0x16); 4351 rt2800_bbp_write(rt2x00dev, 73, 0x13);
4085 rt2800_bbp_write(rt2x00dev, 73, 0x12); 4352 rt2800_bbp_write(rt2x00dev, 75, 0x46);
4086 } else if (rt2x00_rt(rt2x00dev, RT3290) || 4353 rt2800_bbp_write(rt2x00dev, 76, 0x28);
4087 rt2x00_rt(rt2x00dev, RT3352) ||
4088 rt2x00_rt(rt2x00dev, RT5390) ||
4089 rt2x00_rt(rt2x00dev, RT5392)) {
4090 rt2800_bbp_write(rt2x00dev, 69, 0x12);
4091 rt2800_bbp_write(rt2x00dev, 73, 0x13);
4092 rt2800_bbp_write(rt2x00dev, 75, 0x46);
4093 rt2800_bbp_write(rt2x00dev, 76, 0x28);
4094 4354
4095 if (rt2x00_rt(rt2x00dev, RT3290)) 4355 rt2800_bbp_write(rt2x00dev, 77, 0x59);
4096 rt2800_bbp_write(rt2x00dev, 77, 0x58);
4097 else
4098 rt2800_bbp_write(rt2x00dev, 77, 0x59);
4099 } else {
4100 rt2800_bbp_write(rt2x00dev, 69, 0x12);
4101 rt2800_bbp_write(rt2x00dev, 73, 0x10);
4102 }
4103 4356
4104 rt2800_bbp_write(rt2x00dev, 70, 0x0a); 4357 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
4105 4358
4106 if (rt2x00_rt(rt2x00dev, RT3070) || 4359 rt2800_bbp_write(rt2x00dev, 79, 0x13);
4107 rt2x00_rt(rt2x00dev, RT3071) || 4360 rt2800_bbp_write(rt2x00dev, 80, 0x05);
4108 rt2x00_rt(rt2x00dev, RT3090) || 4361 rt2800_bbp_write(rt2x00dev, 81, 0x33);
4109 rt2x00_rt(rt2x00dev, RT3390) ||
4110 rt2x00_rt(rt2x00dev, RT3572) ||
4111 rt2x00_rt(rt2x00dev, RT5390) ||
4112 rt2x00_rt(rt2x00dev, RT5392)) {
4113 rt2800_bbp_write(rt2x00dev, 79, 0x13);
4114 rt2800_bbp_write(rt2x00dev, 80, 0x05);
4115 rt2800_bbp_write(rt2x00dev, 81, 0x33);
4116 } else if (rt2800_is_305x_soc(rt2x00dev)) {
4117 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
4118 rt2800_bbp_write(rt2x00dev, 80, 0x08);
4119 } else if (rt2x00_rt(rt2x00dev, RT3290)) {
4120 rt2800_bbp_write(rt2x00dev, 74, 0x0b);
4121 rt2800_bbp_write(rt2x00dev, 79, 0x18);
4122 rt2800_bbp_write(rt2x00dev, 80, 0x09);
4123 rt2800_bbp_write(rt2x00dev, 81, 0x33);
4124 } else if (rt2x00_rt(rt2x00dev, RT3352)) {
4125 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
4126 rt2800_bbp_write(rt2x00dev, 80, 0x08);
4127 rt2800_bbp_write(rt2x00dev, 81, 0x37);
4128 } else {
4129 rt2800_bbp_write(rt2x00dev, 81, 0x37);
4130 }
4131 4362
4132 rt2800_bbp_write(rt2x00dev, 82, 0x62); 4363 rt2800_bbp_write(rt2x00dev, 82, 0x62);
4133 if (rt2x00_rt(rt2x00dev, RT3290) ||
4134 rt2x00_rt(rt2x00dev, RT5390) ||
4135 rt2x00_rt(rt2x00dev, RT5392))
4136 rt2800_bbp_write(rt2x00dev, 83, 0x7a);
4137 else
4138 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
4139 4364
4140 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) 4365 rt2800_bbp_write(rt2x00dev, 83, 0x7a);
4141 rt2800_bbp_write(rt2x00dev, 84, 0x19);
4142 else if (rt2x00_rt(rt2x00dev, RT3290) ||
4143 rt2x00_rt(rt2x00dev, RT5390) ||
4144 rt2x00_rt(rt2x00dev, RT5392))
4145 rt2800_bbp_write(rt2x00dev, 84, 0x9a);
4146 else
4147 rt2800_bbp_write(rt2x00dev, 84, 0x99);
4148 4366
4149 if (rt2x00_rt(rt2x00dev, RT3290) || 4367 rt2800_bbp_write(rt2x00dev, 84, 0x9a);
4150 rt2x00_rt(rt2x00dev, RT3352) ||
4151 rt2x00_rt(rt2x00dev, RT5390) ||
4152 rt2x00_rt(rt2x00dev, RT5392))
4153 rt2800_bbp_write(rt2x00dev, 86, 0x38);
4154 else
4155 rt2800_bbp_write(rt2x00dev, 86, 0x00);
4156 4368
4157 if (rt2x00_rt(rt2x00dev, RT3352) || 4369 rt2800_bbp_write(rt2x00dev, 86, 0x38);
4158 rt2x00_rt(rt2x00dev, RT5392)) 4370
4371 if (rt2x00_rt(rt2x00dev, RT5392))
4159 rt2800_bbp_write(rt2x00dev, 88, 0x90); 4372 rt2800_bbp_write(rt2x00dev, 88, 0x90);
4160 4373
4161 rt2800_bbp_write(rt2x00dev, 91, 0x04); 4374 rt2800_bbp_write(rt2x00dev, 91, 0x04);
4162 4375
4163 if (rt2x00_rt(rt2x00dev, RT3290) || 4376 rt2800_bbp_write(rt2x00dev, 92, 0x02);
4164 rt2x00_rt(rt2x00dev, RT3352) ||
4165 rt2x00_rt(rt2x00dev, RT5390) ||
4166 rt2x00_rt(rt2x00dev, RT5392))
4167 rt2800_bbp_write(rt2x00dev, 92, 0x02);
4168 else
4169 rt2800_bbp_write(rt2x00dev, 92, 0x00);
4170 4377
4171 if (rt2x00_rt(rt2x00dev, RT5392)) { 4378 if (rt2x00_rt(rt2x00dev, RT5392)) {
4172 rt2800_bbp_write(rt2x00dev, 95, 0x9a); 4379 rt2800_bbp_write(rt2x00dev, 95, 0x9a);
4173 rt2800_bbp_write(rt2x00dev, 98, 0x12); 4380 rt2800_bbp_write(rt2x00dev, 98, 0x12);
4174 } 4381 }
4175 4382
4176 if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || 4383 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4177 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
4178 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
4179 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
4180 rt2x00_rt(rt2x00dev, RT3290) ||
4181 rt2x00_rt(rt2x00dev, RT3352) ||
4182 rt2x00_rt(rt2x00dev, RT3572) ||
4183 rt2x00_rt(rt2x00dev, RT5390) ||
4184 rt2x00_rt(rt2x00dev, RT5392) ||
4185 rt2800_is_305x_soc(rt2x00dev))
4186 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4187 else
4188 rt2800_bbp_write(rt2x00dev, 103, 0x00);
4189 4384
4190 if (rt2x00_rt(rt2x00dev, RT3290) || 4385 rt2800_bbp_write(rt2x00dev, 104, 0x92);
4191 rt2x00_rt(rt2x00dev, RT3352) ||
4192 rt2x00_rt(rt2x00dev, RT5390) ||
4193 rt2x00_rt(rt2x00dev, RT5392))
4194 rt2800_bbp_write(rt2x00dev, 104, 0x92);
4195 4386
4196 if (rt2800_is_305x_soc(rt2x00dev)) 4387 rt2800_bbp_write(rt2x00dev, 105, 0x3c);
4197 rt2800_bbp_write(rt2x00dev, 105, 0x01);
4198 else if (rt2x00_rt(rt2x00dev, RT3290))
4199 rt2800_bbp_write(rt2x00dev, 105, 0x1c);
4200 else if (rt2x00_rt(rt2x00dev, RT3352))
4201 rt2800_bbp_write(rt2x00dev, 105, 0x34);
4202 else if (rt2x00_rt(rt2x00dev, RT5390) ||
4203 rt2x00_rt(rt2x00dev, RT5392))
4204 rt2800_bbp_write(rt2x00dev, 105, 0x3c);
4205 else
4206 rt2800_bbp_write(rt2x00dev, 105, 0x05);
4207 4388
4208 if (rt2x00_rt(rt2x00dev, RT3290) || 4389 if (rt2x00_rt(rt2x00dev, RT5390))
4209 rt2x00_rt(rt2x00dev, RT5390))
4210 rt2800_bbp_write(rt2x00dev, 106, 0x03); 4390 rt2800_bbp_write(rt2x00dev, 106, 0x03);
4211 else if (rt2x00_rt(rt2x00dev, RT3352))
4212 rt2800_bbp_write(rt2x00dev, 106, 0x05);
4213 else if (rt2x00_rt(rt2x00dev, RT5392)) 4391 else if (rt2x00_rt(rt2x00dev, RT5392))
4214 rt2800_bbp_write(rt2x00dev, 106, 0x12); 4392 rt2800_bbp_write(rt2x00dev, 106, 0x12);
4215 else 4393 else
4216 rt2800_bbp_write(rt2x00dev, 106, 0x35); 4394 WARN_ON(1);
4217
4218 if (rt2x00_rt(rt2x00dev, RT3352))
4219 rt2800_bbp_write(rt2x00dev, 120, 0x50);
4220 4395
4221 if (rt2x00_rt(rt2x00dev, RT3290) || 4396 rt2800_bbp_write(rt2x00dev, 128, 0x12);
4222 rt2x00_rt(rt2x00dev, RT5390) ||
4223 rt2x00_rt(rt2x00dev, RT5392))
4224 rt2800_bbp_write(rt2x00dev, 128, 0x12);
4225 4397
4226 if (rt2x00_rt(rt2x00dev, RT5392)) { 4398 if (rt2x00_rt(rt2x00dev, RT5392)) {
4227 rt2800_bbp_write(rt2x00dev, 134, 0xd0); 4399 rt2800_bbp_write(rt2x00dev, 134, 0xd0);
4228 rt2800_bbp_write(rt2x00dev, 135, 0xf6); 4400 rt2800_bbp_write(rt2x00dev, 135, 0xf6);
4229 } 4401 }
4230 4402
4231 if (rt2x00_rt(rt2x00dev, RT3352)) 4403 rt2800_disable_unused_dac_adc(rt2x00dev);
4232 rt2800_bbp_write(rt2x00dev, 137, 0x0f);
4233 4404
4234 if (rt2x00_rt(rt2x00dev, RT3071) || 4405 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
4235 rt2x00_rt(rt2x00dev, RT3090) || 4406 div_mode = rt2x00_get_field16(eeprom,
4236 rt2x00_rt(rt2x00dev, RT3390) || 4407 EEPROM_NIC_CONF1_ANT_DIVERSITY);
4237 rt2x00_rt(rt2x00dev, RT3572) || 4408 ant = (div_mode == 3) ? 1 : 0;
4238 rt2x00_rt(rt2x00dev, RT5390) ||
4239 rt2x00_rt(rt2x00dev, RT5392)) {
4240 rt2800_bbp_read(rt2x00dev, 138, &value);
4241 4409
4242 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 4410 /* check if this is a Bluetooth combo card */
4243 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) 4411 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
4244 value |= 0x20; 4412 u32 reg;
4245 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
4246 value &= ~0x02;
4247 4413
4248 rt2800_bbp_write(rt2x00dev, 138, value); 4414 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
4415 rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
4416 rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
4417 rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
4418 rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
4419 if (ant == 0)
4420 rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
4421 else if (ant == 1)
4422 rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
4423 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
4249 } 4424 }
4250 4425
4251 if (rt2x00_rt(rt2x00dev, RT3290)) { 4426 /* This chip has hardware antenna diversity*/
4252 rt2800_bbp_write(rt2x00dev, 67, 0x24); 4427 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
4253 rt2800_bbp_write(rt2x00dev, 143, 0x04); 4428 rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */
4254 rt2800_bbp_write(rt2x00dev, 142, 0x99); 4429 rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */
4255 rt2800_bbp_write(rt2x00dev, 150, 0x30); 4430 rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */
4256 rt2800_bbp_write(rt2x00dev, 151, 0x2e);
4257 rt2800_bbp_write(rt2x00dev, 152, 0x20);
4258 rt2800_bbp_write(rt2x00dev, 153, 0x34);
4259 rt2800_bbp_write(rt2x00dev, 154, 0x40);
4260 rt2800_bbp_write(rt2x00dev, 155, 0x3b);
4261 rt2800_bbp_write(rt2x00dev, 253, 0x04);
4262
4263 rt2800_bbp_read(rt2x00dev, 47, &value);
4264 rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
4265 rt2800_bbp_write(rt2x00dev, 47, value);
4266
4267 /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
4268 rt2800_bbp_read(rt2x00dev, 3, &value);
4269 rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
4270 rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
4271 rt2800_bbp_write(rt2x00dev, 3, value);
4272 } 4431 }
4273 4432
4274 if (rt2x00_rt(rt2x00dev, RT3352)) { 4433 rt2800_bbp_read(rt2x00dev, 152, &value);
4275 rt2800_bbp_write(rt2x00dev, 163, 0xbd); 4434 if (ant == 0)
4276 /* Set ITxBF timeout to 0x9c40=1000msec */ 4435 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
4277 rt2800_bbp_write(rt2x00dev, 179, 0x02); 4436 else
4278 rt2800_bbp_write(rt2x00dev, 180, 0x00); 4437 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
4279 rt2800_bbp_write(rt2x00dev, 182, 0x40); 4438 rt2800_bbp_write(rt2x00dev, 152, value);
4280 rt2800_bbp_write(rt2x00dev, 180, 0x01); 4439
4281 rt2800_bbp_write(rt2x00dev, 182, 0x9c); 4440 rt2800_init_freq_calibration(rt2x00dev);
4282 rt2800_bbp_write(rt2x00dev, 179, 0x00); 4441}
4283 /* Reprogram the inband interface to put right values in RXWI */ 4442
4284 rt2800_bbp_write(rt2x00dev, 142, 0x04); 4443static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
4285 rt2800_bbp_write(rt2x00dev, 143, 0x3b); 4444{
4286 rt2800_bbp_write(rt2x00dev, 142, 0x06); 4445 int ant, div_mode;
4287 rt2800_bbp_write(rt2x00dev, 143, 0xa0); 4446 u16 eeprom;
4288 rt2800_bbp_write(rt2x00dev, 142, 0x07); 4447 u8 value;
4289 rt2800_bbp_write(rt2x00dev, 143, 0xa1); 4448
4290 rt2800_bbp_write(rt2x00dev, 142, 0x08); 4449 rt2800_init_bbp_early(rt2x00dev);
4291 rt2800_bbp_write(rt2x00dev, 143, 0xa2); 4450
4292 4451 rt2800_bbp_read(rt2x00dev, 105, &value);
4293 rt2800_bbp_write(rt2x00dev, 148, 0xc8); 4452 rt2x00_set_field8(&value, BBP105_MLD,
4453 rt2x00dev->default_ant.rx_chain_num == 2);
4454 rt2800_bbp_write(rt2x00dev, 105, value);
4455
4456 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
4457
4458 rt2800_bbp_write(rt2x00dev, 20, 0x06);
4459 rt2800_bbp_write(rt2x00dev, 31, 0x08);
4460 rt2800_bbp_write(rt2x00dev, 65, 0x2C);
4461 rt2800_bbp_write(rt2x00dev, 68, 0xDD);
4462 rt2800_bbp_write(rt2x00dev, 69, 0x1A);
4463 rt2800_bbp_write(rt2x00dev, 70, 0x05);
4464 rt2800_bbp_write(rt2x00dev, 73, 0x13);
4465 rt2800_bbp_write(rt2x00dev, 74, 0x0F);
4466 rt2800_bbp_write(rt2x00dev, 75, 0x4F);
4467 rt2800_bbp_write(rt2x00dev, 76, 0x28);
4468 rt2800_bbp_write(rt2x00dev, 77, 0x59);
4469 rt2800_bbp_write(rt2x00dev, 84, 0x9A);
4470 rt2800_bbp_write(rt2x00dev, 86, 0x38);
4471 rt2800_bbp_write(rt2x00dev, 88, 0x90);
4472 rt2800_bbp_write(rt2x00dev, 91, 0x04);
4473 rt2800_bbp_write(rt2x00dev, 92, 0x02);
4474 rt2800_bbp_write(rt2x00dev, 95, 0x9a);
4475 rt2800_bbp_write(rt2x00dev, 98, 0x12);
4476 rt2800_bbp_write(rt2x00dev, 103, 0xC0);
4477 rt2800_bbp_write(rt2x00dev, 104, 0x92);
4478 /* FIXME BBP105 owerwrite */
4479 rt2800_bbp_write(rt2x00dev, 105, 0x3C);
4480 rt2800_bbp_write(rt2x00dev, 106, 0x35);
4481 rt2800_bbp_write(rt2x00dev, 128, 0x12);
4482 rt2800_bbp_write(rt2x00dev, 134, 0xD0);
4483 rt2800_bbp_write(rt2x00dev, 135, 0xF6);
4484 rt2800_bbp_write(rt2x00dev, 137, 0x0F);
4485
4486 /* Initialize GLRT (Generalized Likehood Radio Test) */
4487 rt2800_init_bbp_5592_glrt(rt2x00dev);
4488
4489 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
4490
4491 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
4492 div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
4493 ant = (div_mode == 3) ? 1 : 0;
4494 rt2800_bbp_read(rt2x00dev, 152, &value);
4495 if (ant == 0) {
4496 /* Main antenna */
4497 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
4498 } else {
4499 /* Auxiliary antenna */
4500 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
4294 } 4501 }
4502 rt2800_bbp_write(rt2x00dev, 152, value);
4295 4503
4296 if (rt2x00_rt(rt2x00dev, RT5390) || 4504 if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) {
4297 rt2x00_rt(rt2x00dev, RT5392)) { 4505 rt2800_bbp_read(rt2x00dev, 254, &value);
4298 int ant, div_mode; 4506 rt2x00_set_field8(&value, BBP254_BIT7, 1);
4507 rt2800_bbp_write(rt2x00dev, 254, value);
4508 }
4299 4509
4300 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 4510 rt2800_init_freq_calibration(rt2x00dev);
4301 div_mode = rt2x00_get_field16(eeprom,
4302 EEPROM_NIC_CONF1_ANT_DIVERSITY);
4303 ant = (div_mode == 3) ? 1 : 0;
4304 4511
4305 /* check if this is a Bluetooth combo card */ 4512 rt2800_bbp_write(rt2x00dev, 84, 0x19);
4306 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { 4513 if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
4307 u32 reg; 4514 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4308 4515}
4309 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
4310 rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
4311 rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
4312 rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
4313 rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
4314 if (ant == 0)
4315 rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
4316 else if (ant == 1)
4317 rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
4318 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
4319 }
4320 4516
4321 /* This chip has hardware antenna diversity*/ 4517static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
4322 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { 4518{
4323 rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ 4519 unsigned int i;
4324 rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ 4520 u16 eeprom;
4325 rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ 4521 u8 reg_id;
4326 } 4522 u8 value;
4327 4523
4328 rt2800_bbp_read(rt2x00dev, 152, &value); 4524 if (rt2800_is_305x_soc(rt2x00dev))
4329 if (ant == 0) 4525 rt2800_init_bbp_305x_soc(rt2x00dev);
4330 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
4331 else
4332 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
4333 rt2800_bbp_write(rt2x00dev, 152, value);
4334 4526
4335 rt2800_init_freq_calibration(rt2x00dev); 4527 switch (rt2x00dev->chip.rt) {
4528 case RT2860:
4529 case RT2872:
4530 case RT2883:
4531 rt2800_init_bbp_28xx(rt2x00dev);
4532 break;
4533 case RT3070:
4534 case RT3071:
4535 case RT3090:
4536 rt2800_init_bbp_30xx(rt2x00dev);
4537 break;
4538 case RT3290:
4539 rt2800_init_bbp_3290(rt2x00dev);
4540 break;
4541 case RT3352:
4542 rt2800_init_bbp_3352(rt2x00dev);
4543 break;
4544 case RT3390:
4545 rt2800_init_bbp_3390(rt2x00dev);
4546 break;
4547 case RT3572:
4548 rt2800_init_bbp_3572(rt2x00dev);
4549 break;
4550 case RT5390:
4551 case RT5392:
4552 rt2800_init_bbp_53xx(rt2x00dev);
4553 break;
4554 case RT5592:
4555 rt2800_init_bbp_5592(rt2x00dev);
4556 return;
4336 } 4557 }
4337 4558
4338 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 4559 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
@@ -4344,8 +4565,6 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
4344 rt2800_bbp_write(rt2x00dev, reg_id, value); 4565 rt2800_bbp_write(rt2x00dev, reg_id, value);
4345 } 4566 }
4346 } 4567 }
4347
4348 return 0;
4349} 4568}
4350 4569
4351static void rt2800_led_open_drain_enable(struct rt2x00_dev *rt2x00dev) 4570static void rt2800_led_open_drain_enable(struct rt2x00_dev *rt2x00dev)
@@ -5196,9 +5415,11 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
5196 } 5415 }
5197 msleep(1); 5416 msleep(1);
5198 5417
5199 if (unlikely(rt2800_init_bbp(rt2x00dev))) 5418 if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
5419 rt2800_wait_bbp_ready(rt2x00dev)))
5200 return -EIO; 5420 return -EIO;
5201 5421
5422 rt2800_init_bbp(rt2x00dev);
5202 rt2800_init_rfcsr(rt2x00dev); 5423 rt2800_init_rfcsr(rt2x00dev);
5203 5424
5204 if (rt2x00_is_usb(rt2x00dev) && 5425 if (rt2x00_is_usb(rt2x00dev) &&
@@ -6056,8 +6277,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
6056 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); 6277 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
6057 6278
6058 for (i = 14; i < spec->num_channels; i++) { 6279 for (i = 14; i < spec->num_channels; i++) {
6059 info[i].default_power1 = default_power1[i]; 6280 info[i].default_power1 = default_power1[i - 14];
6060 info[i].default_power2 = default_power2[i]; 6281 info[i].default_power2 = default_power2[i - 14];
6061 } 6282 }
6062 } 6283 }
6063 6284
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 6f4a861af336..00055627eb8d 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -637,6 +637,7 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
637 struct queue_entry_priv_mmio *entry_priv = entry->priv_data; 637 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
638 __le32 *txd = entry_priv->desc; 638 __le32 *txd = entry_priv->desc;
639 u32 word; 639 u32 word;
640 const unsigned int txwi_size = entry->queue->winfo_size;
640 641
641 /* 642 /*
642 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1 643 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
@@ -659,14 +660,14 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
659 !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); 660 !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
660 rt2x00_set_field32(&word, TXD_W1_BURST, 661 rt2x00_set_field32(&word, TXD_W1_BURST,
661 test_bit(ENTRY_TXD_BURST, &txdesc->flags)); 662 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
662 rt2x00_set_field32(&word, TXD_W1_SD_LEN0, TXWI_DESC_SIZE); 663 rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
663 rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0); 664 rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
664 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0); 665 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
665 rt2x00_desc_write(txd, 1, word); 666 rt2x00_desc_write(txd, 1, word);
666 667
667 word = 0; 668 word = 0;
668 rt2x00_set_field32(&word, TXD_W2_SD_PTR1, 669 rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
669 skbdesc->skb_dma + TXWI_DESC_SIZE); 670 skbdesc->skb_dma + txwi_size);
670 rt2x00_desc_write(txd, 2, word); 671 rt2x00_desc_write(txd, 2, word);
671 672
672 word = 0; 673 word = 0;
@@ -1014,7 +1015,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
1014 * Since we have only one producer and one consumer we don't 1015 * Since we have only one producer and one consumer we don't
1015 * need to lock the kfifo. 1016 * need to lock the kfifo.
1016 */ 1017 */
1017 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) { 1018 for (i = 0; i < rt2x00dev->tx->limit; i++) {
1018 rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status); 1019 rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
1019 1020
1020 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID)) 1021 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
@@ -1186,29 +1187,43 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1186 .sta_remove = rt2800_sta_remove, 1187 .sta_remove = rt2800_sta_remove,
1187}; 1188};
1188 1189
1189static const struct data_queue_desc rt2800pci_queue_rx = { 1190static void rt2800pci_queue_init(struct data_queue *queue)
1190 .entry_num = 128, 1191{
1191 .data_size = AGGREGATION_SIZE, 1192 switch (queue->qid) {
1192 .desc_size = RXD_DESC_SIZE, 1193 case QID_RX:
1193 .winfo_size = RXWI_DESC_SIZE, 1194 queue->limit = 128;
1194 .priv_size = sizeof(struct queue_entry_priv_mmio), 1195 queue->data_size = AGGREGATION_SIZE;
1195}; 1196 queue->desc_size = RXD_DESC_SIZE;
1197 queue->winfo_size = RXWI_DESC_SIZE_4WORDS;
1198 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1199 break;
1196 1200
1197static const struct data_queue_desc rt2800pci_queue_tx = { 1201 case QID_AC_VO:
1198 .entry_num = 64, 1202 case QID_AC_VI:
1199 .data_size = AGGREGATION_SIZE, 1203 case QID_AC_BE:
1200 .desc_size = TXD_DESC_SIZE, 1204 case QID_AC_BK:
1201 .winfo_size = TXWI_DESC_SIZE, 1205 queue->limit = 64;
1202 .priv_size = sizeof(struct queue_entry_priv_mmio), 1206 queue->data_size = AGGREGATION_SIZE;
1203}; 1207 queue->desc_size = TXD_DESC_SIZE;
1208 queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
1209 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1210 break;
1204 1211
1205static const struct data_queue_desc rt2800pci_queue_bcn = { 1212 case QID_BEACON:
1206 .entry_num = 8, 1213 queue->limit = 8;
1207 .data_size = 0, /* No DMA required for beacons */ 1214 queue->data_size = 0; /* No DMA required for beacons */
1208 .desc_size = TXD_DESC_SIZE, 1215 queue->desc_size = TXD_DESC_SIZE;
1209 .winfo_size = TXWI_DESC_SIZE, 1216 queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
1210 .priv_size = sizeof(struct queue_entry_priv_mmio), 1217 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1211}; 1218 break;
1219
1220 case QID_ATIM:
1221 /* fallthrough */
1222 default:
1223 BUG();
1224 break;
1225 }
1226}
1212 1227
1213static const struct rt2x00_ops rt2800pci_ops = { 1228static const struct rt2x00_ops rt2800pci_ops = {
1214 .name = KBUILD_MODNAME, 1229 .name = KBUILD_MODNAME,
@@ -1217,10 +1232,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
1217 .eeprom_size = EEPROM_SIZE, 1232 .eeprom_size = EEPROM_SIZE,
1218 .rf_size = RF_SIZE, 1233 .rf_size = RF_SIZE,
1219 .tx_queues = NUM_TX_QUEUES, 1234 .tx_queues = NUM_TX_QUEUES,
1220 .extra_tx_headroom = TXWI_DESC_SIZE, 1235 .queue_init = rt2800pci_queue_init,
1221 .rx = &rt2800pci_queue_rx,
1222 .tx = &rt2800pci_queue_tx,
1223 .bcn = &rt2800pci_queue_bcn,
1224 .lib = &rt2800pci_rt2x00_ops, 1236 .lib = &rt2800pci_rt2x00_ops,
1225 .drv = &rt2800pci_rt2800_ops, 1237 .drv = &rt2800pci_rt2800_ops,
1226 .hw = &rt2800pci_mac80211_ops, 1238 .hw = &rt2800pci_mac80211_ops,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index ac854d75bd6c..840833b26bfa 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -327,7 +327,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
327 * this limit so reduce the number to prevent errors. 327 * this limit so reduce the number to prevent errors.
328 */ 328 */
329 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT, 329 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT,
330 ((rt2x00dev->ops->rx->entry_num * DATA_FRAME_SIZE) 330 ((rt2x00dev->rx->limit * DATA_FRAME_SIZE)
331 / 1024) - 3); 331 / 1024) - 3);
332 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1); 332 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
333 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1); 333 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
@@ -849,85 +849,63 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
849 .sta_remove = rt2800_sta_remove, 849 .sta_remove = rt2800_sta_remove,
850}; 850};
851 851
852static const struct data_queue_desc rt2800usb_queue_rx = { 852static void rt2800usb_queue_init(struct data_queue *queue)
853 .entry_num = 128, 853{
854 .data_size = AGGREGATION_SIZE, 854 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
855 .desc_size = RXINFO_DESC_SIZE, 855 unsigned short txwi_size, rxwi_size;
856 .winfo_size = RXWI_DESC_SIZE,
857 .priv_size = sizeof(struct queue_entry_priv_usb),
858};
859
860static const struct data_queue_desc rt2800usb_queue_tx = {
861 .entry_num = 16,
862 .data_size = AGGREGATION_SIZE,
863 .desc_size = TXINFO_DESC_SIZE,
864 .winfo_size = TXWI_DESC_SIZE,
865 .priv_size = sizeof(struct queue_entry_priv_usb),
866};
867
868static const struct data_queue_desc rt2800usb_queue_bcn = {
869 .entry_num = 8,
870 .data_size = MGMT_FRAME_SIZE,
871 .desc_size = TXINFO_DESC_SIZE,
872 .winfo_size = TXWI_DESC_SIZE,
873 .priv_size = sizeof(struct queue_entry_priv_usb),
874};
875 856
876static const struct rt2x00_ops rt2800usb_ops = { 857 if (rt2x00_rt(rt2x00dev, RT5592)) {
877 .name = KBUILD_MODNAME, 858 txwi_size = TXWI_DESC_SIZE_5WORDS;
878 .drv_data_size = sizeof(struct rt2800_drv_data), 859 rxwi_size = RXWI_DESC_SIZE_6WORDS;
879 .max_ap_intf = 8, 860 } else {
880 .eeprom_size = EEPROM_SIZE, 861 txwi_size = TXWI_DESC_SIZE_4WORDS;
881 .rf_size = RF_SIZE, 862 rxwi_size = RXWI_DESC_SIZE_4WORDS;
882 .tx_queues = NUM_TX_QUEUES, 863 }
883 .extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
884 .rx = &rt2800usb_queue_rx,
885 .tx = &rt2800usb_queue_tx,
886 .bcn = &rt2800usb_queue_bcn,
887 .lib = &rt2800usb_rt2x00_ops,
888 .drv = &rt2800usb_rt2800_ops,
889 .hw = &rt2800usb_mac80211_ops,
890#ifdef CONFIG_RT2X00_LIB_DEBUGFS
891 .debugfs = &rt2800_rt2x00debug,
892#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
893};
894 864
895static const struct data_queue_desc rt2800usb_queue_rx_5592 = { 865 switch (queue->qid) {
896 .entry_num = 128, 866 case QID_RX:
897 .data_size = AGGREGATION_SIZE, 867 queue->limit = 128;
898 .desc_size = RXINFO_DESC_SIZE, 868 queue->data_size = AGGREGATION_SIZE;
899 .winfo_size = RXWI_DESC_SIZE_5592, 869 queue->desc_size = RXINFO_DESC_SIZE;
900 .priv_size = sizeof(struct queue_entry_priv_usb), 870 queue->winfo_size = rxwi_size;
901}; 871 queue->priv_size = sizeof(struct queue_entry_priv_usb);
872 break;
902 873
903static const struct data_queue_desc rt2800usb_queue_tx_5592 = { 874 case QID_AC_VO:
904 .entry_num = 16, 875 case QID_AC_VI:
905 .data_size = AGGREGATION_SIZE, 876 case QID_AC_BE:
906 .desc_size = TXINFO_DESC_SIZE, 877 case QID_AC_BK:
907 .winfo_size = TXWI_DESC_SIZE_5592, 878 queue->limit = 16;
908 .priv_size = sizeof(struct queue_entry_priv_usb), 879 queue->data_size = AGGREGATION_SIZE;
909}; 880 queue->desc_size = TXINFO_DESC_SIZE;
881 queue->winfo_size = txwi_size;
882 queue->priv_size = sizeof(struct queue_entry_priv_usb);
883 break;
910 884
911static const struct data_queue_desc rt2800usb_queue_bcn_5592 = { 885 case QID_BEACON:
912 .entry_num = 8, 886 queue->limit = 8;
913 .data_size = MGMT_FRAME_SIZE, 887 queue->data_size = MGMT_FRAME_SIZE;
914 .desc_size = TXINFO_DESC_SIZE, 888 queue->desc_size = TXINFO_DESC_SIZE;
915 .winfo_size = TXWI_DESC_SIZE_5592, 889 queue->winfo_size = txwi_size;
916 .priv_size = sizeof(struct queue_entry_priv_usb), 890 queue->priv_size = sizeof(struct queue_entry_priv_usb);
917}; 891 break;
918 892
893 case QID_ATIM:
894 /* fallthrough */
895 default:
896 BUG();
897 break;
898 }
899}
919 900
920static const struct rt2x00_ops rt2800usb_ops_5592 = { 901static const struct rt2x00_ops rt2800usb_ops = {
921 .name = KBUILD_MODNAME, 902 .name = KBUILD_MODNAME,
922 .drv_data_size = sizeof(struct rt2800_drv_data), 903 .drv_data_size = sizeof(struct rt2800_drv_data),
923 .max_ap_intf = 8, 904 .max_ap_intf = 8,
924 .eeprom_size = EEPROM_SIZE, 905 .eeprom_size = EEPROM_SIZE,
925 .rf_size = RF_SIZE, 906 .rf_size = RF_SIZE,
926 .tx_queues = NUM_TX_QUEUES, 907 .tx_queues = NUM_TX_QUEUES,
927 .extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592, 908 .queue_init = rt2800usb_queue_init,
928 .rx = &rt2800usb_queue_rx_5592,
929 .tx = &rt2800usb_queue_tx_5592,
930 .bcn = &rt2800usb_queue_bcn_5592,
931 .lib = &rt2800usb_rt2x00_ops, 909 .lib = &rt2800usb_rt2x00_ops,
932 .drv = &rt2800usb_rt2800_ops, 910 .drv = &rt2800usb_rt2800_ops,
933 .hw = &rt2800usb_mac80211_ops, 911 .hw = &rt2800usb_mac80211_ops,
@@ -1248,15 +1226,15 @@ static struct usb_device_id rt2800usb_device_table[] = {
1248#endif 1226#endif
1249#ifdef CONFIG_RT2800USB_RT55XX 1227#ifdef CONFIG_RT2800USB_RT55XX
1250 /* Arcadyan */ 1228 /* Arcadyan */
1251 { USB_DEVICE(0x043e, 0x7a32), .driver_info = 5592 }, 1229 { USB_DEVICE(0x043e, 0x7a32) },
1252 /* AVM GmbH */ 1230 /* AVM GmbH */
1253 { USB_DEVICE(0x057c, 0x8501), .driver_info = 5592 }, 1231 { USB_DEVICE(0x057c, 0x8501) },
1254 /* D-Link DWA-160-B2 */ 1232 /* D-Link DWA-160-B2 */
1255 { USB_DEVICE(0x2001, 0x3c1a), .driver_info = 5592 }, 1233 { USB_DEVICE(0x2001, 0x3c1a) },
1256 /* Proware */ 1234 /* Proware */
1257 { USB_DEVICE(0x043e, 0x7a13), .driver_info = 5592 }, 1235 { USB_DEVICE(0x043e, 0x7a13) },
1258 /* Ralink */ 1236 /* Ralink */
1259 { USB_DEVICE(0x148f, 0x5572), .driver_info = 5592 }, 1237 { USB_DEVICE(0x148f, 0x5572) },
1260#endif 1238#endif
1261#ifdef CONFIG_RT2800USB_UNKNOWN 1239#ifdef CONFIG_RT2800USB_UNKNOWN
1262 /* 1240 /*
@@ -1361,9 +1339,6 @@ MODULE_LICENSE("GPL");
1361static int rt2800usb_probe(struct usb_interface *usb_intf, 1339static int rt2800usb_probe(struct usb_interface *usb_intf,
1362 const struct usb_device_id *id) 1340 const struct usb_device_id *id)
1363{ 1341{
1364 if (id->driver_info == 5592)
1365 return rt2x00usb_probe(usb_intf, &rt2800usb_ops_5592);
1366
1367 return rt2x00usb_probe(usb_intf, &rt2800usb_ops); 1342 return rt2x00usb_probe(usb_intf, &rt2800usb_ops);
1368} 1343}
1369 1344
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 7510723a8c37..ee3fc570b11d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -648,11 +648,7 @@ struct rt2x00_ops {
648 const unsigned int eeprom_size; 648 const unsigned int eeprom_size;
649 const unsigned int rf_size; 649 const unsigned int rf_size;
650 const unsigned int tx_queues; 650 const unsigned int tx_queues;
651 const unsigned int extra_tx_headroom; 651 void (*queue_init)(struct data_queue *queue);
652 const struct data_queue_desc *rx;
653 const struct data_queue_desc *tx;
654 const struct data_queue_desc *bcn;
655 const struct data_queue_desc *atim;
656 const struct rt2x00lib_ops *lib; 652 const struct rt2x00lib_ops *lib;
657 const void *drv; 653 const void *drv;
658 const struct ieee80211_ops *hw; 654 const struct ieee80211_ops *hw;
@@ -1010,6 +1006,9 @@ struct rt2x00_dev {
1010 */ 1006 */
1011 struct list_head bar_list; 1007 struct list_head bar_list;
1012 spinlock_t bar_list_lock; 1008 spinlock_t bar_list_lock;
1009
1010 /* Extra TX headroom required for alignment purposes. */
1011 unsigned int extra_tx_headroom;
1013}; 1012};
1014 1013
1015struct rt2x00_bar_list_entry { 1014struct rt2x00_bar_list_entry {
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 90dc14336980..b16521e6bf4a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -334,7 +334,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
334 /* 334 /*
335 * Remove the extra tx headroom from the skb. 335 * Remove the extra tx headroom from the skb.
336 */ 336 */
337 skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom); 337 skb_pull(entry->skb, rt2x00dev->extra_tx_headroom);
338 338
339 /* 339 /*
340 * Signal that the TX descriptor is no longer in the skb. 340 * Signal that the TX descriptor is no longer in the skb.
@@ -1049,7 +1049,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
1049 */ 1049 */
1050 rt2x00dev->hw->extra_tx_headroom = 1050 rt2x00dev->hw->extra_tx_headroom =
1051 max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM, 1051 max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
1052 rt2x00dev->ops->extra_tx_headroom); 1052 rt2x00dev->extra_tx_headroom);
1053 1053
1054 /* 1054 /*
1055 * Take TX headroom required for alignment into account. 1055 * Take TX headroom required for alignment into account.
@@ -1077,7 +1077,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
1077 */ 1077 */
1078 int kfifo_size = 1078 int kfifo_size =
1079 roundup_pow_of_two(rt2x00dev->ops->tx_queues * 1079 roundup_pow_of_two(rt2x00dev->ops->tx_queues *
1080 rt2x00dev->ops->tx->entry_num * 1080 rt2x00dev->tx->limit *
1081 sizeof(u32)); 1081 sizeof(u32));
1082 1082
1083 status = kfifo_alloc(&rt2x00dev->txstatus_fifo, kfifo_size, 1083 status = kfifo_alloc(&rt2x00dev->txstatus_fifo, kfifo_size,
@@ -1256,6 +1256,17 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
1256 rt2x00dev->hw->wiphy->n_iface_combinations = 1; 1256 rt2x00dev->hw->wiphy->n_iface_combinations = 1;
1257} 1257}
1258 1258
1259static unsigned int rt2x00dev_extra_tx_headroom(struct rt2x00_dev *rt2x00dev)
1260{
1261 if (WARN_ON(!rt2x00dev->tx))
1262 return 0;
1263
1264 if (rt2x00_is_usb(rt2x00dev))
1265 return rt2x00dev->tx[0].winfo_size + rt2x00dev->tx[0].desc_size;
1266
1267 return rt2x00dev->tx[0].winfo_size;
1268}
1269
1259/* 1270/*
1260 * driver allocation handlers. 1271 * driver allocation handlers.
1261 */ 1272 */
@@ -1301,27 +1312,10 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1301 (rt2x00dev->ops->max_ap_intf - 1); 1312 (rt2x00dev->ops->max_ap_intf - 1);
1302 1313
1303 /* 1314 /*
1304 * Determine which operating modes are supported, all modes
1305 * which require beaconing, depend on the availability of
1306 * beacon entries.
1307 */
1308 rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1309 if (rt2x00dev->ops->bcn->entry_num > 0)
1310 rt2x00dev->hw->wiphy->interface_modes |=
1311 BIT(NL80211_IFTYPE_ADHOC) |
1312 BIT(NL80211_IFTYPE_AP) |
1313#ifdef CONFIG_MAC80211_MESH
1314 BIT(NL80211_IFTYPE_MESH_POINT) |
1315#endif
1316 BIT(NL80211_IFTYPE_WDS);
1317
1318 rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
1319
1320 /*
1321 * Initialize work. 1315 * Initialize work.
1322 */ 1316 */
1323 rt2x00dev->workqueue = 1317 rt2x00dev->workqueue =
1324 alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0); 1318 alloc_ordered_workqueue("%s", 0, wiphy_name(rt2x00dev->hw->wiphy));
1325 if (!rt2x00dev->workqueue) { 1319 if (!rt2x00dev->workqueue) {
1326 retval = -ENOMEM; 1320 retval = -ENOMEM;
1327 goto exit; 1321 goto exit;
@@ -1347,6 +1341,26 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1347 if (retval) 1341 if (retval)
1348 goto exit; 1342 goto exit;
1349 1343
1344 /* Cache TX headroom value */
1345 rt2x00dev->extra_tx_headroom = rt2x00dev_extra_tx_headroom(rt2x00dev);
1346
1347 /*
1348 * Determine which operating modes are supported, all modes
1349 * which require beaconing, depend on the availability of
1350 * beacon entries.
1351 */
1352 rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1353 if (rt2x00dev->bcn->limit > 0)
1354 rt2x00dev->hw->wiphy->interface_modes |=
1355 BIT(NL80211_IFTYPE_ADHOC) |
1356 BIT(NL80211_IFTYPE_AP) |
1357#ifdef CONFIG_MAC80211_MESH
1358 BIT(NL80211_IFTYPE_MESH_POINT) |
1359#endif
1360 BIT(NL80211_IFTYPE_WDS);
1361
1362 rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
1363
1350 /* 1364 /*
1351 * Initialize ieee80211 structure. 1365 * Initialize ieee80211 structure.
1352 */ 1366 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index dc49e525ae5e..76d95deb274b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -105,11 +105,13 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
105 goto exit_release_regions; 105 goto exit_release_regions;
106 } 106 }
107 107
108 pci_enable_msi(pci_dev);
109
108 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); 110 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
109 if (!hw) { 111 if (!hw) {
110 rt2x00_probe_err("Failed to allocate hardware\n"); 112 rt2x00_probe_err("Failed to allocate hardware\n");
111 retval = -ENOMEM; 113 retval = -ENOMEM;
112 goto exit_release_regions; 114 goto exit_disable_msi;
113 } 115 }
114 116
115 pci_set_drvdata(pci_dev, hw); 117 pci_set_drvdata(pci_dev, hw);
@@ -150,6 +152,9 @@ exit_free_reg:
150exit_free_device: 152exit_free_device:
151 ieee80211_free_hw(hw); 153 ieee80211_free_hw(hw);
152 154
155exit_disable_msi:
156 pci_disable_msi(pci_dev);
157
153exit_release_regions: 158exit_release_regions:
154 pci_release_regions(pci_dev); 159 pci_release_regions(pci_dev);
155 160
@@ -174,6 +179,8 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
174 rt2x00pci_free_reg(rt2x00dev); 179 rt2x00pci_free_reg(rt2x00dev);
175 ieee80211_free_hw(hw); 180 ieee80211_free_hw(hw);
176 181
182 pci_disable_msi(pci_dev);
183
177 /* 184 /*
178 * Free the PCI device data. 185 * Free the PCI device data.
179 */ 186 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 2c12311467a9..6c0a91ff963c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -542,8 +542,8 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
542 /* 542 /*
543 * Add the requested extra tx headroom in front of the skb. 543 * Add the requested extra tx headroom in front of the skb.
544 */ 544 */
545 skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom); 545 skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
546 memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom); 546 memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);
547 547
548 /* 548 /*
549 * Call the driver's write_tx_data function, if it exists. 549 * Call the driver's write_tx_data function, if it exists.
@@ -596,7 +596,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry)
596{ 596{
597 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 597 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
598 struct ieee80211_bar *bar = (void *) (entry->skb->data + 598 struct ieee80211_bar *bar = (void *) (entry->skb->data +
599 rt2x00dev->ops->extra_tx_headroom); 599 rt2x00dev->extra_tx_headroom);
600 struct rt2x00_bar_list_entry *bar_entry; 600 struct rt2x00_bar_list_entry *bar_entry;
601 601
602 if (likely(!ieee80211_is_back_req(bar->frame_control))) 602 if (likely(!ieee80211_is_back_req(bar->frame_control)))
@@ -1161,8 +1161,7 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
1161 } 1161 }
1162} 1162}
1163 1163
1164static int rt2x00queue_alloc_entries(struct data_queue *queue, 1164static int rt2x00queue_alloc_entries(struct data_queue *queue)
1165 const struct data_queue_desc *qdesc)
1166{ 1165{
1167 struct queue_entry *entries; 1166 struct queue_entry *entries;
1168 unsigned int entry_size; 1167 unsigned int entry_size;
@@ -1170,16 +1169,10 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
1170 1169
1171 rt2x00queue_reset(queue); 1170 rt2x00queue_reset(queue);
1172 1171
1173 queue->limit = qdesc->entry_num;
1174 queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
1175 queue->data_size = qdesc->data_size;
1176 queue->desc_size = qdesc->desc_size;
1177 queue->winfo_size = qdesc->winfo_size;
1178
1179 /* 1172 /*
1180 * Allocate all queue entries. 1173 * Allocate all queue entries.
1181 */ 1174 */
1182 entry_size = sizeof(*entries) + qdesc->priv_size; 1175 entry_size = sizeof(*entries) + queue->priv_size;
1183 entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); 1176 entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
1184 if (!entries) 1177 if (!entries)
1185 return -ENOMEM; 1178 return -ENOMEM;
@@ -1195,7 +1188,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
1195 entries[i].entry_idx = i; 1188 entries[i].entry_idx = i;
1196 entries[i].priv_data = 1189 entries[i].priv_data =
1197 QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, 1190 QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
1198 sizeof(*entries), qdesc->priv_size); 1191 sizeof(*entries), queue->priv_size);
1199 } 1192 }
1200 1193
1201#undef QUEUE_ENTRY_PRIV_OFFSET 1194#undef QUEUE_ENTRY_PRIV_OFFSET
@@ -1237,23 +1230,22 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
1237 struct data_queue *queue; 1230 struct data_queue *queue;
1238 int status; 1231 int status;
1239 1232
1240 status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx); 1233 status = rt2x00queue_alloc_entries(rt2x00dev->rx);
1241 if (status) 1234 if (status)
1242 goto exit; 1235 goto exit;
1243 1236
1244 tx_queue_for_each(rt2x00dev, queue) { 1237 tx_queue_for_each(rt2x00dev, queue) {
1245 status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx); 1238 status = rt2x00queue_alloc_entries(queue);
1246 if (status) 1239 if (status)
1247 goto exit; 1240 goto exit;
1248 } 1241 }
1249 1242
1250 status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn); 1243 status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
1251 if (status) 1244 if (status)
1252 goto exit; 1245 goto exit;
1253 1246
1254 if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) { 1247 if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
1255 status = rt2x00queue_alloc_entries(rt2x00dev->atim, 1248 status = rt2x00queue_alloc_entries(rt2x00dev->atim);
1256 rt2x00dev->ops->atim);
1257 if (status) 1249 if (status)
1258 goto exit; 1250 goto exit;
1259 } 1251 }
@@ -1297,6 +1289,10 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
1297 queue->aifs = 2; 1289 queue->aifs = 2;
1298 queue->cw_min = 5; 1290 queue->cw_min = 5;
1299 queue->cw_max = 10; 1291 queue->cw_max = 10;
1292
1293 rt2x00dev->ops->queue_init(queue);
1294
1295 queue->threshold = DIV_ROUND_UP(queue->limit, 10);
1300} 1296}
1301 1297
1302int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) 1298int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 4a7b34e9261b..ebe117224979 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -453,6 +453,7 @@ enum data_queue_flags {
453 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue). 453 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
454 * @data_size: Maximum data size for the frames in this queue. 454 * @data_size: Maximum data size for the frames in this queue.
455 * @desc_size: Hardware descriptor size for the data in this queue. 455 * @desc_size: Hardware descriptor size for the data in this queue.
456 * @priv_size: Size of per-queue_entry private data.
456 * @usb_endpoint: Device endpoint used for communication (USB only) 457 * @usb_endpoint: Device endpoint used for communication (USB only)
457 * @usb_maxpacket: Max packet size for given endpoint (USB only) 458 * @usb_maxpacket: Max packet size for given endpoint (USB only)
458 */ 459 */
@@ -481,31 +482,13 @@ struct data_queue {
481 unsigned short data_size; 482 unsigned short data_size;
482 unsigned char desc_size; 483 unsigned char desc_size;
483 unsigned char winfo_size; 484 unsigned char winfo_size;
485 unsigned short priv_size;
484 486
485 unsigned short usb_endpoint; 487 unsigned short usb_endpoint;
486 unsigned short usb_maxpacket; 488 unsigned short usb_maxpacket;
487}; 489};
488 490
489/** 491/**
490 * struct data_queue_desc: Data queue description
491 *
492 * The information in this structure is used by drivers
493 * to inform rt2x00lib about the creation of the data queue.
494 *
495 * @entry_num: Maximum number of entries for a queue.
496 * @data_size: Maximum data size for the frames in this queue.
497 * @desc_size: Hardware descriptor size for the data in this queue.
498 * @priv_size: Size of per-queue_entry private data.
499 */
500struct data_queue_desc {
501 unsigned short entry_num;
502 unsigned short data_size;
503 unsigned char desc_size;
504 unsigned char winfo_size;
505 unsigned short priv_size;
506};
507
508/**
509 * queue_end - Return pointer to the last queue (HELPER MACRO). 492 * queue_end - Return pointer to the last queue (HELPER MACRO).
510 * @__dev: Pointer to &struct rt2x00_dev 493 * @__dev: Pointer to &struct rt2x00_dev
511 * 494 *
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 0dc8180e251b..54d3ddfc9888 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2175,7 +2175,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2175 * that the TX_STA_FIFO stack has a size of 16. We stick to our 2175 * that the TX_STA_FIFO stack has a size of 16. We stick to our
2176 * tx ring size for now. 2176 * tx ring size for now.
2177 */ 2177 */
2178 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) { 2178 for (i = 0; i < rt2x00dev->tx->limit; i++) {
2179 rt2x00mmio_register_read(rt2x00dev, STA_CSR4, &reg); 2179 rt2x00mmio_register_read(rt2x00dev, STA_CSR4, &reg);
2180 if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) 2180 if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
2181 break; 2181 break;
@@ -2825,7 +2825,8 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2825 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 2825 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2826 for (i = 14; i < spec->num_channels; i++) { 2826 for (i = 14; i < spec->num_channels; i++) {
2827 info[i].max_power = MAX_TXPOWER; 2827 info[i].max_power = MAX_TXPOWER;
2828 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); 2828 info[i].default_power1 =
2829 TXPOWER_FROM_DEV(tx_power[i - 14]);
2829 } 2830 }
2830 } 2831 }
2831 2832
@@ -3025,26 +3026,40 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
3025 .config = rt61pci_config, 3026 .config = rt61pci_config,
3026}; 3027};
3027 3028
3028static const struct data_queue_desc rt61pci_queue_rx = { 3029static void rt61pci_queue_init(struct data_queue *queue)
3029 .entry_num = 32, 3030{
3030 .data_size = DATA_FRAME_SIZE, 3031 switch (queue->qid) {
3031 .desc_size = RXD_DESC_SIZE, 3032 case QID_RX:
3032 .priv_size = sizeof(struct queue_entry_priv_mmio), 3033 queue->limit = 32;
3033}; 3034 queue->data_size = DATA_FRAME_SIZE;
3035 queue->desc_size = RXD_DESC_SIZE;
3036 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
3037 break;
3034 3038
3035static const struct data_queue_desc rt61pci_queue_tx = { 3039 case QID_AC_VO:
3036 .entry_num = 32, 3040 case QID_AC_VI:
3037 .data_size = DATA_FRAME_SIZE, 3041 case QID_AC_BE:
3038 .desc_size = TXD_DESC_SIZE, 3042 case QID_AC_BK:
3039 .priv_size = sizeof(struct queue_entry_priv_mmio), 3043 queue->limit = 32;
3040}; 3044 queue->data_size = DATA_FRAME_SIZE;
3045 queue->desc_size = TXD_DESC_SIZE;
3046 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
3047 break;
3041 3048
3042static const struct data_queue_desc rt61pci_queue_bcn = { 3049 case QID_BEACON:
3043 .entry_num = 4, 3050 queue->limit = 4;
3044 .data_size = 0, /* No DMA required for beacons */ 3051 queue->data_size = 0; /* No DMA required for beacons */
3045 .desc_size = TXINFO_SIZE, 3052 queue->desc_size = TXINFO_SIZE;
3046 .priv_size = sizeof(struct queue_entry_priv_mmio), 3053 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
3047}; 3054 break;
3055
3056 case QID_ATIM:
3057 /* fallthrough */
3058 default:
3059 BUG();
3060 break;
3061 }
3062}
3048 3063
3049static const struct rt2x00_ops rt61pci_ops = { 3064static const struct rt2x00_ops rt61pci_ops = {
3050 .name = KBUILD_MODNAME, 3065 .name = KBUILD_MODNAME,
@@ -3052,10 +3067,7 @@ static const struct rt2x00_ops rt61pci_ops = {
3052 .eeprom_size = EEPROM_SIZE, 3067 .eeprom_size = EEPROM_SIZE,
3053 .rf_size = RF_SIZE, 3068 .rf_size = RF_SIZE,
3054 .tx_queues = NUM_TX_QUEUES, 3069 .tx_queues = NUM_TX_QUEUES,
3055 .extra_tx_headroom = 0, 3070 .queue_init = rt61pci_queue_init,
3056 .rx = &rt61pci_queue_rx,
3057 .tx = &rt61pci_queue_tx,
3058 .bcn = &rt61pci_queue_bcn,
3059 .lib = &rt61pci_rt2x00_ops, 3071 .lib = &rt61pci_rt2x00_ops,
3060 .hw = &rt61pci_mac80211_ops, 3072 .hw = &rt61pci_mac80211_ops,
3061#ifdef CONFIG_RT2X00_LIB_DEBUGFS 3073#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 377e09bb0b81..1d3880e09a13 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2167,7 +2167,8 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2167 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 2167 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2168 for (i = 14; i < spec->num_channels; i++) { 2168 for (i = 14; i < spec->num_channels; i++) {
2169 info[i].max_power = MAX_TXPOWER; 2169 info[i].max_power = MAX_TXPOWER;
2170 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); 2170 info[i].default_power1 =
2171 TXPOWER_FROM_DEV(tx_power[i - 14]);
2171 } 2172 }
2172 } 2173 }
2173 2174
@@ -2359,26 +2360,40 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2359 .config = rt73usb_config, 2360 .config = rt73usb_config,
2360}; 2361};
2361 2362
2362static const struct data_queue_desc rt73usb_queue_rx = { 2363static void rt73usb_queue_init(struct data_queue *queue)
2363 .entry_num = 32, 2364{
2364 .data_size = DATA_FRAME_SIZE, 2365 switch (queue->qid) {
2365 .desc_size = RXD_DESC_SIZE, 2366 case QID_RX:
2366 .priv_size = sizeof(struct queue_entry_priv_usb), 2367 queue->limit = 32;
2367}; 2368 queue->data_size = DATA_FRAME_SIZE;
2369 queue->desc_size = RXD_DESC_SIZE;
2370 queue->priv_size = sizeof(struct queue_entry_priv_usb);
2371 break;
2368 2372
2369static const struct data_queue_desc rt73usb_queue_tx = { 2373 case QID_AC_VO:
2370 .entry_num = 32, 2374 case QID_AC_VI:
2371 .data_size = DATA_FRAME_SIZE, 2375 case QID_AC_BE:
2372 .desc_size = TXD_DESC_SIZE, 2376 case QID_AC_BK:
2373 .priv_size = sizeof(struct queue_entry_priv_usb), 2377 queue->limit = 32;
2374}; 2378 queue->data_size = DATA_FRAME_SIZE;
2379 queue->desc_size = TXD_DESC_SIZE;
2380 queue->priv_size = sizeof(struct queue_entry_priv_usb);
2381 break;
2375 2382
2376static const struct data_queue_desc rt73usb_queue_bcn = { 2383 case QID_BEACON:
2377 .entry_num = 4, 2384 queue->limit = 4;
2378 .data_size = MGMT_FRAME_SIZE, 2385 queue->data_size = MGMT_FRAME_SIZE;
2379 .desc_size = TXINFO_SIZE, 2386 queue->desc_size = TXINFO_SIZE;
2380 .priv_size = sizeof(struct queue_entry_priv_usb), 2387 queue->priv_size = sizeof(struct queue_entry_priv_usb);
2381}; 2388 break;
2389
2390 case QID_ATIM:
2391 /* fallthrough */
2392 default:
2393 BUG();
2394 break;
2395 }
2396}
2382 2397
2383static const struct rt2x00_ops rt73usb_ops = { 2398static const struct rt2x00_ops rt73usb_ops = {
2384 .name = KBUILD_MODNAME, 2399 .name = KBUILD_MODNAME,
@@ -2386,10 +2401,7 @@ static const struct rt2x00_ops rt73usb_ops = {
2386 .eeprom_size = EEPROM_SIZE, 2401 .eeprom_size = EEPROM_SIZE,
2387 .rf_size = RF_SIZE, 2402 .rf_size = RF_SIZE,
2388 .tx_queues = NUM_TX_QUEUES, 2403 .tx_queues = NUM_TX_QUEUES,
2389 .extra_tx_headroom = TXD_DESC_SIZE, 2404 .queue_init = rt73usb_queue_init,
2390 .rx = &rt73usb_queue_rx,
2391 .tx = &rt73usb_queue_tx,
2392 .bcn = &rt73usb_queue_bcn,
2393 .lib = &rt73usb_rt2x00_ops, 2405 .lib = &rt73usb_rt2x00_ops,
2394 .hw = &rt73usb_mac80211_ops, 2406 .hw = &rt73usb_mac80211_ops,
2395#ifdef CONFIG_RT2X00_LIB_DEBUGFS 2407#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index af59dd5718e1..9d558ac77b0c 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -380,7 +380,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
380 380
381 /* <2> work queue */ 381 /* <2> work queue */
382 rtlpriv->works.hw = hw; 382 rtlpriv->works.hw = hw;
383 rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0); 383 rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
384 INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, 384 INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
385 (void *)rtl_watchdog_wq_callback); 385 (void *)rtl_watchdog_wq_callback);
386 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, 386 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
@@ -1817,7 +1817,7 @@ static ssize_t rtl_store_debug_level(struct device *d,
1817 unsigned long val; 1817 unsigned long val;
1818 int ret; 1818 int ret;
1819 1819
1820 ret = strict_strtoul(buf, 0, &val); 1820 ret = kstrtoul(buf, 0, &val);
1821 if (ret) { 1821 if (ret) {
1822 printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf); 1822 printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf);
1823 } else { 1823 } else {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 953f1a0f8532..2119313a737b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -104,7 +104,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
104 tx_agc[RF90_PATH_A] = 0x10101010; 104 tx_agc[RF90_PATH_A] = 0x10101010;
105 tx_agc[RF90_PATH_B] = 0x10101010; 105 tx_agc[RF90_PATH_B] = 0x10101010;
106 } else if (rtlpriv->dm.dynamic_txhighpower_lvl == 106 } else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
107 TXHIGHPWRLEVEL_LEVEL1) { 107 TXHIGHPWRLEVEL_LEVEL2) {
108 tx_agc[RF90_PATH_A] = 0x00000000; 108 tx_agc[RF90_PATH_A] = 0x00000000;
109 tx_agc[RF90_PATH_B] = 0x00000000; 109 tx_agc[RF90_PATH_B] = 0x00000000;
110 } else{ 110 } else{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 826f085c29dd..2bd598526217 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -359,6 +359,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
359 {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ 359 {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
360 {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ 360 {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
361 {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ 361 {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
362 {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
362 {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/ 363 {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
363 {} 364 {}
364}; 365};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 19a765532603..47875ba09ff8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -842,7 +842,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
842 long val_y, ele_c = 0; 842 long val_y, ele_c = 0;
843 u8 ofdm_index[2]; 843 u8 ofdm_index[2];
844 s8 cck_index = 0; 844 s8 cck_index = 0;
845 u8 ofdm_index_old[2]; 845 u8 ofdm_index_old[2] = {0, 0};
846 s8 cck_index_old = 0; 846 s8 cck_index_old = 0;
847 u8 index; 847 u8 index;
848 int i; 848 int i;
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index e4c4cdc3eb67..d9ee2efffe5f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -251,7 +251,7 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = {
251 .bar_id = 2, 251 .bar_id = 2,
252 .write_readback = true, 252 .write_readback = true,
253 .name = "rtl8723ae_pci", 253 .name = "rtl8723ae_pci",
254 .fw_name = "rtlwifi/rtl8723aefw.bin", 254 .fw_name = "rtlwifi/rtl8723fw.bin",
255 .ops = &rtl8723ae_hal_ops, 255 .ops = &rtl8723ae_hal_ops,
256 .mod_params = &rtl8723ae_mod_params, 256 .mod_params = &rtl8723ae_mod_params,
257 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, 257 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
@@ -353,8 +353,8 @@ MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
353MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); 353MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
354MODULE_LICENSE("GPL"); 354MODULE_LICENSE("GPL");
355MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless"); 355MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless");
356MODULE_FIRMWARE("rtlwifi/rtl8723aefw.bin"); 356MODULE_FIRMWARE("rtlwifi/rtl8723fw.bin");
357MODULE_FIRMWARE("rtlwifi/rtl8723aefw_B.bin"); 357MODULE_FIRMWARE("rtlwifi/rtl8723fw_B.bin");
358 358
359module_param_named(swenc, rtl8723ae_mod_params.sw_crypto, bool, 0444); 359module_param_named(swenc, rtl8723ae_mod_params.sw_crypto, bool, 0444);
360module_param_named(debug, rtl8723ae_mod_params.debug, int, 0444); 360module_param_named(debug, rtl8723ae_mod_params.debug, int, 0444);
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 4c67c2f9ea71..c7dc6feab2ff 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -93,8 +93,7 @@ static void wl1251_spi_wake(struct wl1251 *wl)
93 memset(&t, 0, sizeof(t)); 93 memset(&t, 0, sizeof(t));
94 spi_message_init(&m); 94 spi_message_init(&m);
95 95
96 /* 96 /* Set WSPI_INIT_COMMAND
97 * Set WSPI_INIT_COMMAND
98 * the data is being send from the MSB to LSB 97 * the data is being send from the MSB to LSB
99 */ 98 */
100 cmd[2] = 0xff; 99 cmd[2] = 0xff;
@@ -262,7 +261,8 @@ static int wl1251_spi_probe(struct spi_device *spi)
262 wl->if_ops = &wl1251_spi_ops; 261 wl->if_ops = &wl1251_spi_ops;
263 262
264 /* This is the only SPI value that we need to set here, the rest 263 /* This is the only SPI value that we need to set here, the rest
265 * comes from the board-peripherals file */ 264 * comes from the board-peripherals file
265 */
266 spi->bits_per_word = 32; 266 spi->bits_per_word = 32;
267 267
268 ret = spi_setup(spi); 268 ret = spi_setup(spi);
@@ -329,29 +329,7 @@ static struct spi_driver wl1251_spi_driver = {
329 .remove = wl1251_spi_remove, 329 .remove = wl1251_spi_remove,
330}; 330};
331 331
332static int __init wl1251_spi_init(void) 332module_spi_driver(wl1251_spi_driver);
333{
334 int ret;
335
336 ret = spi_register_driver(&wl1251_spi_driver);
337 if (ret < 0) {
338 wl1251_error("failed to register spi driver: %d", ret);
339 goto out;
340 }
341
342out:
343 return ret;
344}
345
346static void __exit wl1251_spi_exit(void)
347{
348 spi_unregister_driver(&wl1251_spi_driver);
349
350 wl1251_notice("unloaded");
351}
352
353module_init(wl1251_spi_init);
354module_exit(wl1251_spi_exit);
355 333
356MODULE_LICENSE("GPL"); 334MODULE_LICENSE("GPL");
357MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>"); 335MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 9fa692d11025..7aa0eb848c5a 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -23,6 +23,7 @@
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/ip.h> 24#include <linux/ip.h>
25#include <linux/firmware.h> 25#include <linux/firmware.h>
26#include <linux/etherdevice.h>
26 27
27#include "../wlcore/wlcore.h" 28#include "../wlcore/wlcore.h"
28#include "../wlcore/debug.h" 29#include "../wlcore/debug.h"
@@ -594,8 +595,8 @@ static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
594 .mem3 = { .start = 0x00000000, .size = 0x00000000 }, 595 .mem3 = { .start = 0x00000000, .size = 0x00000000 },
595 }, 596 },
596 [PART_PHY_INIT] = { 597 [PART_PHY_INIT] = {
597 .mem = { .start = 0x80926000, 598 .mem = { .start = WL18XX_PHY_INIT_MEM_ADDR,
598 .size = sizeof(struct wl18xx_mac_and_phy_params) }, 599 .size = WL18XX_PHY_INIT_MEM_SIZE },
599 .reg = { .start = 0x00000000, .size = 0x00000000 }, 600 .reg = { .start = 0x00000000, .size = 0x00000000 },
600 .mem2 = { .start = 0x00000000, .size = 0x00000000 }, 601 .mem2 = { .start = 0x00000000, .size = 0x00000000 },
601 .mem3 = { .start = 0x00000000, .size = 0x00000000 }, 602 .mem3 = { .start = 0x00000000, .size = 0x00000000 },
@@ -799,6 +800,9 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
799 u32 tmp; 800 u32 tmp;
800 int ret; 801 int ret;
801 802
803 BUILD_BUG_ON(sizeof(struct wl18xx_mac_and_phy_params) >
804 WL18XX_PHY_INIT_MEM_SIZE);
805
802 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); 806 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
803 if (ret < 0) 807 if (ret < 0)
804 goto out; 808 goto out;
@@ -815,6 +819,35 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
815 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); 819 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
816 820
817 ret = wlcore_read32(wl, WL18XX_SCR_PAD2, &tmp); 821 ret = wlcore_read32(wl, WL18XX_SCR_PAD2, &tmp);
822 if (ret < 0)
823 goto out;
824
825 /*
826 * Workaround for FDSP code RAM corruption (needed for PG2.1
827 * and newer; for older chips it's a NOP). Change FDSP clock
828 * settings so that it's muxed to the ATGP clock instead of
829 * its own clock.
830 */
831
832 ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
833 if (ret < 0)
834 goto out;
835
836 /* disable FDSP clock */
837 ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
838 MEM_FDSP_CLK_120_DISABLE);
839 if (ret < 0)
840 goto out;
841
842 /* set ATPG clock toward FDSP Code RAM rather than its own clock */
843 ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
844 MEM_FDSP_CODERAM_FUNC_CLK_SEL);
845 if (ret < 0)
846 goto out;
847
848 /* re-enable FDSP clock */
849 ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
850 MEM_FDSP_CLK_120_ENABLE);
818 851
819out: 852out:
820 return ret; 853 return ret;
@@ -1286,6 +1319,16 @@ static int wl18xx_get_mac(struct wl1271 *wl)
1286 ((mac1 & 0xff000000) >> 24); 1319 ((mac1 & 0xff000000) >> 24);
1287 wl->fuse_nic_addr = (mac1 & 0xffffff); 1320 wl->fuse_nic_addr = (mac1 & 0xffffff);
1288 1321
1322 if (!wl->fuse_oui_addr && !wl->fuse_nic_addr) {
1323 u8 mac[ETH_ALEN];
1324
1325 eth_random_addr(mac);
1326
1327 wl->fuse_oui_addr = (mac[0] << 16) + (mac[1] << 8) + mac[2];
1328 wl->fuse_nic_addr = (mac[3] << 16) + (mac[4] << 8) + mac[5];
1329 wl1271_warning("MAC address from fuse not available, using random locally administered addresses.");
1330 }
1331
1289 ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]); 1332 ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
1290 1333
1291out: 1334out:
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index 6306e04cd258..05dd8bad2746 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -38,6 +38,9 @@
38#define WL18XX_REG_BOOT_PART_SIZE 0x00014578 38#define WL18XX_REG_BOOT_PART_SIZE 0x00014578
39 39
40#define WL18XX_PHY_INIT_MEM_ADDR 0x80926000 40#define WL18XX_PHY_INIT_MEM_ADDR 0x80926000
41#define WL18XX_PHY_END_MEM_ADDR 0x8093CA44
42#define WL18XX_PHY_INIT_MEM_SIZE \
43 (WL18XX_PHY_END_MEM_ADDR - WL18XX_PHY_INIT_MEM_ADDR)
41 44
42#define WL18XX_SDIO_WSPI_BASE (WL18XX_REGISTERS_BASE) 45#define WL18XX_SDIO_WSPI_BASE (WL18XX_REGISTERS_BASE)
43#define WL18XX_REG_CONFIG_BASE (WL18XX_REGISTERS_BASE + 0x02000) 46#define WL18XX_REG_CONFIG_BASE (WL18XX_REGISTERS_BASE + 0x02000)
@@ -217,4 +220,16 @@ static const char * const rdl_names[] = {
217 [RDL_4_SP] = "1897 MIMO", 220 [RDL_4_SP] = "1897 MIMO",
218}; 221};
219 222
223/* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */
224#define WL18XX_PHY_FPGA_SPARE_1 0x8093CA40
225
226/* command to disable FDSP clock */
227#define MEM_FDSP_CLK_120_DISABLE 0x80000000
228
229/* command to set ATPG clock toward FDSP Code RAM rather than its own clock */
230#define MEM_FDSP_CODERAM_FUNC_CLK_SEL 0xC0000000
231
232/* command to re-enable FDSP clock */
233#define MEM_FDSP_CLK_120_ENABLE 0x40000000
234
220#endif /* __REG_H__ */ 235#endif /* __REG_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
index b21398f6c3ec..4f23931d7bd5 100644
--- a/drivers/net/wireless/ti/wlcore/Makefile
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -1,5 +1,5 @@
1wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \ 1wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
2 boot.o init.o debugfs.o scan.o 2 boot.o init.o debugfs.o scan.o sysfs.o
3 3
4wlcore_spi-objs = spi.o 4wlcore_spi-objs = spi.o
5wlcore_sdio-objs = sdio.o 5wlcore_sdio-objs = sdio.o
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index c3e1f79c7856..e17630c2a849 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -1056,7 +1056,7 @@ static ssize_t dev_mem_read(struct file *file,
1056 return -EINVAL; 1056 return -EINVAL;
1057 1057
1058 memset(&part, 0, sizeof(part)); 1058 memset(&part, 0, sizeof(part));
1059 part.mem.start = file->f_pos; 1059 part.mem.start = *ppos;
1060 part.mem.size = bytes; 1060 part.mem.size = bytes;
1061 1061
1062 buf = kmalloc(bytes, GFP_KERNEL); 1062 buf = kmalloc(bytes, GFP_KERNEL);
@@ -1137,7 +1137,7 @@ static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
1137 return -EINVAL; 1137 return -EINVAL;
1138 1138
1139 memset(&part, 0, sizeof(part)); 1139 memset(&part, 0, sizeof(part));
1140 part.mem.start = file->f_pos; 1140 part.mem.start = *ppos;
1141 part.mem.size = bytes; 1141 part.mem.size = bytes;
1142 1142
1143 buf = kmalloc(bytes, GFP_KERNEL); 1143 buf = kmalloc(bytes, GFP_KERNEL);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 953111a502ee..b8db55c868c7 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1,10 +1,9 @@
1 1
2/* 2/*
3 * This file is part of wl1271 3 * This file is part of wlcore
4 * 4 *
5 * Copyright (C) 2008-2010 Nokia Corporation 5 * Copyright (C) 2008-2010 Nokia Corporation
6 * 6 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 * 7 *
9 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
@@ -24,34 +23,23 @@
24 23
25#include <linux/module.h> 24#include <linux/module.h>
26#include <linux/firmware.h> 25#include <linux/firmware.h>
27#include <linux/delay.h>
28#include <linux/spi/spi.h>
29#include <linux/crc32.h>
30#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
31#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
32#include <linux/platform_device.h>
33#include <linux/slab.h>
34#include <linux/wl12xx.h> 28#include <linux/wl12xx.h>
35#include <linux/sched.h>
36#include <linux/interrupt.h> 29#include <linux/interrupt.h>
37 30
38#include "wlcore.h" 31#include "wlcore.h"
39#include "debug.h" 32#include "debug.h"
40#include "wl12xx_80211.h" 33#include "wl12xx_80211.h"
41#include "io.h" 34#include "io.h"
42#include "event.h"
43#include "tx.h" 35#include "tx.h"
44#include "rx.h"
45#include "ps.h" 36#include "ps.h"
46#include "init.h" 37#include "init.h"
47#include "debugfs.h" 38#include "debugfs.h"
48#include "cmd.h"
49#include "boot.h"
50#include "testmode.h" 39#include "testmode.h"
51#include "scan.h" 40#include "scan.h"
52#include "hw_ops.h" 41#include "hw_ops.h"
53 42#include "sysfs.h"
54#define WL1271_BOOT_RETRIES 3
55 43
56#define WL1271_BOOT_RETRIES 3 44#define WL1271_BOOT_RETRIES 3
57 45
@@ -65,8 +53,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
65static void wlcore_op_stop_locked(struct wl1271 *wl); 53static void wlcore_op_stop_locked(struct wl1271 *wl);
66static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif); 54static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
67 55
68static int wl12xx_set_authorized(struct wl1271 *wl, 56static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
69 struct wl12xx_vif *wlvif)
70{ 57{
71 int ret; 58 int ret;
72 59
@@ -983,7 +970,7 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
983 970
984static int wl1271_setup(struct wl1271 *wl) 971static int wl1271_setup(struct wl1271 *wl)
985{ 972{
986 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) + 973 wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
987 sizeof(*wl->fw_status_2) + 974 sizeof(*wl->fw_status_2) +
988 wl->fw_status_priv_len, GFP_KERNEL); 975 wl->fw_status_priv_len, GFP_KERNEL);
989 if (!wl->fw_status_1) 976 if (!wl->fw_status_1)
@@ -993,7 +980,7 @@ static int wl1271_setup(struct wl1271 *wl)
993 (((u8 *) wl->fw_status_1) + 980 (((u8 *) wl->fw_status_1) +
994 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc)); 981 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
995 982
996 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); 983 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
997 if (!wl->tx_res_if) { 984 if (!wl->tx_res_if) {
998 kfree(wl->fw_status_1); 985 kfree(wl->fw_status_1);
999 return -ENOMEM; 986 return -ENOMEM;
@@ -1668,8 +1655,7 @@ static int wl1271_configure_suspend(struct wl1271 *wl,
1668 return 0; 1655 return 0;
1669} 1656}
1670 1657
1671static void wl1271_configure_resume(struct wl1271 *wl, 1658static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1672 struct wl12xx_vif *wlvif)
1673{ 1659{
1674 int ret = 0; 1660 int ret = 0;
1675 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; 1661 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
@@ -2603,6 +2589,7 @@ unlock:
2603 cancel_work_sync(&wlvif->rx_streaming_enable_work); 2589 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2604 cancel_work_sync(&wlvif->rx_streaming_disable_work); 2590 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2605 cancel_delayed_work_sync(&wlvif->connection_loss_work); 2591 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2592 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2606 2593
2607 mutex_lock(&wl->mutex); 2594 mutex_lock(&wl->mutex);
2608} 2595}
@@ -3210,14 +3197,6 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3210 if (ret < 0) 3197 if (ret < 0)
3211 return ret; 3198 return ret;
3212 3199
3213 /* the default WEP key needs to be configured at least once */
3214 if (key_type == KEY_WEP) {
3215 ret = wl12xx_cmd_set_default_wep_key(wl,
3216 wlvif->default_key,
3217 wlvif->sta.hlid);
3218 if (ret < 0)
3219 return ret;
3220 }
3221 } 3200 }
3222 3201
3223 return 0; 3202 return 0;
@@ -3374,6 +3353,46 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3374} 3353}
3375EXPORT_SYMBOL_GPL(wlcore_set_key); 3354EXPORT_SYMBOL_GPL(wlcore_set_key);
3376 3355
3356static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3357 struct ieee80211_vif *vif,
3358 int key_idx)
3359{
3360 struct wl1271 *wl = hw->priv;
3361 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3362 int ret;
3363
3364 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3365 key_idx);
3366
3367 mutex_lock(&wl->mutex);
3368
3369 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3370 ret = -EAGAIN;
3371 goto out_unlock;
3372 }
3373
3374 ret = wl1271_ps_elp_wakeup(wl);
3375 if (ret < 0)
3376 goto out_unlock;
3377
3378 wlvif->default_key = key_idx;
3379
3380 /* the default WEP key needs to be configured at least once */
3381 if (wlvif->encryption_type == KEY_WEP) {
3382 ret = wl12xx_cmd_set_default_wep_key(wl,
3383 key_idx,
3384 wlvif->sta.hlid);
3385 if (ret < 0)
3386 goto out_sleep;
3387 }
3388
3389out_sleep:
3390 wl1271_ps_elp_sleep(wl);
3391
3392out_unlock:
3393 mutex_unlock(&wl->mutex);
3394}
3395
3377void wlcore_regdomain_config(struct wl1271 *wl) 3396void wlcore_regdomain_config(struct wl1271 *wl)
3378{ 3397{
3379 int ret; 3398 int ret;
@@ -3782,8 +3801,7 @@ static int wlcore_set_beacon_template(struct wl1271 *wl,
3782 struct ieee80211_hdr *hdr; 3801 struct ieee80211_hdr *hdr;
3783 u32 min_rate; 3802 u32 min_rate;
3784 int ret; 3803 int ret;
3785 int ieoffset = offsetof(struct ieee80211_mgmt, 3804 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3786 u.beacon.variable);
3787 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif); 3805 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3788 u16 tmpl_id; 3806 u16 tmpl_id;
3789 3807
@@ -4230,8 +4248,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4230 } 4248 }
4231 4249
4232 /* Handle new association with HT. Do this after join. */ 4250 /* Handle new association with HT. Do this after join. */
4233 if (sta_exists && 4251 if (sta_exists) {
4234 (changed & BSS_CHANGED_HT)) {
4235 bool enabled = 4252 bool enabled =
4236 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT; 4253 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4237 4254
@@ -5368,6 +5385,7 @@ static const struct ieee80211_ops wl1271_ops = {
5368 .ampdu_action = wl1271_op_ampdu_action, 5385 .ampdu_action = wl1271_op_ampdu_action,
5369 .tx_frames_pending = wl1271_tx_frames_pending, 5386 .tx_frames_pending = wl1271_tx_frames_pending,
5370 .set_bitrate_mask = wl12xx_set_bitrate_mask, 5387 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5388 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5371 .channel_switch = wl12xx_op_channel_switch, 5389 .channel_switch = wl12xx_op_channel_switch,
5372 .flush = wlcore_op_flush, 5390 .flush = wlcore_op_flush,
5373 .remain_on_channel = wlcore_op_remain_on_channel, 5391 .remain_on_channel = wlcore_op_remain_on_channel,
@@ -5403,151 +5421,6 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5403 return idx; 5421 return idx;
5404} 5422}
5405 5423
5406static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5407 struct device_attribute *attr,
5408 char *buf)
5409{
5410 struct wl1271 *wl = dev_get_drvdata(dev);
5411 ssize_t len;
5412
5413 len = PAGE_SIZE;
5414
5415 mutex_lock(&wl->mutex);
5416 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5417 wl->sg_enabled);
5418 mutex_unlock(&wl->mutex);
5419
5420 return len;
5421
5422}
5423
5424static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
5425 struct device_attribute *attr,
5426 const char *buf, size_t count)
5427{
5428 struct wl1271 *wl = dev_get_drvdata(dev);
5429 unsigned long res;
5430 int ret;
5431
5432 ret = kstrtoul(buf, 10, &res);
5433 if (ret < 0) {
5434 wl1271_warning("incorrect value written to bt_coex_mode");
5435 return count;
5436 }
5437
5438 mutex_lock(&wl->mutex);
5439
5440 res = !!res;
5441
5442 if (res == wl->sg_enabled)
5443 goto out;
5444
5445 wl->sg_enabled = res;
5446
5447 if (unlikely(wl->state != WLCORE_STATE_ON))
5448 goto out;
5449
5450 ret = wl1271_ps_elp_wakeup(wl);
5451 if (ret < 0)
5452 goto out;
5453
5454 wl1271_acx_sg_enable(wl, wl->sg_enabled);
5455 wl1271_ps_elp_sleep(wl);
5456
5457 out:
5458 mutex_unlock(&wl->mutex);
5459 return count;
5460}
5461
5462static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
5463 wl1271_sysfs_show_bt_coex_state,
5464 wl1271_sysfs_store_bt_coex_state);
5465
5466static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5467 struct device_attribute *attr,
5468 char *buf)
5469{
5470 struct wl1271 *wl = dev_get_drvdata(dev);
5471 ssize_t len;
5472
5473 len = PAGE_SIZE;
5474
5475 mutex_lock(&wl->mutex);
5476 if (wl->hw_pg_ver >= 0)
5477 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5478 else
5479 len = snprintf(buf, len, "n/a\n");
5480 mutex_unlock(&wl->mutex);
5481
5482 return len;
5483}
5484
5485static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
5486 wl1271_sysfs_show_hw_pg_ver, NULL);
5487
5488static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
5489 struct bin_attribute *bin_attr,
5490 char *buffer, loff_t pos, size_t count)
5491{
5492 struct device *dev = container_of(kobj, struct device, kobj);
5493 struct wl1271 *wl = dev_get_drvdata(dev);
5494 ssize_t len;
5495 int ret;
5496
5497 ret = mutex_lock_interruptible(&wl->mutex);
5498 if (ret < 0)
5499 return -ERESTARTSYS;
5500
5501 /* Let only one thread read the log at a time, blocking others */
5502 while (wl->fwlog_size == 0) {
5503 DEFINE_WAIT(wait);
5504
5505 prepare_to_wait_exclusive(&wl->fwlog_waitq,
5506 &wait,
5507 TASK_INTERRUPTIBLE);
5508
5509 if (wl->fwlog_size != 0) {
5510 finish_wait(&wl->fwlog_waitq, &wait);
5511 break;
5512 }
5513
5514 mutex_unlock(&wl->mutex);
5515
5516 schedule();
5517 finish_wait(&wl->fwlog_waitq, &wait);
5518
5519 if (signal_pending(current))
5520 return -ERESTARTSYS;
5521
5522 ret = mutex_lock_interruptible(&wl->mutex);
5523 if (ret < 0)
5524 return -ERESTARTSYS;
5525 }
5526
5527 /* Check if the fwlog is still valid */
5528 if (wl->fwlog_size < 0) {
5529 mutex_unlock(&wl->mutex);
5530 return 0;
5531 }
5532
5533 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5534 len = min(count, (size_t)wl->fwlog_size);
5535 wl->fwlog_size -= len;
5536 memcpy(buffer, wl->fwlog, len);
5537
5538 /* Make room for new messages */
5539 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5540
5541 mutex_unlock(&wl->mutex);
5542
5543 return len;
5544}
5545
5546static struct bin_attribute fwlog_attr = {
5547 .attr = {.name = "fwlog", .mode = S_IRUSR},
5548 .read = wl1271_sysfs_read_fwlog,
5549};
5550
5551static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic) 5424static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5552{ 5425{
5553 int i; 5426 int i;
@@ -5827,8 +5700,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5827 return 0; 5700 return 0;
5828} 5701}
5829 5702
5830#define WL1271_DEFAULT_CHANNEL 0
5831
5832struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, 5703struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5833 u32 mbox_size) 5704 u32 mbox_size)
5834{ 5705{
@@ -5881,7 +5752,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5881 goto err_hw; 5752 goto err_hw;
5882 } 5753 }
5883 5754
5884 wl->channel = WL1271_DEFAULT_CHANNEL; 5755 wl->channel = 0;
5885 wl->rx_counter = 0; 5756 wl->rx_counter = 0;
5886 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 5757 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5887 wl->band = IEEE80211_BAND_2GHZ; 5758 wl->band = IEEE80211_BAND_2GHZ;
@@ -5988,11 +5859,8 @@ int wlcore_free_hw(struct wl1271 *wl)
5988 wake_up_interruptible_all(&wl->fwlog_waitq); 5859 wake_up_interruptible_all(&wl->fwlog_waitq);
5989 mutex_unlock(&wl->mutex); 5860 mutex_unlock(&wl->mutex);
5990 5861
5991 device_remove_bin_file(wl->dev, &fwlog_attr); 5862 wlcore_sysfs_free(wl);
5992
5993 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5994 5863
5995 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5996 kfree(wl->buffer_32); 5864 kfree(wl->buffer_32);
5997 kfree(wl->mbox); 5865 kfree(wl->mbox);
5998 free_page((unsigned long)wl->fwlog); 5866 free_page((unsigned long)wl->fwlog);
@@ -6018,6 +5886,15 @@ int wlcore_free_hw(struct wl1271 *wl)
6018} 5886}
6019EXPORT_SYMBOL_GPL(wlcore_free_hw); 5887EXPORT_SYMBOL_GPL(wlcore_free_hw);
6020 5888
5889#ifdef CONFIG_PM
5890static const struct wiphy_wowlan_support wlcore_wowlan_support = {
5891 .flags = WIPHY_WOWLAN_ANY,
5892 .n_patterns = WL1271_MAX_RX_FILTERS,
5893 .pattern_min_len = 1,
5894 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
5895};
5896#endif
5897
6021static void wlcore_nvs_cb(const struct firmware *fw, void *context) 5898static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6022{ 5899{
6023 struct wl1271 *wl = context; 5900 struct wl1271 *wl = context;
@@ -6071,14 +5948,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6071 if (!ret) { 5948 if (!ret) {
6072 wl->irq_wake_enabled = true; 5949 wl->irq_wake_enabled = true;
6073 device_init_wakeup(wl->dev, 1); 5950 device_init_wakeup(wl->dev, 1);
6074 if (pdata->pwr_in_suspend) { 5951 if (pdata->pwr_in_suspend)
6075 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; 5952 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6076 wl->hw->wiphy->wowlan.n_patterns =
6077 WL1271_MAX_RX_FILTERS;
6078 wl->hw->wiphy->wowlan.pattern_min_len = 1;
6079 wl->hw->wiphy->wowlan.pattern_max_len =
6080 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
6081 }
6082 } 5953 }
6083#endif 5954#endif
6084 disable_irq(wl->irq); 5955 disable_irq(wl->irq);
@@ -6101,36 +5972,13 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6101 if (ret) 5972 if (ret)
6102 goto out_irq; 5973 goto out_irq;
6103 5974
6104 /* Create sysfs file to control bt coex state */ 5975 ret = wlcore_sysfs_init(wl);
6105 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state); 5976 if (ret)
6106 if (ret < 0) {
6107 wl1271_error("failed to create sysfs file bt_coex_state");
6108 goto out_unreg; 5977 goto out_unreg;
6109 }
6110
6111 /* Create sysfs file to get HW PG version */
6112 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
6113 if (ret < 0) {
6114 wl1271_error("failed to create sysfs file hw_pg_ver");
6115 goto out_bt_coex_state;
6116 }
6117
6118 /* Create sysfs file for the FW log */
6119 ret = device_create_bin_file(wl->dev, &fwlog_attr);
6120 if (ret < 0) {
6121 wl1271_error("failed to create sysfs file fwlog");
6122 goto out_hw_pg_ver;
6123 }
6124 5978
6125 wl->initialized = true; 5979 wl->initialized = true;
6126 goto out; 5980 goto out;
6127 5981
6128out_hw_pg_ver:
6129 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
6130
6131out_bt_coex_state:
6132 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
6133
6134out_unreg: 5982out_unreg:
6135 wl1271_unregister_hw(wl); 5983 wl1271_unregister_hw(wl);
6136 5984
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 9654577efd01..98066d40c2ad 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -110,7 +110,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
110 DECLARE_COMPLETION_ONSTACK(compl); 110 DECLARE_COMPLETION_ONSTACK(compl);
111 unsigned long flags; 111 unsigned long flags;
112 int ret; 112 int ret;
113 u32 start_time = jiffies; 113 unsigned long start_time = jiffies;
114 bool pending = false; 114 bool pending = false;
115 115
116 /* 116 /*
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index e26447832683..1b0cd98e35f1 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -434,19 +434,7 @@ static struct spi_driver wl1271_spi_driver = {
434 .remove = wl1271_remove, 434 .remove = wl1271_remove,
435}; 435};
436 436
437static int __init wl1271_init(void) 437module_spi_driver(wl1271_spi_driver);
438{
439 return spi_register_driver(&wl1271_spi_driver);
440}
441
442static void __exit wl1271_exit(void)
443{
444 spi_unregister_driver(&wl1271_spi_driver);
445}
446
447module_init(wl1271_init);
448module_exit(wl1271_exit);
449
450MODULE_LICENSE("GPL"); 438MODULE_LICENSE("GPL");
451MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 439MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
452MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 440MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
new file mode 100644
index 000000000000..8e583497940d
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -0,0 +1,216 @@
1/*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2013 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include "wlcore.h"
23#include "debug.h"
24#include "ps.h"
25#include "sysfs.h"
26
27static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
28 struct device_attribute *attr,
29 char *buf)
30{
31 struct wl1271 *wl = dev_get_drvdata(dev);
32 ssize_t len;
33
34 len = PAGE_SIZE;
35
36 mutex_lock(&wl->mutex);
37 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
38 wl->sg_enabled);
39 mutex_unlock(&wl->mutex);
40
41 return len;
42
43}
44
45static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
46 struct device_attribute *attr,
47 const char *buf, size_t count)
48{
49 struct wl1271 *wl = dev_get_drvdata(dev);
50 unsigned long res;
51 int ret;
52
53 ret = kstrtoul(buf, 10, &res);
54 if (ret < 0) {
55 wl1271_warning("incorrect value written to bt_coex_mode");
56 return count;
57 }
58
59 mutex_lock(&wl->mutex);
60
61 res = !!res;
62
63 if (res == wl->sg_enabled)
64 goto out;
65
66 wl->sg_enabled = res;
67
68 if (unlikely(wl->state != WLCORE_STATE_ON))
69 goto out;
70
71 ret = wl1271_ps_elp_wakeup(wl);
72 if (ret < 0)
73 goto out;
74
75 wl1271_acx_sg_enable(wl, wl->sg_enabled);
76 wl1271_ps_elp_sleep(wl);
77
78 out:
79 mutex_unlock(&wl->mutex);
80 return count;
81}
82
83static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
84 wl1271_sysfs_show_bt_coex_state,
85 wl1271_sysfs_store_bt_coex_state);
86
87static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
88 struct device_attribute *attr,
89 char *buf)
90{
91 struct wl1271 *wl = dev_get_drvdata(dev);
92 ssize_t len;
93
94 len = PAGE_SIZE;
95
96 mutex_lock(&wl->mutex);
97 if (wl->hw_pg_ver >= 0)
98 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
99 else
100 len = snprintf(buf, len, "n/a\n");
101 mutex_unlock(&wl->mutex);
102
103 return len;
104}
105
106static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
107 wl1271_sysfs_show_hw_pg_ver, NULL);
108
109static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
110 struct bin_attribute *bin_attr,
111 char *buffer, loff_t pos, size_t count)
112{
113 struct device *dev = container_of(kobj, struct device, kobj);
114 struct wl1271 *wl = dev_get_drvdata(dev);
115 ssize_t len;
116 int ret;
117
118 ret = mutex_lock_interruptible(&wl->mutex);
119 if (ret < 0)
120 return -ERESTARTSYS;
121
122 /* Let only one thread read the log at a time, blocking others */
123 while (wl->fwlog_size == 0) {
124 DEFINE_WAIT(wait);
125
126 prepare_to_wait_exclusive(&wl->fwlog_waitq,
127 &wait,
128 TASK_INTERRUPTIBLE);
129
130 if (wl->fwlog_size != 0) {
131 finish_wait(&wl->fwlog_waitq, &wait);
132 break;
133 }
134
135 mutex_unlock(&wl->mutex);
136
137 schedule();
138 finish_wait(&wl->fwlog_waitq, &wait);
139
140 if (signal_pending(current))
141 return -ERESTARTSYS;
142
143 ret = mutex_lock_interruptible(&wl->mutex);
144 if (ret < 0)
145 return -ERESTARTSYS;
146 }
147
148 /* Check if the fwlog is still valid */
149 if (wl->fwlog_size < 0) {
150 mutex_unlock(&wl->mutex);
151 return 0;
152 }
153
154 /* Seeking is not supported - old logs are not kept. Disregard pos. */
155 len = min(count, (size_t)wl->fwlog_size);
156 wl->fwlog_size -= len;
157 memcpy(buffer, wl->fwlog, len);
158
159 /* Make room for new messages */
160 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
161
162 mutex_unlock(&wl->mutex);
163
164 return len;
165}
166
167static struct bin_attribute fwlog_attr = {
168 .attr = {.name = "fwlog", .mode = S_IRUSR},
169 .read = wl1271_sysfs_read_fwlog,
170};
171
172int wlcore_sysfs_init(struct wl1271 *wl)
173{
174 int ret;
175
176 /* Create sysfs file to control bt coex state */
177 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
178 if (ret < 0) {
179 wl1271_error("failed to create sysfs file bt_coex_state");
180 goto out;
181 }
182
183 /* Create sysfs file to get HW PG version */
184 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
185 if (ret < 0) {
186 wl1271_error("failed to create sysfs file hw_pg_ver");
187 goto out_bt_coex_state;
188 }
189
190 /* Create sysfs file for the FW log */
191 ret = device_create_bin_file(wl->dev, &fwlog_attr);
192 if (ret < 0) {
193 wl1271_error("failed to create sysfs file fwlog");
194 goto out_hw_pg_ver;
195 }
196
197 goto out;
198
199out_hw_pg_ver:
200 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
201
202out_bt_coex_state:
203 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
204
205out:
206 return ret;
207}
208
209void wlcore_sysfs_free(struct wl1271 *wl)
210{
211 device_remove_bin_file(wl->dev, &fwlog_attr);
212
213 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
214
215 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
216}
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.h b/drivers/net/wireless/ti/wlcore/sysfs.h
new file mode 100644
index 000000000000..c1488921839d
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/sysfs.h
@@ -0,0 +1,28 @@
1/*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2013 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __SYSFS_H__
23#define __SYSFS_H__
24
25int wlcore_sysfs_init(struct wl1271 *wl);
26void wlcore_sysfs_free(struct wl1271 *wl);
27
28#endif
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 004d02e71f01..7e93fe63a2c7 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -386,7 +386,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
386 is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) || 386 is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
387 (cipher == WLAN_CIPHER_SUITE_WEP104); 387 (cipher == WLAN_CIPHER_SUITE_WEP104);
388 388
389 if (unlikely(is_wep && wlvif->default_key != idx)) { 389 if (WARN_ON(is_wep && wlvif->default_key != idx)) {
390 ret = wl1271_set_default_wep_key(wl, wlvif, idx); 390 ret = wl1271_set_default_wep_key(wl, wlvif, idx);
391 if (ret < 0) 391 if (ret < 0)
392 return ret; 392 return ret;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 9d7f1723dd8f..8a4d77ee9c5b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -57,8 +57,12 @@ struct xenvif {
57 57
58 u8 fe_dev_addr[6]; 58 u8 fe_dev_addr[6];
59 59
60 /* Physical parameters of the comms window. */ 60 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
61 unsigned int irq; 61 unsigned int tx_irq;
62 unsigned int rx_irq;
63 /* Only used when feature-split-event-channels = 1 */
64 char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
65 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
62 66
63 /* List of frontends to notify after a batch of frames sent. */ 67 /* List of frontends to notify after a batch of frames sent. */
64 struct list_head notify_list; 68 struct list_head notify_list;
@@ -113,13 +117,15 @@ struct xenvif *xenvif_alloc(struct device *parent,
113 unsigned int handle); 117 unsigned int handle);
114 118
115int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, 119int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
116 unsigned long rx_ring_ref, unsigned int evtchn); 120 unsigned long rx_ring_ref, unsigned int tx_evtchn,
121 unsigned int rx_evtchn);
117void xenvif_disconnect(struct xenvif *vif); 122void xenvif_disconnect(struct xenvif *vif);
118 123
119void xenvif_get(struct xenvif *vif); 124void xenvif_get(struct xenvif *vif);
120void xenvif_put(struct xenvif *vif); 125void xenvif_put(struct xenvif *vif);
121 126
122int xenvif_xenbus_init(void); 127int xenvif_xenbus_init(void);
128void xenvif_xenbus_fini(void);
123 129
124int xenvif_schedulable(struct xenvif *vif); 130int xenvif_schedulable(struct xenvif *vif);
125 131
@@ -157,4 +163,6 @@ void xenvif_carrier_off(struct xenvif *vif);
157/* Returns number of ring slots required to send an skb to the frontend */ 163/* Returns number of ring slots required to send an skb to the frontend */
158unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); 164unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
159 165
166extern bool separate_tx_rx_irq;
167
160#endif /* __XEN_NETBACK__COMMON_H__ */ 168#endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index d98414168485..087d2db0389d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -60,21 +60,39 @@ static int xenvif_rx_schedulable(struct xenvif *vif)
60 return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif); 60 return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
61} 61}
62 62
63static irqreturn_t xenvif_interrupt(int irq, void *dev_id) 63static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
64{ 64{
65 struct xenvif *vif = dev_id; 65 struct xenvif *vif = dev_id;
66 66
67 if (vif->netbk == NULL) 67 if (vif->netbk == NULL)
68 return IRQ_NONE; 68 return IRQ_HANDLED;
69 69
70 xen_netbk_schedule_xenvif(vif); 70 xen_netbk_schedule_xenvif(vif);
71 71
72 return IRQ_HANDLED;
73}
74
75static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
76{
77 struct xenvif *vif = dev_id;
78
79 if (vif->netbk == NULL)
80 return IRQ_HANDLED;
81
72 if (xenvif_rx_schedulable(vif)) 82 if (xenvif_rx_schedulable(vif))
73 netif_wake_queue(vif->dev); 83 netif_wake_queue(vif->dev);
74 84
75 return IRQ_HANDLED; 85 return IRQ_HANDLED;
76} 86}
77 87
88static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
89{
90 xenvif_tx_interrupt(irq, dev_id);
91 xenvif_rx_interrupt(irq, dev_id);
92
93 return IRQ_HANDLED;
94}
95
78static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) 96static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
79{ 97{
80 struct xenvif *vif = netdev_priv(dev); 98 struct xenvif *vif = netdev_priv(dev);
@@ -125,13 +143,17 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
125static void xenvif_up(struct xenvif *vif) 143static void xenvif_up(struct xenvif *vif)
126{ 144{
127 xen_netbk_add_xenvif(vif); 145 xen_netbk_add_xenvif(vif);
128 enable_irq(vif->irq); 146 enable_irq(vif->tx_irq);
147 if (vif->tx_irq != vif->rx_irq)
148 enable_irq(vif->rx_irq);
129 xen_netbk_check_rx_xenvif(vif); 149 xen_netbk_check_rx_xenvif(vif);
130} 150}
131 151
132static void xenvif_down(struct xenvif *vif) 152static void xenvif_down(struct xenvif *vif)
133{ 153{
134 disable_irq(vif->irq); 154 disable_irq(vif->tx_irq);
155 if (vif->tx_irq != vif->rx_irq)
156 disable_irq(vif->rx_irq);
135 del_timer_sync(&vif->credit_timeout); 157 del_timer_sync(&vif->credit_timeout);
136 xen_netbk_deschedule_xenvif(vif); 158 xen_netbk_deschedule_xenvif(vif);
137 xen_netbk_remove_xenvif(vif); 159 xen_netbk_remove_xenvif(vif);
@@ -308,25 +330,52 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
308} 330}
309 331
310int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, 332int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
311 unsigned long rx_ring_ref, unsigned int evtchn) 333 unsigned long rx_ring_ref, unsigned int tx_evtchn,
334 unsigned int rx_evtchn)
312{ 335{
313 int err = -ENOMEM; 336 int err = -ENOMEM;
314 337
315 /* Already connected through? */ 338 /* Already connected through? */
316 if (vif->irq) 339 if (vif->tx_irq)
317 return 0; 340 return 0;
318 341
342 __module_get(THIS_MODULE);
343
319 err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 344 err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
320 if (err < 0) 345 if (err < 0)
321 goto err; 346 goto err;
322 347
323 err = bind_interdomain_evtchn_to_irqhandler( 348 if (tx_evtchn == rx_evtchn) {
324 vif->domid, evtchn, xenvif_interrupt, 0, 349 /* feature-split-event-channels == 0 */
325 vif->dev->name, vif); 350 err = bind_interdomain_evtchn_to_irqhandler(
326 if (err < 0) 351 vif->domid, tx_evtchn, xenvif_interrupt, 0,
327 goto err_unmap; 352 vif->dev->name, vif);
328 vif->irq = err; 353 if (err < 0)
329 disable_irq(vif->irq); 354 goto err_unmap;
355 vif->tx_irq = vif->rx_irq = err;
356 disable_irq(vif->tx_irq);
357 } else {
358 /* feature-split-event-channels == 1 */
359 snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
360 "%s-tx", vif->dev->name);
361 err = bind_interdomain_evtchn_to_irqhandler(
362 vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
363 vif->tx_irq_name, vif);
364 if (err < 0)
365 goto err_unmap;
366 vif->tx_irq = err;
367 disable_irq(vif->tx_irq);
368
369 snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
370 "%s-rx", vif->dev->name);
371 err = bind_interdomain_evtchn_to_irqhandler(
372 vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
373 vif->rx_irq_name, vif);
374 if (err < 0)
375 goto err_tx_unbind;
376 vif->rx_irq = err;
377 disable_irq(vif->rx_irq);
378 }
330 379
331 xenvif_get(vif); 380 xenvif_get(vif);
332 381
@@ -340,9 +389,13 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
340 rtnl_unlock(); 389 rtnl_unlock();
341 390
342 return 0; 391 return 0;
392err_tx_unbind:
393 unbind_from_irqhandler(vif->tx_irq, vif);
394 vif->tx_irq = 0;
343err_unmap: 395err_unmap:
344 xen_netbk_unmap_frontend_rings(vif); 396 xen_netbk_unmap_frontend_rings(vif);
345err: 397err:
398 module_put(THIS_MODULE);
346 return err; 399 return err;
347} 400}
348 401
@@ -360,18 +413,37 @@ void xenvif_carrier_off(struct xenvif *vif)
360 413
361void xenvif_disconnect(struct xenvif *vif) 414void xenvif_disconnect(struct xenvif *vif)
362{ 415{
416 /* Disconnect funtion might get called by generic framework
417 * even before vif connects, so we need to check if we really
418 * need to do a module_put.
419 */
420 int need_module_put = 0;
421
363 if (netif_carrier_ok(vif->dev)) 422 if (netif_carrier_ok(vif->dev))
364 xenvif_carrier_off(vif); 423 xenvif_carrier_off(vif);
365 424
366 atomic_dec(&vif->refcnt); 425 atomic_dec(&vif->refcnt);
367 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); 426 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
368 427
369 if (vif->irq) 428 if (vif->tx_irq) {
370 unbind_from_irqhandler(vif->irq, vif); 429 if (vif->tx_irq == vif->rx_irq)
430 unbind_from_irqhandler(vif->tx_irq, vif);
431 else {
432 unbind_from_irqhandler(vif->tx_irq, vif);
433 unbind_from_irqhandler(vif->rx_irq, vif);
434 }
435 /* vif->irq is valid, we had a module_get in
436 * xenvif_connect.
437 */
438 need_module_put = 1;
439 }
371 440
372 unregister_netdev(vif->dev); 441 unregister_netdev(vif->dev);
373 442
374 xen_netbk_unmap_frontend_rings(vif); 443 xen_netbk_unmap_frontend_rings(vif);
375 444
376 free_netdev(vif->dev); 445 free_netdev(vif->dev);
446
447 if (need_module_put)
448 module_put(THIS_MODULE);
377} 449}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 8c20935d72c9..64828de25d9a 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -47,6 +47,13 @@
47#include <asm/xen/hypercall.h> 47#include <asm/xen/hypercall.h>
48#include <asm/xen/page.h> 48#include <asm/xen/page.h>
49 49
50/* Provide an option to disable split event channels at load time as
51 * event channels are limited resource. Split event channels are
52 * enabled by default.
53 */
54bool separate_tx_rx_irq = 1;
55module_param(separate_tx_rx_irq, bool, 0644);
56
50/* 57/*
51 * This is the maximum slots a skb can have. If a guest sends a skb 58 * This is the maximum slots a skb can have. If a guest sends a skb
52 * which exceeds this limit it is considered malicious. 59 * which exceeds this limit it is considered malicious.
@@ -783,7 +790,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
783 } 790 }
784 791
785 list_for_each_entry_safe(vif, tmp, &notify, notify_list) { 792 list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
786 notify_remote_via_irq(vif->irq); 793 notify_remote_via_irq(vif->rx_irq);
787 list_del_init(&vif->notify_list); 794 list_del_init(&vif->notify_list);
788 xenvif_put(vif); 795 xenvif_put(vif);
789 } 796 }
@@ -1763,7 +1770,7 @@ static void make_tx_response(struct xenvif *vif,
1763 vif->tx.rsp_prod_pvt = ++i; 1770 vif->tx.rsp_prod_pvt = ++i;
1764 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); 1771 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1765 if (notify) 1772 if (notify)
1766 notify_remote_via_irq(vif->irq); 1773 notify_remote_via_irq(vif->tx_irq);
1767} 1774}
1768 1775
1769static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, 1776static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
@@ -1883,9 +1890,8 @@ static int __init netback_init(void)
1883 return -ENODEV; 1890 return -ENODEV;
1884 1891
1885 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) { 1892 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1886 printk(KERN_INFO 1893 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1887 "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n", 1894 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1888 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1889 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX; 1895 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1890 } 1896 }
1891 1897
@@ -1914,7 +1920,7 @@ static int __init netback_init(void)
1914 "netback/%u", group); 1920 "netback/%u", group);
1915 1921
1916 if (IS_ERR(netbk->task)) { 1922 if (IS_ERR(netbk->task)) {
1917 printk(KERN_ALERT "kthread_create() fails at netback\n"); 1923 pr_alert("kthread_create() fails at netback\n");
1918 del_timer(&netbk->net_timer); 1924 del_timer(&netbk->net_timer);
1919 rc = PTR_ERR(netbk->task); 1925 rc = PTR_ERR(netbk->task);
1920 goto failed_init; 1926 goto failed_init;
@@ -1940,10 +1946,6 @@ static int __init netback_init(void)
1940failed_init: 1946failed_init:
1941 while (--group >= 0) { 1947 while (--group >= 0) {
1942 struct xen_netbk *netbk = &xen_netbk[group]; 1948 struct xen_netbk *netbk = &xen_netbk[group];
1943 for (i = 0; i < MAX_PENDING_REQS; i++) {
1944 if (netbk->mmap_pages[i])
1945 __free_page(netbk->mmap_pages[i]);
1946 }
1947 del_timer(&netbk->net_timer); 1949 del_timer(&netbk->net_timer);
1948 kthread_stop(netbk->task); 1950 kthread_stop(netbk->task);
1949 } 1951 }
@@ -1954,5 +1956,25 @@ failed_init:
1954 1956
1955module_init(netback_init); 1957module_init(netback_init);
1956 1958
1959static void __exit netback_fini(void)
1960{
1961 int i, j;
1962
1963 xenvif_xenbus_fini();
1964
1965 for (i = 0; i < xen_netbk_group_nr; i++) {
1966 struct xen_netbk *netbk = &xen_netbk[i];
1967 del_timer_sync(&netbk->net_timer);
1968 kthread_stop(netbk->task);
1969 for (j = 0; j < MAX_PENDING_REQS; j++) {
1970 if (netbk->mmap_pages[j])
1971 __free_page(netbk->mmap_pages[j]);
1972 }
1973 }
1974
1975 vfree(xen_netbk);
1976}
1977module_exit(netback_fini);
1978
1957MODULE_LICENSE("Dual BSD/GPL"); 1979MODULE_LICENSE("Dual BSD/GPL");
1958MODULE_ALIAS("xen-backend:vif"); 1980MODULE_ALIAS("xen-backend:vif");
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 410018c4c528..1fe48fe364ed 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -122,6 +122,16 @@ static int netback_probe(struct xenbus_device *dev,
122 goto fail; 122 goto fail;
123 } 123 }
124 124
125 /*
126 * Split event channels support, this is optional so it is not
127 * put inside the above loop.
128 */
129 err = xenbus_printf(XBT_NIL, dev->nodename,
130 "feature-split-event-channels",
131 "%u", separate_tx_rx_irq);
132 if (err)
133 pr_debug("Error writing feature-split-event-channels\n");
134
125 err = xenbus_switch_state(dev, XenbusStateInitWait); 135 err = xenbus_switch_state(dev, XenbusStateInitWait);
126 if (err) 136 if (err)
127 goto fail; 137 goto fail;
@@ -135,7 +145,7 @@ abort_transaction:
135 xenbus_transaction_end(xbt, 1); 145 xenbus_transaction_end(xbt, 1);
136 xenbus_dev_fatal(dev, err, "%s", message); 146 xenbus_dev_fatal(dev, err, "%s", message);
137fail: 147fail:
138 pr_debug("failed"); 148 pr_debug("failed\n");
139 netback_remove(dev); 149 netback_remove(dev);
140 return err; 150 return err;
141} 151}
@@ -218,15 +228,14 @@ static void frontend_changed(struct xenbus_device *dev,
218{ 228{
219 struct backend_info *be = dev_get_drvdata(&dev->dev); 229 struct backend_info *be = dev_get_drvdata(&dev->dev);
220 230
221 pr_debug("frontend state %s", xenbus_strstate(frontend_state)); 231 pr_debug("frontend state %s\n", xenbus_strstate(frontend_state));
222 232
223 be->frontend_state = frontend_state; 233 be->frontend_state = frontend_state;
224 234
225 switch (frontend_state) { 235 switch (frontend_state) {
226 case XenbusStateInitialising: 236 case XenbusStateInitialising:
227 if (dev->state == XenbusStateClosed) { 237 if (dev->state == XenbusStateClosed) {
228 printk(KERN_INFO "%s: %s: prepare for reconnect\n", 238 pr_info("%s: prepare for reconnect\n", dev->nodename);
229 __func__, dev->nodename);
230 xenbus_switch_state(dev, XenbusStateInitWait); 239 xenbus_switch_state(dev, XenbusStateInitWait);
231 } 240 }
232 break; 241 break;
@@ -393,21 +402,36 @@ static int connect_rings(struct backend_info *be)
393 struct xenvif *vif = be->vif; 402 struct xenvif *vif = be->vif;
394 struct xenbus_device *dev = be->dev; 403 struct xenbus_device *dev = be->dev;
395 unsigned long tx_ring_ref, rx_ring_ref; 404 unsigned long tx_ring_ref, rx_ring_ref;
396 unsigned int evtchn, rx_copy; 405 unsigned int tx_evtchn, rx_evtchn, rx_copy;
397 int err; 406 int err;
398 int val; 407 int val;
399 408
400 err = xenbus_gather(XBT_NIL, dev->otherend, 409 err = xenbus_gather(XBT_NIL, dev->otherend,
401 "tx-ring-ref", "%lu", &tx_ring_ref, 410 "tx-ring-ref", "%lu", &tx_ring_ref,
402 "rx-ring-ref", "%lu", &rx_ring_ref, 411 "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
403 "event-channel", "%u", &evtchn, NULL);
404 if (err) { 412 if (err) {
405 xenbus_dev_fatal(dev, err, 413 xenbus_dev_fatal(dev, err,
406 "reading %s/ring-ref and event-channel", 414 "reading %s/ring-ref",
407 dev->otherend); 415 dev->otherend);
408 return err; 416 return err;
409 } 417 }
410 418
419 /* Try split event channels first, then single event channel. */
420 err = xenbus_gather(XBT_NIL, dev->otherend,
421 "event-channel-tx", "%u", &tx_evtchn,
422 "event-channel-rx", "%u", &rx_evtchn, NULL);
423 if (err < 0) {
424 err = xenbus_scanf(XBT_NIL, dev->otherend,
425 "event-channel", "%u", &tx_evtchn);
426 if (err < 0) {
427 xenbus_dev_fatal(dev, err,
428 "reading %s/event-channel(-tx/rx)",
429 dev->otherend);
430 return err;
431 }
432 rx_evtchn = tx_evtchn;
433 }
434
411 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", 435 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
412 &rx_copy); 436 &rx_copy);
413 if (err == -ENOENT) { 437 if (err == -ENOENT) {
@@ -454,11 +478,13 @@ static int connect_rings(struct backend_info *be)
454 vif->csum = !val; 478 vif->csum = !val;
455 479
456 /* Map the shared frame, irq etc. */ 480 /* Map the shared frame, irq etc. */
457 err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref, evtchn); 481 err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
482 tx_evtchn, rx_evtchn);
458 if (err) { 483 if (err) {
459 xenbus_dev_fatal(dev, err, 484 xenbus_dev_fatal(dev, err,
460 "mapping shared-frames %lu/%lu port %u", 485 "mapping shared-frames %lu/%lu port tx %u rx %u",
461 tx_ring_ref, rx_ring_ref, evtchn); 486 tx_ring_ref, rx_ring_ref,
487 tx_evtchn, rx_evtchn);
462 return err; 488 return err;
463 } 489 }
464 return 0; 490 return 0;
@@ -485,3 +511,8 @@ int xenvif_xenbus_init(void)
485{ 511{
486 return xenbus_register_backend(&netback_driver); 512 return xenbus_register_backend(&netback_driver);
487} 513}
514
515void xenvif_xenbus_fini(void)
516{
517 return xenbus_unregister_driver(&netback_driver);
518}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1db101415069..ff7f111fffee 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -29,6 +29,8 @@
29 * IN THE SOFTWARE. 29 * IN THE SOFTWARE.
30 */ 30 */
31 31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
32#include <linux/module.h> 34#include <linux/module.h>
33#include <linux/kernel.h> 35#include <linux/kernel.h>
34#include <linux/netdevice.h> 36#include <linux/netdevice.h>
@@ -85,7 +87,15 @@ struct netfront_info {
85 87
86 struct napi_struct napi; 88 struct napi_struct napi;
87 89
88 unsigned int evtchn; 90 /* Split event channels support, tx_* == rx_* when using
91 * single event channel.
92 */
93 unsigned int tx_evtchn, rx_evtchn;
94 unsigned int tx_irq, rx_irq;
95 /* Only used when split event channels support is enabled */
96 char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
97 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
98
89 struct xenbus_device *xbdev; 99 struct xenbus_device *xbdev;
90 100
91 spinlock_t tx_lock; 101 spinlock_t tx_lock;
@@ -330,7 +340,7 @@ no_skb:
330 push: 340 push:
331 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); 341 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
332 if (notify) 342 if (notify)
333 notify_remote_via_irq(np->netdev->irq); 343 notify_remote_via_irq(np->rx_irq);
334} 344}
335 345
336static int xennet_open(struct net_device *dev) 346static int xennet_open(struct net_device *dev)
@@ -377,9 +387,8 @@ static void xennet_tx_buf_gc(struct net_device *dev)
377 skb = np->tx_skbs[id].skb; 387 skb = np->tx_skbs[id].skb;
378 if (unlikely(gnttab_query_foreign_access( 388 if (unlikely(gnttab_query_foreign_access(
379 np->grant_tx_ref[id]) != 0)) { 389 np->grant_tx_ref[id]) != 0)) {
380 printk(KERN_ALERT "xennet_tx_buf_gc: warning " 390 pr_alert("%s: warning -- grant still in use by backend domain\n",
381 "-- grant still in use by backend " 391 __func__);
382 "domain.\n");
383 BUG(); 392 BUG();
384 } 393 }
385 gnttab_end_foreign_access_ref( 394 gnttab_end_foreign_access_ref(
@@ -623,7 +632,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
623 632
624 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); 633 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
625 if (notify) 634 if (notify)
626 notify_remote_via_irq(np->netdev->irq); 635 notify_remote_via_irq(np->tx_irq);
627 636
628 u64_stats_update_begin(&stats->syncp); 637 u64_stats_update_begin(&stats->syncp);
629 stats->tx_bytes += skb->len; 638 stats->tx_bytes += skb->len;
@@ -796,14 +805,14 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
796{ 805{
797 if (!gso->u.gso.size) { 806 if (!gso->u.gso.size) {
798 if (net_ratelimit()) 807 if (net_ratelimit())
799 printk(KERN_WARNING "GSO size must not be zero.\n"); 808 pr_warn("GSO size must not be zero\n");
800 return -EINVAL; 809 return -EINVAL;
801 } 810 }
802 811
803 /* Currently only TCPv4 S.O. is supported. */ 812 /* Currently only TCPv4 S.O. is supported. */
804 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { 813 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
805 if (net_ratelimit()) 814 if (net_ratelimit())
806 printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type); 815 pr_warn("Bad GSO type %d\n", gso->u.gso.type);
807 return -EINVAL; 816 return -EINVAL;
808 } 817 }
809 818
@@ -850,7 +859,6 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
850static int checksum_setup(struct net_device *dev, struct sk_buff *skb) 859static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
851{ 860{
852 struct iphdr *iph; 861 struct iphdr *iph;
853 unsigned char *th;
854 int err = -EPROTO; 862 int err = -EPROTO;
855 int recalculate_partial_csum = 0; 863 int recalculate_partial_csum = 0;
856 864
@@ -875,27 +883,27 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
875 goto out; 883 goto out;
876 884
877 iph = (void *)skb->data; 885 iph = (void *)skb->data;
878 th = skb->data + 4 * iph->ihl;
879 if (th >= skb_tail_pointer(skb))
880 goto out;
881 886
882 skb->csum_start = th - skb->head;
883 switch (iph->protocol) { 887 switch (iph->protocol) {
884 case IPPROTO_TCP: 888 case IPPROTO_TCP:
885 skb->csum_offset = offsetof(struct tcphdr, check); 889 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
890 offsetof(struct tcphdr, check)))
891 goto out;
886 892
887 if (recalculate_partial_csum) { 893 if (recalculate_partial_csum) {
888 struct tcphdr *tcph = (struct tcphdr *)th; 894 struct tcphdr *tcph = tcp_hdr(skb);
889 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 895 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
890 skb->len - iph->ihl*4, 896 skb->len - iph->ihl*4,
891 IPPROTO_TCP, 0); 897 IPPROTO_TCP, 0);
892 } 898 }
893 break; 899 break;
894 case IPPROTO_UDP: 900 case IPPROTO_UDP:
895 skb->csum_offset = offsetof(struct udphdr, check); 901 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
902 offsetof(struct udphdr, check)))
903 goto out;
896 904
897 if (recalculate_partial_csum) { 905 if (recalculate_partial_csum) {
898 struct udphdr *udph = (struct udphdr *)th; 906 struct udphdr *udph = udp_hdr(skb);
899 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 907 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
900 skb->len - iph->ihl*4, 908 skb->len - iph->ihl*4,
901 IPPROTO_UDP, 0); 909 IPPROTO_UDP, 0);
@@ -903,15 +911,11 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
903 break; 911 break;
904 default: 912 default:
905 if (net_ratelimit()) 913 if (net_ratelimit())
906 printk(KERN_ERR "Attempting to checksum a non-" 914 pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
907 "TCP/UDP packet, dropping a protocol" 915 iph->protocol);
908 " %d packet", iph->protocol);
909 goto out; 916 goto out;
910 } 917 }
911 918
912 if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
913 goto out;
914
915 err = 0; 919 err = 0;
916 920
917out: 921out:
@@ -1254,23 +1258,35 @@ static int xennet_set_features(struct net_device *dev,
1254 return 0; 1258 return 0;
1255} 1259}
1256 1260
1257static irqreturn_t xennet_interrupt(int irq, void *dev_id) 1261static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1258{ 1262{
1259 struct net_device *dev = dev_id; 1263 struct netfront_info *np = dev_id;
1260 struct netfront_info *np = netdev_priv(dev); 1264 struct net_device *dev = np->netdev;
1261 unsigned long flags; 1265 unsigned long flags;
1262 1266
1263 spin_lock_irqsave(&np->tx_lock, flags); 1267 spin_lock_irqsave(&np->tx_lock, flags);
1268 xennet_tx_buf_gc(dev);
1269 spin_unlock_irqrestore(&np->tx_lock, flags);
1264 1270
1265 if (likely(netif_carrier_ok(dev))) { 1271 return IRQ_HANDLED;
1266 xennet_tx_buf_gc(dev); 1272}
1267 /* Under tx_lock: protects access to rx shared-ring indexes. */ 1273
1268 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) 1274static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1275{
1276 struct netfront_info *np = dev_id;
1277 struct net_device *dev = np->netdev;
1278
1279 if (likely(netif_carrier_ok(dev) &&
1280 RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
1269 napi_schedule(&np->napi); 1281 napi_schedule(&np->napi);
1270 }
1271 1282
1272 spin_unlock_irqrestore(&np->tx_lock, flags); 1283 return IRQ_HANDLED;
1284}
1273 1285
1286static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1287{
1288 xennet_tx_interrupt(irq, dev_id);
1289 xennet_rx_interrupt(irq, dev_id);
1274 return IRQ_HANDLED; 1290 return IRQ_HANDLED;
1275} 1291}
1276 1292
@@ -1343,14 +1359,14 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1343 /* A grant for every tx ring slot */ 1359 /* A grant for every tx ring slot */
1344 if (gnttab_alloc_grant_references(TX_MAX_TARGET, 1360 if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1345 &np->gref_tx_head) < 0) { 1361 &np->gref_tx_head) < 0) {
1346 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); 1362 pr_alert("can't alloc tx grant refs\n");
1347 err = -ENOMEM; 1363 err = -ENOMEM;
1348 goto exit_free_stats; 1364 goto exit_free_stats;
1349 } 1365 }
1350 /* A grant for every rx ring slot */ 1366 /* A grant for every rx ring slot */
1351 if (gnttab_alloc_grant_references(RX_MAX_TARGET, 1367 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1352 &np->gref_rx_head) < 0) { 1368 &np->gref_rx_head) < 0) {
1353 printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); 1369 pr_alert("can't alloc rx grant refs\n");
1354 err = -ENOMEM; 1370 err = -ENOMEM;
1355 goto exit_free_tx; 1371 goto exit_free_tx;
1356 } 1372 }
@@ -1414,16 +1430,14 @@ static int netfront_probe(struct xenbus_device *dev,
1414 1430
1415 err = register_netdev(info->netdev); 1431 err = register_netdev(info->netdev);
1416 if (err) { 1432 if (err) {
1417 printk(KERN_WARNING "%s: register_netdev err=%d\n", 1433 pr_warn("%s: register_netdev err=%d\n", __func__, err);
1418 __func__, err);
1419 goto fail; 1434 goto fail;
1420 } 1435 }
1421 1436
1422 err = xennet_sysfs_addif(info->netdev); 1437 err = xennet_sysfs_addif(info->netdev);
1423 if (err) { 1438 if (err) {
1424 unregister_netdev(info->netdev); 1439 unregister_netdev(info->netdev);
1425 printk(KERN_WARNING "%s: add sysfs failed err=%d\n", 1440 pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
1426 __func__, err);
1427 goto fail; 1441 goto fail;
1428 } 1442 }
1429 1443
@@ -1451,9 +1465,14 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1451 spin_unlock_irq(&info->tx_lock); 1465 spin_unlock_irq(&info->tx_lock);
1452 spin_unlock_bh(&info->rx_lock); 1466 spin_unlock_bh(&info->rx_lock);
1453 1467
1454 if (info->netdev->irq) 1468 if (info->tx_irq && (info->tx_irq == info->rx_irq))
1455 unbind_from_irqhandler(info->netdev->irq, info->netdev); 1469 unbind_from_irqhandler(info->tx_irq, info);
1456 info->evtchn = info->netdev->irq = 0; 1470 if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
1471 unbind_from_irqhandler(info->tx_irq, info);
1472 unbind_from_irqhandler(info->rx_irq, info);
1473 }
1474 info->tx_evtchn = info->rx_evtchn = 0;
1475 info->tx_irq = info->rx_irq = 0;
1457 1476
1458 /* End access and free the pages */ 1477 /* End access and free the pages */
1459 xennet_end_access(info->tx_ring_ref, info->tx.sring); 1478 xennet_end_access(info->tx_ring_ref, info->tx.sring);
@@ -1503,12 +1522,82 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1503 return 0; 1522 return 0;
1504} 1523}
1505 1524
1525static int setup_netfront_single(struct netfront_info *info)
1526{
1527 int err;
1528
1529 err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1530 if (err < 0)
1531 goto fail;
1532
1533 err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1534 xennet_interrupt,
1535 0, info->netdev->name, info);
1536 if (err < 0)
1537 goto bind_fail;
1538 info->rx_evtchn = info->tx_evtchn;
1539 info->rx_irq = info->tx_irq = err;
1540
1541 return 0;
1542
1543bind_fail:
1544 xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1545 info->tx_evtchn = 0;
1546fail:
1547 return err;
1548}
1549
1550static int setup_netfront_split(struct netfront_info *info)
1551{
1552 int err;
1553
1554 err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1555 if (err < 0)
1556 goto fail;
1557 err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
1558 if (err < 0)
1559 goto alloc_rx_evtchn_fail;
1560
1561 snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
1562 "%s-tx", info->netdev->name);
1563 err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1564 xennet_tx_interrupt,
1565 0, info->tx_irq_name, info);
1566 if (err < 0)
1567 goto bind_tx_fail;
1568 info->tx_irq = err;
1569
1570 snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
1571 "%s-rx", info->netdev->name);
1572 err = bind_evtchn_to_irqhandler(info->rx_evtchn,
1573 xennet_rx_interrupt,
1574 0, info->rx_irq_name, info);
1575 if (err < 0)
1576 goto bind_rx_fail;
1577 info->rx_irq = err;
1578
1579 return 0;
1580
1581bind_rx_fail:
1582 unbind_from_irqhandler(info->tx_irq, info);
1583 info->tx_irq = 0;
1584bind_tx_fail:
1585 xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
1586 info->rx_evtchn = 0;
1587alloc_rx_evtchn_fail:
1588 xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1589 info->tx_evtchn = 0;
1590fail:
1591 return err;
1592}
1593
1506static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info) 1594static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1507{ 1595{
1508 struct xen_netif_tx_sring *txs; 1596 struct xen_netif_tx_sring *txs;
1509 struct xen_netif_rx_sring *rxs; 1597 struct xen_netif_rx_sring *rxs;
1510 int err; 1598 int err;
1511 struct net_device *netdev = info->netdev; 1599 struct net_device *netdev = info->netdev;
1600 unsigned int feature_split_evtchn;
1512 1601
1513 info->tx_ring_ref = GRANT_INVALID_REF; 1602 info->tx_ring_ref = GRANT_INVALID_REF;
1514 info->rx_ring_ref = GRANT_INVALID_REF; 1603 info->rx_ring_ref = GRANT_INVALID_REF;
@@ -1516,6 +1605,12 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1516 info->tx.sring = NULL; 1605 info->tx.sring = NULL;
1517 netdev->irq = 0; 1606 netdev->irq = 0;
1518 1607
1608 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1609 "feature-split-event-channels", "%u",
1610 &feature_split_evtchn);
1611 if (err < 0)
1612 feature_split_evtchn = 0;
1613
1519 err = xen_net_read_mac(dev, netdev->dev_addr); 1614 err = xen_net_read_mac(dev, netdev->dev_addr);
1520 if (err) { 1615 if (err) {
1521 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); 1616 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
@@ -1532,40 +1627,50 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1532 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); 1627 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1533 1628
1534 err = xenbus_grant_ring(dev, virt_to_mfn(txs)); 1629 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1535 if (err < 0) { 1630 if (err < 0)
1536 free_page((unsigned long)txs); 1631 goto grant_tx_ring_fail;
1537 goto fail;
1538 }
1539 1632
1540 info->tx_ring_ref = err; 1633 info->tx_ring_ref = err;
1541 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 1634 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1542 if (!rxs) { 1635 if (!rxs) {
1543 err = -ENOMEM; 1636 err = -ENOMEM;
1544 xenbus_dev_fatal(dev, err, "allocating rx ring page"); 1637 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1545 goto fail; 1638 goto alloc_rx_ring_fail;
1546 } 1639 }
1547 SHARED_RING_INIT(rxs); 1640 SHARED_RING_INIT(rxs);
1548 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); 1641 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1549 1642
1550 err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); 1643 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1551 if (err < 0) { 1644 if (err < 0)
1552 free_page((unsigned long)rxs); 1645 goto grant_rx_ring_fail;
1553 goto fail;
1554 }
1555 info->rx_ring_ref = err; 1646 info->rx_ring_ref = err;
1556 1647
1557 err = xenbus_alloc_evtchn(dev, &info->evtchn); 1648 if (feature_split_evtchn)
1649 err = setup_netfront_split(info);
1650 /* setup single event channel if
1651 * a) feature-split-event-channels == 0
1652 * b) feature-split-event-channels == 1 but failed to setup
1653 */
1654 if (!feature_split_evtchn || (feature_split_evtchn && err))
1655 err = setup_netfront_single(info);
1656
1558 if (err) 1657 if (err)
1559 goto fail; 1658 goto alloc_evtchn_fail;
1560 1659
1561 err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
1562 0, netdev->name, netdev);
1563 if (err < 0)
1564 goto fail;
1565 netdev->irq = err;
1566 return 0; 1660 return 0;
1567 1661
1568 fail: 1662 /* If we fail to setup netfront, it is safe to just revoke access to
1663 * granted pages because backend is not accessing it at this point.
1664 */
1665alloc_evtchn_fail:
1666 gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
1667grant_rx_ring_fail:
1668 free_page((unsigned long)rxs);
1669alloc_rx_ring_fail:
1670 gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
1671grant_tx_ring_fail:
1672 free_page((unsigned long)txs);
1673fail:
1569 return err; 1674 return err;
1570} 1675}
1571 1676
@@ -1601,11 +1706,27 @@ again:
1601 message = "writing rx ring-ref"; 1706 message = "writing rx ring-ref";
1602 goto abort_transaction; 1707 goto abort_transaction;
1603 } 1708 }
1604 err = xenbus_printf(xbt, dev->nodename, 1709
1605 "event-channel", "%u", info->evtchn); 1710 if (info->tx_evtchn == info->rx_evtchn) {
1606 if (err) { 1711 err = xenbus_printf(xbt, dev->nodename,
1607 message = "writing event-channel"; 1712 "event-channel", "%u", info->tx_evtchn);
1608 goto abort_transaction; 1713 if (err) {
1714 message = "writing event-channel";
1715 goto abort_transaction;
1716 }
1717 } else {
1718 err = xenbus_printf(xbt, dev->nodename,
1719 "event-channel-tx", "%u", info->tx_evtchn);
1720 if (err) {
1721 message = "writing event-channel-tx";
1722 goto abort_transaction;
1723 }
1724 err = xenbus_printf(xbt, dev->nodename,
1725 "event-channel-rx", "%u", info->rx_evtchn);
1726 if (err) {
1727 message = "writing event-channel-rx";
1728 goto abort_transaction;
1729 }
1609 } 1730 }
1610 1731
1611 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1732 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
@@ -1718,7 +1839,9 @@ static int xennet_connect(struct net_device *dev)
1718 * packets. 1839 * packets.
1719 */ 1840 */
1720 netif_carrier_on(np->netdev); 1841 netif_carrier_on(np->netdev);
1721 notify_remote_via_irq(np->netdev->irq); 1842 notify_remote_via_irq(np->tx_irq);
1843 if (np->tx_irq != np->rx_irq)
1844 notify_remote_via_irq(np->rx_irq);
1722 xennet_tx_buf_gc(dev); 1845 xennet_tx_buf_gc(dev);
1723 xennet_alloc_rx_buffers(dev); 1846 xennet_alloc_rx_buffers(dev);
1724 1847
@@ -1991,7 +2114,7 @@ static int __init netif_init(void)
1991 if (xen_hvm_domain() && !xen_platform_pci_unplug) 2114 if (xen_hvm_domain() && !xen_platform_pci_unplug)
1992 return -ENODEV; 2115 return -ENODEV;
1993 2116
1994 printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n"); 2117 pr_info("Initialising Xen virtual ethernet driver\n");
1995 2118
1996 return xenbus_register_frontend(&netfront_driver); 2119 return xenbus_register_frontend(&netfront_driver);
1997} 2120}