Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/firestream.h1
-rw-r--r--drivers/bcma/host_pci.c8
-rw-r--r--drivers/bluetooth/Makefile2
-rw-r--r--drivers/bluetooth/btmrvl_drv.h12
-rw-r--r--drivers/bluetooth/btmrvl_main.c269
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c15
-rw-r--r--drivers/bluetooth/btmrvl_sdio.h2
-rw-r--r--drivers/bluetooth/hci_vhci.c170
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/isdn/hardware/eicon/um_idi.c2
-rw-r--r--drivers/isdn/sc/init.c2
-rw-r--r--drivers/net/bonding/Makefile2
-rw-r--r--drivers/net/bonding/bond_3ad.c199
-rw-r--r--drivers/net/bonding/bond_alb.c150
-rw-r--r--drivers/net/bonding/bond_alb.h4
-rw-r--r--drivers/net/bonding/bond_main.c656
-rw-r--r--drivers/net/bonding/bond_netlink.c131
-rw-r--r--drivers/net/bonding/bond_options.c142
-rw-r--r--drivers/net/bonding/bond_procfs.c21
-rw-r--r--drivers/net/bonding/bond_sysfs.c215
-rw-r--r--drivers/net/bonding/bonding.h110
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/bfin_can.c2
-rw-r--r--drivers/net/can/c_can/c_can_pci.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/cc770/cc770_platform.c4
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/mcp251x.c2
-rw-r--r--drivers/net/can/mscan/mscan.h6
-rw-r--r--drivers/net/can/pch_can.c1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c2
-rw-r--r--drivers/net/can/softing/softing.h24
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/ethernet/3com/Kconfig4
-rw-r--r--drivers/net/ethernet/3com/typhoon.c1
-rw-r--r--drivers/net/ethernet/8390/8390.h40
-rw-r--r--drivers/net/ethernet/8390/ax88796.c2
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c3
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.h2
-rw-r--r--drivers/net/ethernet/amd/7990.h12
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/atarilance.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/declance.c16
-rw-r--r--drivers/net/ethernet/amd/lance.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c3
-rw-r--r--drivers/net/ethernet/apple/bmac.c4
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h6
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e.h12
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c46
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.h2
-rw-r--r--drivers/net/ethernet/broadcom/b44.c12
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c80
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c149
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h7
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c4
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c142
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h43
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h46
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/regs.h35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c2
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c1
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h53
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c25
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h333
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c40
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c227
-rw-r--r--drivers/net/ethernet/fealnx.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c87
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h26
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/fujitsu/Kconfig2
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c4
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.h14
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.h18
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.h14
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.h18
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c4
-rw-r--r--drivers/net/ethernet/icplus/ipg.c1
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h32
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h45
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c338
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c69
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c480
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c433
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h38
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h74
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c90
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c24
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h22
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h22
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h186
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c103
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h40
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c55
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h34
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c111
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/jme.c6
-rw-r--r--drivers/net/ethernet/korina.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/skge.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c4
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c4
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c9
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h56
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c1
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h9
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c30
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h101
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c40
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c20
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c47
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c184
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h109
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c54
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c47
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c41
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h60
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c127
-rw-r--r--drivers/net/ethernet/rdc/r6040.c2
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c1
-rw-r--r--drivers/net/ethernet/realtek/8139too.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c319
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h1
-rw-r--r--drivers/net/ethernet/sfc/efx.h105
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c5
-rw-r--r--drivers/net/ethernet/sfc/io.h5
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h120
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.h26
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h7
-rw-r--r--drivers/net/ethernet/sfc/nic.c73
-rw-r--r--drivers/net/ethernet/sfc/nic.h256
-rw-r--r--drivers/net/ethernet/sfc/phy.h8
-rw-r--r--drivers/net/ethernet/sfc/rx.c90
-rw-r--r--drivers/net/ethernet/sfc/selftest.h15
-rw-r--r--drivers/net/ethernet/sfc/tx.c419
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/sis/sis190.c1
-rw-r--r--drivers/net/ethernet/smsc/epic100.c1
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c1
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c12
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig8
-rw-r--r--drivers/net/ethernet/ti/Makefile1
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c161
-rw-r--r--drivers/net/ethernet/ti/cpsw.c159
-rw-r--r--drivers/net/ethernet/ti/cpsw.h2
-rw-r--r--drivers/net/ethernet/ti/cpts.h9
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h29
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.h6
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c41
-rw-r--r--drivers/net/fddi/skfp/fplustm.c2
-rw-r--r--drivers/net/fddi/skfp/h/smc.h28
-rw-r--r--drivers/net/fddi/skfp/skfddi.c6
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c2
-rw-r--r--drivers/net/hamradio/scc.c2
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/irda/bfin_sir.c4
-rw-r--r--drivers/net/irda/donauboe.c4
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/irda/sir-dev.h29
-rw-r--r--drivers/net/macvlan.c11
-rw-r--r--drivers/net/phy/at803x.c57
-rw-r--r--drivers/net/phy/marvell.c4
-rw-r--r--drivers/net/phy/micrel.c24
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/usb/catc.c8
-rw-r--r--drivers/net/usb/cdc-phonet.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c57
-rw-r--r--drivers/net/veth.c6
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/net/wan/hostess_sv11.c2
-rw-r--r--drivers/net/wan/sealevel.c2
-rw-r--r--drivers/net/wan/x25_asy.h2
-rw-r--r--drivers/net/wan/z85230.h27
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h27
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h117
-rw-r--r--drivers/net/wireless/adm8211.c1
-rw-r--r--drivers/net/wireless/airo.c1
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c382
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h120
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c70
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h35
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c144
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h22
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c241
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c19
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c314
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c285
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h25
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c244
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c446
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h73
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h24
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h32
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c67
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c236
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h71
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c15
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h3
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c92
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c145
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h218
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c91
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c446
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pri_detector.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c456
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c126
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h108
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c107
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c167
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c195
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c14
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Kconfig16
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c181
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.h49
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c805
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h284
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4657
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c1036
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c62
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.h33
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c2126
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h127
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c284
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h160
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h238
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c1
-rw-r--r--drivers/net/wireless/atmel.c92
-rw-r--r--drivers/net/wireless/b43/xmit.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c13
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h32
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h29
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c38
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c234
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h31
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h92
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h18
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.h22
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/antsel.h14
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.h20
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h38
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h110
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h219
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h371
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h91
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h145
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/rate.h48
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/stf.h31
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h16
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_d11.h2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h44
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c4
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h87
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h82
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h2
-rw-r--r--drivers/net/wireless/iwlegacy/common.h66
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h9
-rw-r--r--drivers/net/wireless/libertas/if_spi.c2
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c2
-rw-r--r--drivers/net/wireless/mwifiex/join.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.c4
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c2
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c2
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h24
-rw-r--r--drivers/net/wireless/mwl8k.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h31
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c1
-rw-r--r--drivers/net/wireless/p54/p54spi.c2
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c10
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c2
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c2
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h44
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c222
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h103
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c74
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c20
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c18
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c1
-rw-r--r--drivers/net/wireless/rtlwifi/base.c29
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.h10
-rw-r--r--drivers/net/wireless/rtlwifi/core.c10
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c18
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h29
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.h52
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c25
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h52
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c187
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.h49
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.h18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/reg.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c29
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.h62
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/rf.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c6
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c95
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h33
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c58
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c158
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c27
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c27
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h11
-rw-r--r--drivers/net/xen-netback/common.h12
-rw-r--r--drivers/net/xen-netback/interface.c16
-rw-r--r--drivers/net/xen-netback/netback.c294
-rw-r--r--drivers/net/xen-netback/xenbus.c52
-rw-r--r--drivers/net/xen-netfront.c4
505 files changed, 21096 insertions, 8720 deletions
diff --git a/drivers/atm/firestream.h b/drivers/atm/firestream.h
index 49e783e35ee9..364eded31881 100644
--- a/drivers/atm/firestream.h
+++ b/drivers/atm/firestream.h
@@ -420,7 +420,6 @@ struct fs_transmit_config {
 #define RC_FLAGS_BFPS_BFP27 (0xd << 17)
 #define RC_FLAGS_BFPS_BFP47 (0xe << 17)

-#define RC_FLAGS_BFPS (0x1 << 17)
 #define RC_FLAGS_BFPP (0x1 << 21)
 #define RC_FLAGS_TEVC (0x1 << 22)
 #define RC_FLAGS_TEP (0x1 << 23)
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index a355e63a3838..6fb98b53533f 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -188,8 +188,11 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
 	pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

 	/* SSB needed additional powering up, do we have any AMBA PCI cards? */
-	if (!pci_is_pcie(dev))
-		bcma_err(bus, "PCI card detected, report problems.\n");
+	if (!pci_is_pcie(dev)) {
+		bcma_err(bus, "PCI card detected, they are not supported.\n");
+		err = -ENXIO;
+		goto err_pci_release_regions;
+	}

 	/* Map MMIO */
 	err = -ENOMEM;
@@ -269,6 +272,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,

 static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 4afae20df512..9fe8a875a827 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -30,3 +30,5 @@ hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
 hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o
 hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
 hci_uart-objs := $(hci_uart-y)
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 27068d149380..f9d183387f45 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -23,6 +23,8 @@
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <net/bluetooth/bluetooth.h>
+#include <linux/ctype.h>
+#include <linux/firmware.h>

 #define BTM_HEADER_LEN 4
 #define BTM_UPLD_SIZE 2312
@@ -41,6 +43,8 @@ struct btmrvl_thread {
 struct btmrvl_device {
 	void *card;
 	struct hci_dev *hcidev;
+	struct device *dev;
+	const char *cal_data;

 	u8 dev_type;

@@ -91,6 +95,7 @@ struct btmrvl_private {
 #define BT_CMD_HOST_SLEEP_CONFIG 0x59
 #define BT_CMD_HOST_SLEEP_ENABLE 0x5A
 #define BT_CMD_MODULE_CFG_REQ 0x5B
+#define BT_CMD_LOAD_CONFIG_DATA 0x61

 /* Sub-commands: Module Bringup/Shutdown Request/Response */
 #define MODULE_BRINGUP_REQ 0xF1
@@ -116,11 +121,8 @@ struct btmrvl_private {
 #define PS_SLEEP 0x01
 #define PS_AWAKE 0x00

-struct btmrvl_cmd {
-	__le16 ocf_ogf;
-	u8 length;
-	u8 data[4];
-} __packed;
+#define BT_CMD_DATA_SIZE 32
+#define BT_CAL_DATA_SIZE 28

 struct btmrvl_event {
 	u8 ec; /* event counter */
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 9a9f51875df5..6e7bd4e4adbb 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -57,8 +57,7 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
 	ocf = hci_opcode_ocf(opcode);
 	ogf = hci_opcode_ogf(opcode);

-	if (ocf == BT_CMD_MODULE_CFG_REQ &&
-	    priv->btmrvl_dev.sendcmdflag) {
+	if (priv->btmrvl_dev.sendcmdflag) {
 		priv->btmrvl_dev.sendcmdflag = false;
 		priv->adapter->cmd_complete = true;
 		wake_up_interruptible(&priv->adapter->cmd_wait_q);
@@ -116,7 +115,6 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
 			adapter->hs_state = HS_ACTIVATED;
 			if (adapter->psmode)
 				adapter->ps_state = PS_SLEEP;
-			wake_up_interruptible(&adapter->cmd_wait_q);
 			BT_DBG("HS ACTIVATED!");
 		} else {
 			BT_DBG("HS Enable failed");
@@ -168,22 +166,24 @@ exit:
 }
 EXPORT_SYMBOL_GPL(btmrvl_process_event);

-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 cmd_no,
+				const void *param, u8 len)
 {
 	struct sk_buff *skb;
-	struct btmrvl_cmd *cmd;
-	int ret = 0;
+	struct hci_command_hdr *hdr;

-	skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
+	skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
 	if (skb == NULL) {
 		BT_ERR("No free skb");
 		return -ENOMEM;
 	}

-	cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-	cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_MODULE_CFG_REQ));
-	cmd->length = 1;
-	cmd->data[0] = subcmd;
+	hdr = (struct hci_command_hdr *)skb_put(skb, HCI_COMMAND_HDR_SIZE);
+	hdr->opcode = cpu_to_le16(hci_opcode_pack(OGF, cmd_no));
+	hdr->plen = len;
+
+	if (len)
+		memcpy(skb_put(skb, len), param, len);

 	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;

@@ -194,19 +194,23 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)

 	priv->adapter->cmd_complete = false;

-	BT_DBG("Queue module cfg Command");
-
 	wake_up_interruptible(&priv->main_thread.wait_q);

 	if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
 					      priv->adapter->cmd_complete,
-					      msecs_to_jiffies(WAIT_UNTIL_CMD_RESP))) {
-		ret = -ETIMEDOUT;
-		BT_ERR("module_cfg_cmd(%x): timeout: %d",
-		       subcmd, priv->btmrvl_dev.sendcmdflag);
-	}
+					      msecs_to_jiffies(WAIT_UNTIL_CMD_RESP)))
+		return -ETIMEDOUT;

-	BT_DBG("module cfg Command done");
+	return 0;
+}
+
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+{
+	int ret;
+
+	ret = btmrvl_send_sync_cmd(priv, BT_CMD_MODULE_CFG_REQ, &subcmd, 1);
+	if (ret)
+		BT_ERR("module_cfg_cmd(%x) failed\n", subcmd);

 	return ret;
 }
@@ -214,61 +218,36 @@ EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);

 int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
 {
-	struct sk_buff *skb;
-	struct btmrvl_cmd *cmd;
-
-	skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
-	if (!skb) {
-		BT_ERR("No free skb");
-		return -ENOMEM;
-	}
-
-	cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-	cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
-					BT_CMD_HOST_SLEEP_CONFIG));
-	cmd->length = 2;
-	cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
-	cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
+	int ret;
+	u8 param[2];

-	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
+	param[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
+	param[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);

-	skb->dev = (void *) priv->btmrvl_dev.hcidev;
-	skb_queue_head(&priv->adapter->tx_queue, skb);
+	BT_DBG("Sending HSCFG Command, gpio=0x%x, gap=0x%x",
+	       param[0], param[1]);

-	BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0],
-	       cmd->data[1]);
+	ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_CONFIG, param, 2);
+	if (ret)
+		BT_ERR("HSCFG command failed\n");

-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd);

 int btmrvl_enable_ps(struct btmrvl_private *priv)
 {
-	struct sk_buff *skb;
-	struct btmrvl_cmd *cmd;
-
-	skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
-	if (skb == NULL) {
-		BT_ERR("No free skb");
-		return -ENOMEM;
-	}
-
-	cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-	cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
-					BT_CMD_AUTO_SLEEP_MODE));
-	cmd->length = 1;
+	int ret;
+	u8 param;

 	if (priv->btmrvl_dev.psmode)
-		cmd->data[0] = BT_PS_ENABLE;
+		param = BT_PS_ENABLE;
 	else
-		cmd->data[0] = BT_PS_DISABLE;
-
-	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
+		param = BT_PS_DISABLE;

-	skb->dev = (void *) priv->btmrvl_dev.hcidev;
-	skb_queue_head(&priv->adapter->tx_queue, skb);
-
-	BT_DBG("Queue PSMODE Command:%d", cmd->data[0]);
+	ret = btmrvl_send_sync_cmd(priv, BT_CMD_AUTO_SLEEP_MODE, &param, 1);
+	if (ret)
+		BT_ERR("PSMODE command failed\n");

 	return 0;
 }
@@ -276,37 +255,11 @@ EXPORT_SYMBOL_GPL(btmrvl_enable_ps);

 int btmrvl_enable_hs(struct btmrvl_private *priv)
 {
-	struct sk_buff *skb;
-	struct btmrvl_cmd *cmd;
-	int ret = 0;
-
-	skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
-	if (skb == NULL) {
-		BT_ERR("No free skb");
-		return -ENOMEM;
-	}
-
-	cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-	cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_ENABLE));
-	cmd->length = 0;
-
-	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
-	skb->dev = (void *) priv->btmrvl_dev.hcidev;
-	skb_queue_head(&priv->adapter->tx_queue, skb);
-
-	BT_DBG("Queue hs enable Command");
-
-	wake_up_interruptible(&priv->main_thread.wait_q);
+	int ret;

-	if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
-			priv->adapter->hs_state,
-			msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED))) {
-		ret = -ETIMEDOUT;
-		BT_ERR("timeout: %d, %d,%d", priv->adapter->hs_state,
-		       priv->adapter->ps_state,
-		       priv->adapter->wakeup_tries);
-	}
+	ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
+	if (ret)
+		BT_ERR("Host sleep enable command failed\n");

 	return ret;
 }
@@ -480,6 +433,137 @@ static int btmrvl_open(struct hci_dev *hdev)
 }

 /*
+ * This function parses provided calibration data input. It should contain
+ * hex bytes separated by space or new line character. Here is an example.
+ * 00 1C 01 37 FF FF FF FF 02 04 7F 01
+ * CE BA 00 00 00 2D C6 C0 00 00 00 00
+ * 00 F0 00 00
+ */
+static int btmrvl_parse_cal_cfg(const u8 *src, u32 len, u8 *dst, u32 dst_size)
+{
+	const u8 *s = src;
+	u8 *d = dst;
+	int ret;
+	u8 tmp[3];
+
+	tmp[2] = '\0';
+	while ((s - src) <= len - 2) {
+		if (isspace(*s)) {
+			s++;
+			continue;
+		}
+
+		if (isxdigit(*s)) {
+			if ((d - dst) >= dst_size) {
+				BT_ERR("calibration data file too big!!!");
+				return -EINVAL;
+			}
+
+			memcpy(tmp, s, 2);
+
+			ret = kstrtou8(tmp, 16, d++);
+			if (ret < 0)
+				return ret;
+
+			s += 2;
+		} else {
+			return -EINVAL;
+		}
+	}
+	if (d == dst)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int btmrvl_load_cal_data(struct btmrvl_private *priv,
+				u8 *config_data)
+{
+	int i, ret;
+	u8 data[BT_CMD_DATA_SIZE];
+
+	data[0] = 0x00;
+	data[1] = 0x00;
+	data[2] = 0x00;
+	data[3] = BT_CMD_DATA_SIZE - 4;
+
+	/* Swap cal-data bytes. Each four bytes are swapped. Considering 4
+	 * byte SDIO header offset, mapping of input and output bytes will be
+	 * {3, 2, 1, 0} -> {0+4, 1+4, 2+4, 3+4},
+	 * {7, 6, 5, 4} -> {4+4, 5+4, 6+4, 7+4} */
+	for (i = 4; i < BT_CMD_DATA_SIZE; i++)
+		data[i] = config_data[(i / 4) * 8 - 1 - i];
+
+	print_hex_dump_bytes("Calibration data: ",
+			     DUMP_PREFIX_OFFSET, data, BT_CMD_DATA_SIZE);
+
+	ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
+				   BT_CMD_DATA_SIZE);
+	if (ret)
+		BT_ERR("Failed to download caibration data\n");
+
+	return 0;
+}
+
+static int
+btmrvl_process_cal_cfg(struct btmrvl_private *priv, u8 *data, u32 size)
+{
+	u8 cal_data[BT_CAL_DATA_SIZE];
+	int ret;
+
+	ret = btmrvl_parse_cal_cfg(data, size, cal_data, sizeof(cal_data));
+	if (ret)
+		return ret;
+
+	ret = btmrvl_load_cal_data(priv, cal_data);
+	if (ret) {
+		BT_ERR("Fail to load calibrate data");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int btmrvl_cal_data_config(struct btmrvl_private *priv)
+{
+	const struct firmware *cfg;
+	int ret;
+	const char *cal_data = priv->btmrvl_dev.cal_data;
+
+	if (!cal_data)
+		return 0;
+
+	ret = request_firmware(&cfg, cal_data, priv->btmrvl_dev.dev);
+	if (ret < 0) {
+		BT_DBG("Failed to get %s file, skipping cal data download",
+		       cal_data);
+		return 0;
+	}
+
+	ret = btmrvl_process_cal_cfg(priv, (u8 *)cfg->data, cfg->size);
+	release_firmware(cfg);
+	return ret;
+}
+
+static int btmrvl_setup(struct hci_dev *hdev)
+{
+	struct btmrvl_private *priv = hci_get_drvdata(hdev);
+
+	btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+
+	if (btmrvl_cal_data_config(priv))
+		BT_ERR("Set cal data failed");
+
+	priv->btmrvl_dev.psmode = 1;
+	btmrvl_enable_ps(priv);
+
+	priv->btmrvl_dev.gpio_gap = 0xffff;
+	btmrvl_send_hscfg_cmd(priv);
+
+	return 0;
+}
+
+/*
  * This function handles the event generated by firmware, rx data
  * received from firmware, and tx data sent from kernel.
  */
@@ -572,8 +656,7 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
 	hdev->flush = btmrvl_flush;
 	hdev->send = btmrvl_send_frame;
 	hdev->ioctl = btmrvl_ioctl;
-
-	btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+	hdev->setup = btmrvl_setup;

 	hdev->dev_type = priv->btmrvl_dev.dev_type;

diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 00da6df9f71e..332475e400cf 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -18,7 +18,6 @@
  * this warranty disclaimer.
  **/

-#include <linux/firmware.h>
 #include <linux/slab.h>

 #include <linux/mmc/sdio_ids.h>
@@ -102,6 +101,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
 	.helper = "mrvl/sd8688_helper.bin",
 	.firmware = "mrvl/sd8688.bin",
+	.cal_data = NULL,
 	.reg = &btmrvl_reg_8688,
 	.sd_blksz_fw_dl = 64,
 };
@@ -109,6 +109,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
 	.helper = NULL,
 	.firmware = "mrvl/sd8787_uapsta.bin",
+	.cal_data = NULL,
 	.reg = &btmrvl_reg_87xx,
 	.sd_blksz_fw_dl = 256,
 };
@@ -116,6 +117,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
 	.helper = NULL,
 	.firmware = "mrvl/sd8797_uapsta.bin",
+	.cal_data = "mrvl/sd8797_caldata.conf",
 	.reg = &btmrvl_reg_87xx,
 	.sd_blksz_fw_dl = 256,
 };
@@ -123,6 +125,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
 	.helper = NULL,
 	.firmware = "mrvl/sd8897_uapsta.bin",
+	.cal_data = NULL,
 	.reg = &btmrvl_reg_88xx,
 	.sd_blksz_fw_dl = 256,
 };
@@ -1006,6 +1009,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
 		struct btmrvl_sdio_device *data = (void *) id->driver_data;
 		card->helper = data->helper;
 		card->firmware = data->firmware;
+		card->cal_data = data->cal_data;
 		card->reg = data->reg;
 		card->sd_blksz_fw_dl = data->sd_blksz_fw_dl;
 	}
@@ -1034,6 +1038,8 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
 	}

 	card->priv = priv;
+	priv->btmrvl_dev.dev = &card->func->dev;
+	priv->btmrvl_dev.cal_data = card->cal_data;

 	/* Initialize the interface specific function pointers */
 	priv->hw_host_to_card = btmrvl_sdio_host_to_card;
@@ -1046,12 +1052,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
 		goto disable_host_int;
 	}

-	priv->btmrvl_dev.psmode = 1;
-	btmrvl_enable_ps(priv);
-
-	priv->btmrvl_dev.gpio_gap = 0xffff;
-	btmrvl_send_hscfg_cmd(priv);
-
 	return 0;

 disable_host_int:
@@ -1222,4 +1222,5 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
 MODULE_FIRMWARE("mrvl/sd8688.bin");
 MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_caldata.conf");
 MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 43d35a609ca9..6872d9ecac07 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -85,6 +85,7 @@ struct btmrvl_sdio_card {
 	u32 ioport;
 	const char *helper;
 	const char *firmware;
+	const char *cal_data;
 	const struct btmrvl_sdio_card_reg *reg;
 	u16 sd_blksz_fw_dl;
 	u8 rx_unit;
@@ -94,6 +95,7 @@ struct btmrvl_sdio_card {
 struct btmrvl_sdio_device {
 	const char *helper;
 	const char *firmware;
+	const char *cal_data;
 	const struct btmrvl_sdio_card_reg *reg;
 	u16 sd_blksz_fw_dl;
 };
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index d8b7aed6e4a9..c04a3e6fb37c 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -24,6 +24,7 @@
  */

 #include <linux/module.h>
+#include <asm/unaligned.h>

 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -39,17 +40,17 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>

-#define VERSION "1.3"
+#define VERSION "1.4"

 static bool amp;

 struct vhci_data {
 	struct hci_dev *hdev;

-	unsigned long flags;
-
 	wait_queue_head_t read_wait;
 	struct sk_buff_head readq;
+
+	struct delayed_work open_timeout;
 };

 static int vhci_open_dev(struct hci_dev *hdev)
@@ -99,16 +100,62 @@ static int vhci_send_frame(struct sk_buff *skb)
 	skb_queue_tail(&data->readq, skb);

 	wake_up_interruptible(&data->read_wait);
+	return 0;
+}
+
+static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
+{
+	struct hci_dev *hdev;
+	struct sk_buff *skb;
+
+	skb = bt_skb_alloc(4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	hdev = hci_alloc_dev();
+	if (!hdev) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	data->hdev = hdev;
+
+	hdev->bus = HCI_VIRTUAL;
+	hdev->dev_type = dev_type;
+	hci_set_drvdata(hdev, data);
+
+	hdev->open = vhci_open_dev;
+	hdev->close = vhci_close_dev;
+	hdev->flush = vhci_flush;
+	hdev->send = vhci_send_frame;

+	if (hci_register_dev(hdev) < 0) {
+		BT_ERR("Can't register HCI device");
+		hci_free_dev(hdev);
+		data->hdev = NULL;
+		kfree_skb(skb);
+		return -EBUSY;
+	}
+
+	bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
+
+	*skb_put(skb, 1) = 0xff;
+	*skb_put(skb, 1) = dev_type;
+	put_unaligned_le16(hdev->id, skb_put(skb, 2));
+	skb_queue_tail(&data->readq, skb);
+
+	wake_up_interruptible(&data->read_wait);
 	return 0;
 }

 static inline ssize_t vhci_get_user(struct vhci_data *data,
 				    const char __user *buf, size_t count)
 {
 	struct sk_buff *skb;
+	__u8 pkt_type, dev_type;
+	int ret;

-	if (count > HCI_MAX_FRAME_SIZE)
+	if (count < 2 || count > HCI_MAX_FRAME_SIZE)
 		return -EINVAL;

 	skb = bt_skb_alloc(count, GFP_KERNEL);
@@ -120,27 +167,70 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
 		return -EFAULT;
 	}

-	skb->dev = (void *) data->hdev;
-	bt_cb(skb)->pkt_type = *((__u8 *) skb->data);
+	pkt_type = *((__u8 *) skb->data);
 	skb_pull(skb, 1);

-	hci_recv_frame(skb);
+	switch (pkt_type) {
+	case HCI_EVENT_PKT:
+	case HCI_ACLDATA_PKT:
+	case HCI_SCODATA_PKT:
+		if (!data->hdev) {
+			kfree_skb(skb);
+			return -ENODEV;
+		}
+
+		skb->dev = (void *) data->hdev;
+		bt_cb(skb)->pkt_type = pkt_type;
+
+		ret = hci_recv_frame(skb);
+		break;
+
+	case HCI_VENDOR_PKT:
+		if (data->hdev) {
+			kfree_skb(skb);
+			return -EBADFD;
+		}

-	return count;
+		cancel_delayed_work_sync(&data->open_timeout);
+
+		dev_type = *((__u8 *) skb->data);
+		skb_pull(skb, 1);
+
+		if (skb->len > 0) {
+			kfree_skb(skb);
+			return -EINVAL;
+		}
+
+		kfree_skb(skb);
+
+		if (dev_type != HCI_BREDR && dev_type != HCI_AMP)
+			return -EINVAL;
+
+		ret = vhci_create_device(data, dev_type);
+		break;
+
+	default:
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	return (ret < 0) ? ret : count;
 }

 static inline ssize_t vhci_put_user(struct vhci_data *data,
-				    struct sk_buff *skb, char __user *buf, int count)
+				    struct sk_buff *skb,
+				    char __user *buf, int count)
 {
 	char __user *ptr = buf;
-	int len, total = 0;
+	int len;

 	len = min_t(unsigned int, skb->len, count);

 	if (copy_to_user(ptr, skb->data, len))
 		return -EFAULT;

-	total += len;
+	if (!data->hdev)
+		return len;

 	data->hdev->stat.byte_tx += len;

@@ -148,21 +238,19 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
 	case HCI_COMMAND_PKT:
 		data->hdev->stat.cmd_tx++;
 		break;
-
 	case HCI_ACLDATA_PKT:
 		data->hdev->stat.acl_tx++;
 		break;
-
 	case HCI_SCODATA_PKT:
 		data->hdev->stat.sco_tx++;
 		break;
 	}

-	return total;
+	return len;
 }

 static ssize_t vhci_read(struct file *file,
 			 char __user *buf, size_t count, loff_t *pos)
 {
 	struct vhci_data *data = file->private_data;
 	struct sk_buff *skb;
@@ -185,7 +273,7 @@ static ssize_t vhci_read(struct file *file,
 		}

 		ret = wait_event_interruptible(data->read_wait,
-					!skb_queue_empty(&data->readq));
+					       !skb_queue_empty(&data->readq));
 		if (ret < 0)
 			break;
 	}
@@ -194,7 +282,7 @@ static ssize_t vhci_read(struct file *file,
 }

 static ssize_t vhci_write(struct file *file,
-			const char __user *buf, size_t count, loff_t *pos)
+			  const char __user *buf, size_t count, loff_t *pos)
 {
 	struct vhci_data *data = file->private_data;

@@ -213,10 +301,17 @@ static unsigned int vhci_poll(struct file *file, poll_table *wait)
 	return POLLOUT | POLLWRNORM;
 }

+static void vhci_open_timeout(struct work_struct *work)
+{
+	struct vhci_data *data = container_of(work, struct vhci_data,
+					      open_timeout.work);
+
+	vhci_create_device(data, amp ? HCI_AMP : HCI_BREDR);
+}
+
 static int vhci_open(struct inode *inode, struct file *file)
 {
 	struct vhci_data *data;
-	struct hci_dev *hdev;

 	data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL);
 	if (!data)
@@ -225,35 +320,13 @@ static int vhci_open(struct inode *inode, struct file *file)
 	skb_queue_head_init(&data->readq);
 	init_waitqueue_head(&data->read_wait);

-	hdev = hci_alloc_dev();
-	if (!hdev) {
-		kfree(data);
-		return -ENOMEM;
-	}
-
-	data->hdev = hdev;
-
-	hdev->bus = HCI_VIRTUAL;
-	hci_set_drvdata(hdev, data);
-
-	if (amp)
-		hdev->dev_type = HCI_AMP;
-
-	hdev->open = vhci_open_dev;
-	hdev->close = vhci_close_dev;
-	hdev->flush = vhci_flush;
-	hdev->send = vhci_send_frame;
-
-	if (hci_register_dev(hdev) < 0) {
-		BT_ERR("Can't register HCI device");
-		kfree(data);
-		hci_free_dev(hdev);
-		return -EBUSY;
-	}
+	INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);

 	file->private_data = data;
 	nonseekable_open(inode, file);

+	schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000));
+
 	return 0;
 }

@@ -262,8 +335,12 @@ static int vhci_release(struct inode *inode, struct file *file)
 	struct vhci_data *data = file->private_data;
 	struct hci_dev *hdev = data->hdev;

-	hci_unregister_dev(hdev);
-	hci_free_dev(hdev);
+	cancel_delayed_work_sync(&data->open_timeout);
+
+	if (hdev) {
+		hci_unregister_dev(hdev);
+		hci_free_dev(hdev);
+	}

 	file->private_data = NULL;
 	kfree(data);
@@ -309,3 +386,4 @@ MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("devname:vhci");
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index dab4b41f1715..a082fd9e7ebe 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2294,7 +2294,7 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
2294 int low, high, remaining; 2294 int low, high, remaining;
2295 unsigned int rover; 2295 unsigned int rover;
2296 2296
2297 inet_get_local_port_range(&low, &high); 2297 inet_get_local_port_range(&init_net, &low, &high);
2298 remaining = (high - low) + 1; 2298 remaining = (high - low) + 1;
2299 rover = net_random() % remaining + low; 2299 rover = net_random() % remaining + low;
2300retry: 2300retry:
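Note: the cma.c hunk switches to the per-namespace inet_get_local_port_range() but keeps the surrounding rover scheme: pick a random starting port inside [low, high] and probe from there. A minimal standalone sketch of that arithmetic, with made-up range values:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
    int low = 32768, high = 60999;       /* assumed ephemeral range */
    int remaining = (high - low) + 1;
    int rover;

    srand((unsigned int)time(NULL));
    rover = rand() % remaining + low;    /* random start, like net_random() */

    /* a real allocator would probe from rover, wrapping back to low,
     * until a free port is found or 'remaining' candidates were tried */
    printf("start probing at port %d\n", rover);
    return 0;
}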
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 52377b4bf039..a2e0ed6c9a4d 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -481,7 +481,7 @@ void __inline__ outpp(void __iomem *addr, word p)
481int diva_os_register_irq(void *context, byte irq, const char *name) 481int diva_os_register_irq(void *context, byte irq, const char *name)
482{ 482{
483 int result = request_irq(irq, diva_os_irq_wrapper, 483 int result = request_irq(irq, diva_os_irq_wrapper,
484 IRQF_DISABLED | IRQF_SHARED, name, context); 484 IRQF_SHARED, name, context);
485 return (result); 485 return (result);
486} 486}
487 487
diff --git a/drivers/isdn/hardware/eicon/um_idi.c b/drivers/isdn/hardware/eicon/um_idi.c
index 7cab5c3276c2..e1519718ce67 100644
--- a/drivers/isdn/hardware/eicon/um_idi.c
+++ b/drivers/isdn/hardware/eicon/um_idi.c
@@ -288,9 +288,9 @@ int divas_um_idi_delete_entity(int adapter_nr, void *entity)
288 cleanup_entity(e); 288 cleanup_entity(e);
289 diva_os_free(0, e->os_context); 289 diva_os_free(0, e->os_context);
290 memset(e, 0x00, sizeof(*e)); 290 memset(e, 0x00, sizeof(*e));
291 diva_os_free(0, e);
292 291
293 DBG_LOG(("A(%d) remove E:%08x", adapter_nr, e)); 292 DBG_LOG(("A(%d) remove E:%08x", adapter_nr, e));
293 diva_os_free(0, e);
294 294
295 return (0); 295 return (0);
296} 296}
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index ca997bd4e818..92acc81f844d 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -336,7 +336,7 @@ static int __init sc_init(void)
336 */ 336 */
337 sc_adapter[cinst]->interrupt = irq[b]; 337 sc_adapter[cinst]->interrupt = irq[b];
338 if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler, 338 if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler,
339 IRQF_DISABLED, interface->id, 339 0, interface->id,
340 (void *)(unsigned long) cinst)) 340 (void *)(unsigned long) cinst))
341 { 341 {
342 kfree(sc_adapter[cinst]->channel); 342 kfree(sc_adapter[cinst]->channel);
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 4c21bf6b8b2f..5a5d720da929 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_BONDING) += bonding.o 5obj-$(CONFIG_BONDING) += bonding.o
6 6
7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o 7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bond_netlink.o bond_options.o
8 8
9proc-$(CONFIG_PROC_FS) += bond_procfs.o 9proc-$(CONFIG_PROC_FS) += bond_procfs.o
10bonding-objs += $(proc-y) 10bonding-objs += $(proc-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0d8f427ade93..187b1b7772ef 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -136,41 +136,6 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
136} 136}
137 137
138/** 138/**
139 * __get_first_port - get the first port in the bond
140 * @bond: the bond we're looking at
141 *
142 * Return the port of the first slave in @bond, or %NULL if it can't be found.
143 */
144static inline struct port *__get_first_port(struct bonding *bond)
145{
146 struct slave *first_slave = bond_first_slave(bond);
147
148 return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
149}
150
151/**
152 * __get_next_port - get the next port in the bond
153 * @port: the port we're looking at
154 *
155 * Return the port of the slave that is next in line of @port's slave in the
156 * bond, or %NULL if it can't be found.
157 */
158static inline struct port *__get_next_port(struct port *port)
159{
160 struct bonding *bond = __get_bond_by_port(port);
161 struct slave *slave = port->slave, *slave_next;
162
163 // If there's no bond for this port, or this is the last slave
164 if (bond == NULL)
165 return NULL;
166 slave_next = bond_next_slave(bond, slave);
167 if (!slave_next || bond_is_first_slave(bond, slave_next))
168 return NULL;
169
170 return &(SLAVE_AD_INFO(slave_next).port);
171}
172
173/**
174 * __get_first_agg - get the first aggregator in the bond 139 * __get_first_agg - get the first aggregator in the bond
175 * @bond: the bond we're looking at 140 * @bond: the bond we're looking at
176 * 141 *
@@ -190,28 +155,6 @@ static inline struct aggregator *__get_first_agg(struct port *port)
190 return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL; 155 return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
191} 156}
192 157
193/**
194 * __get_next_agg - get the next aggregator in the bond
195 * @aggregator: the aggregator we're looking at
196 *
197 * Return the aggregator of the slave that is next in line of @aggregator's
198 * slave in the bond, or %NULL if it can't be found.
199 */
200static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
201{
202 struct slave *slave = aggregator->slave, *slave_next;
203 struct bonding *bond = bond_get_bond_by_slave(slave);
204
205 // If there's no bond for this aggregator, or this is the last slave
206 if (bond == NULL)
207 return NULL;
208 slave_next = bond_next_slave(bond, slave);
209 if (!slave_next || bond_is_first_slave(bond, slave_next))
210 return NULL;
211
212 return &(SLAVE_AD_INFO(slave_next).aggregator);
213}
214
215/* 158/*
216 * __agg_has_partner 159 * __agg_has_partner
217 * 160 *
@@ -755,16 +698,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
755 */ 698 */
756static struct aggregator *__get_active_agg(struct aggregator *aggregator) 699static struct aggregator *__get_active_agg(struct aggregator *aggregator)
757{ 700{
758 struct aggregator *retval = NULL; 701 struct bonding *bond = aggregator->slave->bond;
702 struct list_head *iter;
703 struct slave *slave;
759 704
760 for (; aggregator; aggregator = __get_next_agg(aggregator)) { 705 bond_for_each_slave(bond, slave, iter)
761 if (aggregator->is_active) { 706 if (SLAVE_AD_INFO(slave).aggregator.is_active)
762 retval = aggregator; 707 return &(SLAVE_AD_INFO(slave).aggregator);
763 break;
764 }
765 }
766 708
767 return retval; 709 return NULL;
768} 710}
769 711
770/** 712/**
@@ -1274,12 +1216,17 @@ static void ad_port_selection_logic(struct port *port)
1274{ 1216{
1275 struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator; 1217 struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator;
1276 struct port *last_port = NULL, *curr_port; 1218 struct port *last_port = NULL, *curr_port;
1219 struct list_head *iter;
1220 struct bonding *bond;
1221 struct slave *slave;
1277 int found = 0; 1222 int found = 0;
1278 1223
1279 // if the port is already Selected, do nothing 1224 // if the port is already Selected, do nothing
1280 if (port->sm_vars & AD_PORT_SELECTED) 1225 if (port->sm_vars & AD_PORT_SELECTED)
1281 return; 1226 return;
1282 1227
1228 bond = __get_bond_by_port(port);
1229
1283 // if the port is connected to other aggregator, detach it 1230 // if the port is connected to other aggregator, detach it
1284 if (port->aggregator) { 1231 if (port->aggregator) {
1285 // detach the port from its former aggregator 1232 // detach the port from its former aggregator
@@ -1320,8 +1267,8 @@ static void ad_port_selection_logic(struct port *port)
1320 } 1267 }
1321 } 1268 }
1322 // search on all aggregators for a suitable aggregator for this port 1269 // search on all aggregators for a suitable aggregator for this port
1323 for (aggregator = __get_first_agg(port); aggregator; 1270 bond_for_each_slave(bond, slave, iter) {
1324 aggregator = __get_next_agg(aggregator)) { 1271 aggregator = &(SLAVE_AD_INFO(slave).aggregator);
1325 1272
1326 // keep a free aggregator for later use(if needed) 1273 // keep a free aggregator for later use(if needed)
1327 if (!aggregator->lag_ports) { 1274 if (!aggregator->lag_ports) {
@@ -1515,19 +1462,23 @@ static int agg_device_up(const struct aggregator *agg)
1515static void ad_agg_selection_logic(struct aggregator *agg) 1462static void ad_agg_selection_logic(struct aggregator *agg)
1516{ 1463{
1517 struct aggregator *best, *active, *origin; 1464 struct aggregator *best, *active, *origin;
1465 struct bonding *bond = agg->slave->bond;
1466 struct list_head *iter;
1467 struct slave *slave;
1518 struct port *port; 1468 struct port *port;
1519 1469
1520 origin = agg; 1470 origin = agg;
1521 active = __get_active_agg(agg); 1471 active = __get_active_agg(agg);
1522 best = (active && agg_device_up(active)) ? active : NULL; 1472 best = (active && agg_device_up(active)) ? active : NULL;
1523 1473
1524 do { 1474 bond_for_each_slave(bond, slave, iter) {
1475 agg = &(SLAVE_AD_INFO(slave).aggregator);
1476
1525 agg->is_active = 0; 1477 agg->is_active = 0;
1526 1478
1527 if (agg->num_of_ports && agg_device_up(agg)) 1479 if (agg->num_of_ports && agg_device_up(agg))
1528 best = ad_agg_selection_test(best, agg); 1480 best = ad_agg_selection_test(best, agg);
1529 1481 }
1530 } while ((agg = __get_next_agg(agg)));
1531 1482
1532 if (best && 1483 if (best &&
1533 __get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) { 1484 __get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
@@ -1565,8 +1516,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
1565 best->lag_ports, best->slave, 1516 best->lag_ports, best->slave,
1566 best->slave ? best->slave->dev->name : "NULL"); 1517 best->slave ? best->slave->dev->name : "NULL");
1567 1518
1568 for (agg = __get_first_agg(best->lag_ports); agg; 1519 bond_for_each_slave(bond, slave, iter) {
1569 agg = __get_next_agg(agg)) { 1520 agg = &(SLAVE_AD_INFO(slave).aggregator);
1570 1521
1571 pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n", 1522 pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
1572 agg->aggregator_identifier, agg->num_of_ports, 1523 agg->aggregator_identifier, agg->num_of_ports,
@@ -1614,13 +1565,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
1614 } 1565 }
1615 } 1566 }
1616 1567
1617 if (origin->slave) { 1568 bond_3ad_set_carrier(bond);
1618 struct bonding *bond;
1619
1620 bond = bond_get_bond_by_slave(origin->slave);
1621 if (bond)
1622 bond_3ad_set_carrier(bond);
1623 }
1624} 1569}
1625 1570
1626/** 1571/**
@@ -1969,6 +1914,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
1969 struct port *port, *prev_port, *temp_port; 1914 struct port *port, *prev_port, *temp_port;
1970 struct aggregator *aggregator, *new_aggregator, *temp_aggregator; 1915 struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
1971 int select_new_active_agg = 0; 1916 int select_new_active_agg = 0;
1917 struct bonding *bond = slave->bond;
1918 struct slave *slave_iter;
1919 struct list_head *iter;
1972 1920
1973 // find the aggregator related to this slave 1921 // find the aggregator related to this slave
1974 aggregator = &(SLAVE_AD_INFO(slave).aggregator); 1922 aggregator = &(SLAVE_AD_INFO(slave).aggregator);
@@ -1998,14 +1946,16 @@ void bond_3ad_unbind_slave(struct slave *slave)
1998 // reason to search for new aggregator, and that we will find one 1946 // reason to search for new aggregator, and that we will find one
1999 if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) { 1947 if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
2000 // find new aggregator for the related port(s) 1948 // find new aggregator for the related port(s)
2001 new_aggregator = __get_first_agg(port); 1949 bond_for_each_slave(bond, slave_iter, iter) {
2002 for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) { 1950 new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
2003 // if the new aggregator is empty, or it is connected to our port only 1951 // if the new aggregator is empty, or it is connected to our port only
2004 if (!new_aggregator->lag_ports 1952 if (!new_aggregator->lag_ports
2005 || ((new_aggregator->lag_ports == port) 1953 || ((new_aggregator->lag_ports == port)
2006 && !new_aggregator->lag_ports->next_port_in_aggregator)) 1954 && !new_aggregator->lag_ports->next_port_in_aggregator))
2007 break; 1955 break;
2008 } 1956 }
1957 if (!slave_iter)
1958 new_aggregator = NULL;
2009 // if new aggregator found, copy the aggregator's parameters 1959 // if new aggregator found, copy the aggregator's parameters
2010 // and connect the related lag_ports to the new aggregator 1960 // and connect the related lag_ports to the new aggregator
2011 if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) { 1961 if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
@@ -2056,15 +2006,17 @@ void bond_3ad_unbind_slave(struct slave *slave)
2056 pr_info("%s: Removing an active aggregator\n", 2006 pr_info("%s: Removing an active aggregator\n",
2057 slave->bond->dev->name); 2007 slave->bond->dev->name);
2058 // select new active aggregator 2008 // select new active aggregator
2059 ad_agg_selection_logic(__get_first_agg(port)); 2009 temp_aggregator = __get_first_agg(port);
2010 if (temp_aggregator)
2011 ad_agg_selection_logic(temp_aggregator);
2060 } 2012 }
2061 } 2013 }
2062 } 2014 }
2063 2015
2064 pr_debug("Unbinding port %d\n", port->actor_port_number); 2016 pr_debug("Unbinding port %d\n", port->actor_port_number);
2065 // find the aggregator that this port is connected to 2017 // find the aggregator that this port is connected to
2066 temp_aggregator = __get_first_agg(port); 2018 bond_for_each_slave(bond, slave_iter, iter) {
2067 for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) { 2019 temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
2068 prev_port = NULL; 2020 prev_port = NULL;
2069 // search the port in the aggregator's related ports 2021 // search the port in the aggregator's related ports
2070 for (temp_port = temp_aggregator->lag_ports; temp_port; 2022 for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2111,19 +2063,24 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2111{ 2063{
2112 struct bonding *bond = container_of(work, struct bonding, 2064 struct bonding *bond = container_of(work, struct bonding,
2113 ad_work.work); 2065 ad_work.work);
2114 struct port *port;
2115 struct aggregator *aggregator; 2066 struct aggregator *aggregator;
2067 struct list_head *iter;
2068 struct slave *slave;
2069 struct port *port;
2116 2070
2117 read_lock(&bond->lock); 2071 read_lock(&bond->lock);
2118 2072
2119 //check if there are any slaves 2073 //check if there are any slaves
2120 if (list_empty(&bond->slave_list)) 2074 if (!bond_has_slaves(bond))
2121 goto re_arm; 2075 goto re_arm;
2122 2076
2123 // check if agg_select_timer timer after initialize is timed out 2077 // check if agg_select_timer timer after initialize is timed out
2124 if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) { 2078 if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
2079 slave = bond_first_slave(bond);
2080 port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
2081
2125 // select the active aggregator for the bond 2082 // select the active aggregator for the bond
2126 if ((port = __get_first_port(bond))) { 2083 if (port) {
2127 if (!port->slave) { 2084 if (!port->slave) {
2128 pr_warning("%s: Warning: bond's first port is uninitialized\n", 2085 pr_warning("%s: Warning: bond's first port is uninitialized\n",
2129 bond->dev->name); 2086 bond->dev->name);
@@ -2137,7 +2094,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2137 } 2094 }
2138 2095
2139 // for each port run the state machines 2096 // for each port run the state machines
2140 for (port = __get_first_port(bond); port; port = __get_next_port(port)) { 2097 bond_for_each_slave(bond, slave, iter) {
2098 port = &(SLAVE_AD_INFO(slave).port);
2141 if (!port->slave) { 2099 if (!port->slave) {
2142 pr_warning("%s: Warning: Found an uninitialized port\n", 2100 pr_warning("%s: Warning: Found an uninitialized port\n",
2143 bond->dev->name); 2101 bond->dev->name);
@@ -2382,9 +2340,12 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
2382 struct ad_info *ad_info) 2340 struct ad_info *ad_info)
2383{ 2341{
2384 struct aggregator *aggregator = NULL; 2342 struct aggregator *aggregator = NULL;
2343 struct list_head *iter;
2344 struct slave *slave;
2385 struct port *port; 2345 struct port *port;
2386 2346
2387 for (port = __get_first_port(bond); port; port = __get_next_port(port)) { 2347 bond_for_each_slave_rcu(bond, slave, iter) {
2348 port = &(SLAVE_AD_INFO(slave).port);
2388 if (port->aggregator && port->aggregator->is_active) { 2349 if (port->aggregator && port->aggregator->is_active) {
2389 aggregator = port->aggregator; 2350 aggregator = port->aggregator;
2390 break; 2351 break;
@@ -2408,25 +2369,25 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
2408{ 2369{
2409 int ret; 2370 int ret;
2410 2371
2411 read_lock(&bond->lock); 2372 rcu_read_lock();
2412 ret = __bond_3ad_get_active_agg_info(bond, ad_info); 2373 ret = __bond_3ad_get_active_agg_info(bond, ad_info);
2413 read_unlock(&bond->lock); 2374 rcu_read_unlock();
2414 2375
2415 return ret; 2376 return ret;
2416} 2377}
2417 2378
2418int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) 2379int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2419{ 2380{
2420 struct slave *slave, *start_at;
2421 struct bonding *bond = netdev_priv(dev); 2381 struct bonding *bond = netdev_priv(dev);
2422 int slave_agg_no; 2382 struct slave *slave, *first_ok_slave;
2423 int slaves_in_agg; 2383 struct aggregator *agg;
2424 int agg_id;
2425 int i;
2426 struct ad_info ad_info; 2384 struct ad_info ad_info;
2385 struct list_head *iter;
2386 int slaves_in_agg;
2387 int slave_agg_no;
2427 int res = 1; 2388 int res = 1;
2389 int agg_id;
2428 2390
2429 read_lock(&bond->lock);
2430 if (__bond_3ad_get_active_agg_info(bond, &ad_info)) { 2391 if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
2431 pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n", 2392 pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
2432 dev->name); 2393 dev->name);
@@ -2437,20 +2398,28 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2437 agg_id = ad_info.aggregator_id; 2398 agg_id = ad_info.aggregator_id;
2438 2399
2439 if (slaves_in_agg == 0) { 2400 if (slaves_in_agg == 0) {
2440 /*the aggregator is empty*/
2441 pr_debug("%s: Error: active aggregator is empty\n", dev->name); 2401 pr_debug("%s: Error: active aggregator is empty\n", dev->name);
2442 goto out; 2402 goto out;
2443 } 2403 }
2444 2404
2445 slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg); 2405 slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
2406 first_ok_slave = NULL;
2446 2407
2447 bond_for_each_slave(bond, slave) { 2408 bond_for_each_slave_rcu(bond, slave, iter) {
2448 struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator; 2409 agg = SLAVE_AD_INFO(slave).port.aggregator;
2410 if (!agg || agg->aggregator_identifier != agg_id)
2411 continue;
2449 2412
2450 if (agg && (agg->aggregator_identifier == agg_id)) { 2413 if (slave_agg_no >= 0) {
2414 if (!first_ok_slave && SLAVE_IS_OK(slave))
2415 first_ok_slave = slave;
2451 slave_agg_no--; 2416 slave_agg_no--;
2452 if (slave_agg_no < 0) 2417 continue;
2453 break; 2418 }
2419
2420 if (SLAVE_IS_OK(slave)) {
2421 res = bond_dev_queue_xmit(bond, skb, slave->dev);
2422 goto out;
2454 } 2423 }
2455 } 2424 }
2456 2425
@@ -2460,23 +2429,12 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2460 goto out; 2429 goto out;
2461 } 2430 }
2462 2431
2463 start_at = slave; 2432 /* we couldn't find any suitable slave after the agg_no, so use the
2464 2433 * first suitable found, if found. */
2465 bond_for_each_slave_from(bond, slave, i, start_at) { 2434 if (first_ok_slave)
2466 int slave_agg_id = 0; 2435 res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
2467 struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
2468
2469 if (agg)
2470 slave_agg_id = agg->aggregator_identifier;
2471
2472 if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) {
2473 res = bond_dev_queue_xmit(bond, skb, slave->dev);
2474 break;
2475 }
2476 }
2477 2436
2478out: 2437out:
2479 read_unlock(&bond->lock);
2480 if (res) { 2438 if (res) {
2481 /* no suitable interface, frame not sent */ 2439 /* no suitable interface, frame not sent */
2482 kfree_skb(skb); 2440 kfree_skb(skb);
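Note: the rewritten bond_3ad_xmit_xor() above walks the aggregator's members once, counting down the hashed index and transmitting on the next usable member once the countdown passes zero, while remembering the first usable member as a fallback. A standalone sketch of that selection, with invented member data standing in for slaves:

#include <stdio.h>

struct member {
    const char *name;
    int in_agg;    /* belongs to the active aggregator */
    int is_ok;     /* link usable, like SLAVE_IS_OK() */
};

static const char *pick_member(const struct member *m, int count, int n)
{
    const char *first_ok = NULL;
    int i;

    for (i = 0; i < count; i++) {
        if (!m[i].in_agg)
            continue;
        if (n >= 0) {
            if (!first_ok && m[i].is_ok)
                first_ok = m[i].name;
            n--;
            continue;
        }
        if (m[i].is_ok)
            return m[i].name;    /* next usable member after the countdown */
    }
    return first_ok;             /* none found, wrap to the first usable one */
}

int main(void)
{
    const struct member m[] = {
        { "eth0", 1, 1 }, { "eth1", 1, 0 }, { "eth2", 1, 1 },
    };
    const char *pick = pick_member(m, 3, 1);

    printf("chosen: %s\n", pick ? pick : "(none)");
    return 0;
}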
@@ -2515,11 +2473,12 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
2515void bond_3ad_update_lacp_rate(struct bonding *bond) 2473void bond_3ad_update_lacp_rate(struct bonding *bond)
2516{ 2474{
2517 struct port *port = NULL; 2475 struct port *port = NULL;
2476 struct list_head *iter;
2518 struct slave *slave; 2477 struct slave *slave;
2519 int lacp_fast; 2478 int lacp_fast;
2520 2479
2521 lacp_fast = bond->params.lacp_fast; 2480 lacp_fast = bond->params.lacp_fast;
2522 bond_for_each_slave(bond, slave) { 2481 bond_for_each_slave(bond, slave, iter) {
2523 port = &(SLAVE_AD_INFO(slave).port); 2482 port = &(SLAVE_AD_INFO(slave).port);
2524 __get_state_machine_lock(port); 2483 __get_state_machine_lock(port);
2525 if (lacp_fast) 2484 if (lacp_fast)
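Note: throughout bond_3ad.c the hand-rolled __get_first_*()/__get_next_*() walkers are replaced by direct iteration with bond_for_each_slave(). A simplified userspace sketch of the same pattern, using a plain singly linked list and a for-each macro rather than the kernel's list_head API:

#include <stdio.h>

struct node {
    const char *name;
    int is_active;
    struct node *next;
};

#define for_each_node(pos, head) \
    for ((pos) = (head); (pos) != NULL; (pos) = (pos)->next)

static struct node *first_active(struct node *head)
{
    struct node *n;

    for_each_node(n, head)
        if (n->is_active)
            return n;    /* early return, like the new __get_active_agg() */
    return NULL;
}

int main(void)
{
    struct node c = { "agg-c", 1, NULL };
    struct node b = { "agg-b", 0, &c };
    struct node a = { "agg-a", 0, &b };
    struct node *hit = first_active(&a);

    printf("active: %s\n", hit ? hit->name : "(none)");
    return 0;
}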
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f428ef574372..02872405d35d 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -223,13 +223,14 @@ static long long compute_gap(struct slave *slave)
223static struct slave *tlb_get_least_loaded_slave(struct bonding *bond) 223static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
224{ 224{
225 struct slave *slave, *least_loaded; 225 struct slave *slave, *least_loaded;
226 struct list_head *iter;
226 long long max_gap; 227 long long max_gap;
227 228
228 least_loaded = NULL; 229 least_loaded = NULL;
229 max_gap = LLONG_MIN; 230 max_gap = LLONG_MIN;
230 231
231 /* Find the slave with the largest gap */ 232 /* Find the slave with the largest gap */
232 bond_for_each_slave(bond, slave) { 233 bond_for_each_slave_rcu(bond, slave, iter) {
233 if (SLAVE_IS_OK(slave)) { 234 if (SLAVE_IS_OK(slave)) {
234 long long gap = compute_gap(slave); 235 long long gap = compute_gap(slave);
235 236
@@ -382,30 +383,64 @@ out:
382static struct slave *rlb_next_rx_slave(struct bonding *bond) 383static struct slave *rlb_next_rx_slave(struct bonding *bond)
383{ 384{
384 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); 385 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
385 struct slave *rx_slave, *slave, *start_at; 386 struct slave *before = NULL, *rx_slave = NULL, *slave;
386 int i = 0; 387 struct list_head *iter;
388 bool found = false;
387 389
388 if (bond_info->next_rx_slave) 390 bond_for_each_slave(bond, slave, iter) {
389 start_at = bond_info->next_rx_slave; 391 if (!SLAVE_IS_OK(slave))
390 else 392 continue;
391 start_at = bond_first_slave(bond); 393 if (!found) {
394 if (!before || before->speed < slave->speed)
395 before = slave;
396 } else {
397 if (!rx_slave || rx_slave->speed < slave->speed)
398 rx_slave = slave;
399 }
400 if (slave == bond_info->rx_slave)
401 found = true;
402 }
403 /* we didn't find anything after the current or we have something
404 * better before and up to the current slave
405 */
406 if (!rx_slave || (before && rx_slave->speed < before->speed))
407 rx_slave = before;
392 408
393 rx_slave = NULL; 409 if (rx_slave)
410 bond_info->rx_slave = rx_slave;
394 411
395 bond_for_each_slave_from(bond, slave, i, start_at) { 412 return rx_slave;
396 if (SLAVE_IS_OK(slave)) { 413}
397 if (!rx_slave) { 414
398 rx_slave = slave; 415/* Caller must hold rcu_read_lock() for read */
399 } else if (slave->speed > rx_slave->speed) { 416static struct slave *__rlb_next_rx_slave(struct bonding *bond)
417{
418 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
419 struct slave *before = NULL, *rx_slave = NULL, *slave;
420 struct list_head *iter;
421 bool found = false;
422
423 bond_for_each_slave_rcu(bond, slave, iter) {
424 if (!SLAVE_IS_OK(slave))
425 continue;
426 if (!found) {
427 if (!before || before->speed < slave->speed)
428 before = slave;
429 } else {
430 if (!rx_slave || rx_slave->speed < slave->speed)
400 rx_slave = slave; 431 rx_slave = slave;
401 }
402 } 432 }
433 if (slave == bond_info->rx_slave)
434 found = true;
403 } 435 }
436 /* we didn't find anything after the current or we have something
437 * better before and up to the current slave
438 */
439 if (!rx_slave || (before && rx_slave->speed < before->speed))
440 rx_slave = before;
404 441
405 if (rx_slave) { 442 if (rx_slave)
406 slave = bond_next_slave(bond, rx_slave); 443 bond_info->rx_slave = rx_slave;
407 bond_info->next_rx_slave = slave;
408 }
409 444
410 return rx_slave; 445 return rx_slave;
411} 446}
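Note: the new rlb_next_rx_slave()/__rlb_next_rx_slave() pair makes one pass over the slaves: among usable ones it prefers the fastest slave that follows the current rx_slave in list order, otherwise it falls back to the fastest one at or before it. A userspace sketch of that single-pass choice, with invented speeds and flags:

#include <stdio.h>

struct member {
    const char *name;
    int speed;
    int usable;
};

static int next_rx(const struct member *m, int count, int current)
{
    int before = -1, after = -1, found = 0, i;

    for (i = 0; i < count; i++) {
        if (!m[i].usable)
            continue;
        if (!found) {
            if (before < 0 || m[before].speed < m[i].speed)
                before = i;
        } else {
            if (after < 0 || m[after].speed < m[i].speed)
                after = i;
        }
        if (i == current)
            found = 1;
    }
    /* nothing after the current one, or something faster before it */
    if (after < 0 || (before >= 0 && m[after].speed < m[before].speed))
        after = before;
    return after;
}

int main(void)
{
    const struct member m[] = {
        { "eth0", 1000, 1 }, { "eth1", 100, 1 }, { "eth2", 1000, 1 },
    };
    int pick = next_rx(m, 3, 1);    /* current rx slave is eth1 */

    printf("next rx: %s\n", pick >= 0 ? m[pick].name : "(none)");
    return 0;
}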
@@ -626,12 +661,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
626{ 661{
627 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); 662 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
628 struct arp_pkt *arp = arp_pkt(skb); 663 struct arp_pkt *arp = arp_pkt(skb);
629 struct slave *assigned_slave; 664 struct slave *assigned_slave, *curr_active_slave;
630 struct rlb_client_info *client_info; 665 struct rlb_client_info *client_info;
631 u32 hash_index = 0; 666 u32 hash_index = 0;
632 667
633 _lock_rx_hashtbl(bond); 668 _lock_rx_hashtbl(bond);
634 669
670 curr_active_slave = rcu_dereference(bond->curr_active_slave);
671
635 hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst)); 672 hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
636 client_info = &(bond_info->rx_hashtbl[hash_index]); 673 client_info = &(bond_info->rx_hashtbl[hash_index]);
637 674
@@ -656,14 +693,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
656 * that the new client can be assigned to this entry. 693 * that the new client can be assigned to this entry.
657 */ 694 */
658 if (bond->curr_active_slave && 695 if (bond->curr_active_slave &&
659 client_info->slave != bond->curr_active_slave) { 696 client_info->slave != curr_active_slave) {
660 client_info->slave = bond->curr_active_slave; 697 client_info->slave = curr_active_slave;
661 rlb_update_client(client_info); 698 rlb_update_client(client_info);
662 } 699 }
663 } 700 }
664 } 701 }
665 /* assign a new slave */ 702 /* assign a new slave */
666 assigned_slave = rlb_next_rx_slave(bond); 703 assigned_slave = __rlb_next_rx_slave(bond);
667 704
668 if (assigned_slave) { 705 if (assigned_slave) {
669 if (!(client_info->assigned && 706 if (!(client_info->assigned &&
@@ -726,7 +763,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
726 /* Don't modify or load balance ARPs that do not originate locally 763 /* Don't modify or load balance ARPs that do not originate locally
727 * (e.g.,arrive via a bridge). 764 * (e.g.,arrive via a bridge).
728 */ 765 */
729 if (!bond_slave_has_mac(bond, arp->mac_src)) 766 if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
730 return NULL; 767 return NULL;
731 768
732 if (arp->op_code == htons(ARPOP_REPLY)) { 769 if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -1019,7 +1056,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
1019 1056
1020 /* loop through vlans and send one packet for each */ 1057 /* loop through vlans and send one packet for each */
1021 rcu_read_lock(); 1058 rcu_read_lock();
1022 netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) { 1059 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
1023 if (upper->priv_flags & IFF_802_1Q_VLAN) 1060 if (upper->priv_flags & IFF_802_1Q_VLAN)
1024 alb_send_lp_vid(slave, mac_addr, 1061 alb_send_lp_vid(slave, mac_addr,
1025 vlan_dev_vlan_id(upper)); 1062 vlan_dev_vlan_id(upper));
@@ -1172,10 +1209,11 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
1172 */ 1209 */
1173static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave) 1210static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
1174{ 1211{
1175 struct slave *tmp_slave1, *free_mac_slave = NULL;
1176 struct slave *has_bond_addr = bond->curr_active_slave; 1212 struct slave *has_bond_addr = bond->curr_active_slave;
1213 struct slave *tmp_slave1, *free_mac_slave = NULL;
1214 struct list_head *iter;
1177 1215
1178 if (list_empty(&bond->slave_list)) { 1216 if (!bond_has_slaves(bond)) {
1179 /* this is the first slave */ 1217 /* this is the first slave */
1180 return 0; 1218 return 0;
1181 } 1219 }
@@ -1196,7 +1234,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
1196 /* The slave's address is equal to the address of the bond. 1234 /* The slave's address is equal to the address of the bond.
1197 * Search for a spare address in the bond for this slave. 1235 * Search for a spare address in the bond for this slave.
1198 */ 1236 */
1199 bond_for_each_slave(bond, tmp_slave1) { 1237 bond_for_each_slave(bond, tmp_slave1, iter) {
1200 if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) { 1238 if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
1201 /* no slave has tmp_slave1's perm addr 1239 /* no slave has tmp_slave1's perm addr
1202 * as its curr addr 1240 * as its curr addr
@@ -1246,15 +1284,16 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
1246 */ 1284 */
1247static int alb_set_mac_address(struct bonding *bond, void *addr) 1285static int alb_set_mac_address(struct bonding *bond, void *addr)
1248{ 1286{
1249 char tmp_addr[ETH_ALEN]; 1287 struct slave *slave, *rollback_slave;
1250 struct slave *slave; 1288 struct list_head *iter;
1251 struct sockaddr sa; 1289 struct sockaddr sa;
1290 char tmp_addr[ETH_ALEN];
1252 int res; 1291 int res;
1253 1292
1254 if (bond->alb_info.rlb_enabled) 1293 if (bond->alb_info.rlb_enabled)
1255 return 0; 1294 return 0;
1256 1295
1257 bond_for_each_slave(bond, slave) { 1296 bond_for_each_slave(bond, slave, iter) {
1258 /* save net_device's current hw address */ 1297 /* save net_device's current hw address */
1259 memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN); 1298 memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
1260 1299
@@ -1274,10 +1313,12 @@ unwind:
1274 sa.sa_family = bond->dev->type; 1313 sa.sa_family = bond->dev->type;
1275 1314
1276 /* unwind from head to the slave that failed */ 1315 /* unwind from head to the slave that failed */
1277 bond_for_each_slave_continue_reverse(bond, slave) { 1316 bond_for_each_slave(bond, rollback_slave, iter) {
1278 memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN); 1317 if (rollback_slave == slave)
1279 dev_set_mac_address(slave->dev, &sa); 1318 break;
1280 memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN); 1319 memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
1320 dev_set_mac_address(rollback_slave->dev, &sa);
1321 memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
1281 } 1322 }
1282 1323
1283 return res; 1324 return res;
@@ -1337,11 +1378,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1337 skb_reset_mac_header(skb); 1378 skb_reset_mac_header(skb);
1338 eth_data = eth_hdr(skb); 1379 eth_data = eth_hdr(skb);
1339 1380
1340 /* make sure that the curr_active_slave do not change during tx
1341 */
1342 read_lock(&bond->lock);
1343 read_lock(&bond->curr_slave_lock);
1344
1345 switch (ntohs(skb->protocol)) { 1381 switch (ntohs(skb->protocol)) {
1346 case ETH_P_IP: { 1382 case ETH_P_IP: {
1347 const struct iphdr *iph = ip_hdr(skb); 1383 const struct iphdr *iph = ip_hdr(skb);
@@ -1423,12 +1459,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1423 1459
1424 if (!tx_slave) { 1460 if (!tx_slave) {
1425 /* unbalanced or unassigned, send through primary */ 1461 /* unbalanced or unassigned, send through primary */
1426 tx_slave = bond->curr_active_slave; 1462 tx_slave = rcu_dereference(bond->curr_active_slave);
1427 bond_info->unbalanced_load += skb->len; 1463 bond_info->unbalanced_load += skb->len;
1428 } 1464 }
1429 1465
1430 if (tx_slave && SLAVE_IS_OK(tx_slave)) { 1466 if (tx_slave && SLAVE_IS_OK(tx_slave)) {
1431 if (tx_slave != bond->curr_active_slave) { 1467 if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
1432 memcpy(eth_data->h_source, 1468 memcpy(eth_data->h_source,
1433 tx_slave->dev->dev_addr, 1469 tx_slave->dev->dev_addr,
1434 ETH_ALEN); 1470 ETH_ALEN);
@@ -1443,8 +1479,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1443 } 1479 }
1444 } 1480 }
1445 1481
1446 read_unlock(&bond->curr_slave_lock);
1447 read_unlock(&bond->lock);
1448 if (res) { 1482 if (res) {
1449 /* no suitable interface, frame not sent */ 1483 /* no suitable interface, frame not sent */
1450 kfree_skb(skb); 1484 kfree_skb(skb);
@@ -1458,11 +1492,12 @@ void bond_alb_monitor(struct work_struct *work)
1458 struct bonding *bond = container_of(work, struct bonding, 1492 struct bonding *bond = container_of(work, struct bonding,
1459 alb_work.work); 1493 alb_work.work);
1460 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); 1494 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1495 struct list_head *iter;
1461 struct slave *slave; 1496 struct slave *slave;
1462 1497
1463 read_lock(&bond->lock); 1498 read_lock(&bond->lock);
1464 1499
1465 if (list_empty(&bond->slave_list)) { 1500 if (!bond_has_slaves(bond)) {
1466 bond_info->tx_rebalance_counter = 0; 1501 bond_info->tx_rebalance_counter = 0;
1467 bond_info->lp_counter = 0; 1502 bond_info->lp_counter = 0;
1468 goto re_arm; 1503 goto re_arm;
@@ -1480,7 +1515,7 @@ void bond_alb_monitor(struct work_struct *work)
1480 */ 1515 */
1481 read_lock(&bond->curr_slave_lock); 1516 read_lock(&bond->curr_slave_lock);
1482 1517
1483 bond_for_each_slave(bond, slave) 1518 bond_for_each_slave(bond, slave, iter)
1484 alb_send_learning_packets(slave, slave->dev->dev_addr); 1519 alb_send_learning_packets(slave, slave->dev->dev_addr);
1485 1520
1486 read_unlock(&bond->curr_slave_lock); 1521 read_unlock(&bond->curr_slave_lock);
@@ -1493,7 +1528,7 @@ void bond_alb_monitor(struct work_struct *work)
1493 1528
1494 read_lock(&bond->curr_slave_lock); 1529 read_lock(&bond->curr_slave_lock);
1495 1530
1496 bond_for_each_slave(bond, slave) { 1531 bond_for_each_slave(bond, slave, iter) {
1497 tlb_clear_slave(bond, slave, 1); 1532 tlb_clear_slave(bond, slave, 1);
1498 if (slave == bond->curr_active_slave) { 1533 if (slave == bond->curr_active_slave) {
1499 SLAVE_TLB_INFO(slave).load = 1534 SLAVE_TLB_INFO(slave).load =
@@ -1599,13 +1634,13 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
1599 */ 1634 */
1600void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave) 1635void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
1601{ 1636{
1602 if (!list_empty(&bond->slave_list)) 1637 if (bond_has_slaves(bond))
1603 alb_change_hw_addr_on_detach(bond, slave); 1638 alb_change_hw_addr_on_detach(bond, slave);
1604 1639
1605 tlb_clear_slave(bond, slave, 0); 1640 tlb_clear_slave(bond, slave, 0);
1606 1641
1607 if (bond->alb_info.rlb_enabled) { 1642 if (bond->alb_info.rlb_enabled) {
1608 bond->alb_info.next_rx_slave = NULL; 1643 bond->alb_info.rx_slave = NULL;
1609 rlb_clear_slave(bond, slave); 1644 rlb_clear_slave(bond, slave);
1610 } 1645 }
1611} 1646}
@@ -1669,7 +1704,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
1669 swap_slave = bond->curr_active_slave; 1704 swap_slave = bond->curr_active_slave;
1670 rcu_assign_pointer(bond->curr_active_slave, new_slave); 1705 rcu_assign_pointer(bond->curr_active_slave, new_slave);
1671 1706
1672 if (!new_slave || list_empty(&bond->slave_list)) 1707 if (!new_slave || !bond_has_slaves(bond))
1673 return; 1708 return;
1674 1709
1675 /* set the new curr_active_slave to the bonds mac address 1710 /* set the new curr_active_slave to the bonds mac address
@@ -1692,6 +1727,23 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
1692 1727
1693 ASSERT_RTNL(); 1728 ASSERT_RTNL();
1694 1729
1730 /* in TLB mode, the slave might flip down/up with the old dev_addr,
1731 * and thus filter bond->dev_addr's packets, so force bond's mac
1732 */
1733 if (bond->params.mode == BOND_MODE_TLB) {
1734 struct sockaddr sa;
1735 u8 tmp_addr[ETH_ALEN];
1736
1737 memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
1738
1739 memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
1740 sa.sa_family = bond->dev->type;
1741 /* we don't care if it can't change its mac, best effort */
1742 dev_set_mac_address(new_slave->dev, &sa);
1743
1744 memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
1745 }
1746
1695 /* curr_active_slave must be set before calling alb_swap_mac_addr */ 1747 /* curr_active_slave must be set before calling alb_swap_mac_addr */
1696 if (swap_slave) { 1748 if (swap_slave) {
1697 /* swap mac address */ 1749 /* swap mac address */
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index c5eff5dafdfe..4226044efd08 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -154,9 +154,7 @@ struct alb_bond_info {
154 u8 rx_ntt; /* flag - need to transmit 154 u8 rx_ntt; /* flag - need to transmit
155 * to all rx clients 155 * to all rx clients
156 */ 156 */
157 struct slave *next_rx_slave;/* next slave to be assigned 157 struct slave *rx_slave;/* last slave to xmit from */
158 * to a new rx client for
159 */
160 u8 primary_is_promisc; /* boolean */ 158 u8 primary_is_promisc; /* boolean */
161 u32 rlb_promisc_timeout_counter;/* counts primary 159 u32 rlb_promisc_timeout_counter;/* counts primary
162 * promiscuity time 160 * promiscuity time
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e883bfe2e727..2daa066c6cdd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -78,6 +78,7 @@
78#include <net/netns/generic.h> 78#include <net/netns/generic.h>
79#include <net/pkt_sched.h> 79#include <net/pkt_sched.h>
80#include <linux/rculist.h> 80#include <linux/rculist.h>
81#include <net/flow_keys.h>
81#include "bonding.h" 82#include "bonding.h"
82#include "bond_3ad.h" 83#include "bond_3ad.h"
83#include "bond_alb.h" 84#include "bond_alb.h"
@@ -159,7 +160,8 @@ MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on
159module_param(xmit_hash_policy, charp, 0); 160module_param(xmit_hash_policy, charp, 0);
160MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; " 161MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
161 "0 for layer 2 (default), 1 for layer 3+4, " 162 "0 for layer 2 (default), 1 for layer 3+4, "
162 "2 for layer 2+3"); 163 "2 for layer 2+3, 3 for encap layer 2+3, "
164 "4 for encap layer 3+4");
163module_param(arp_interval, int, 0); 165module_param(arp_interval, int, 0);
164MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); 166MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
165module_param_array(arp_ip_target, charp, NULL, 0); 167module_param_array(arp_ip_target, charp, NULL, 0);
@@ -217,6 +219,8 @@ const struct bond_parm_tbl xmit_hashtype_tbl[] = {
217{ "layer2", BOND_XMIT_POLICY_LAYER2}, 219{ "layer2", BOND_XMIT_POLICY_LAYER2},
218{ "layer3+4", BOND_XMIT_POLICY_LAYER34}, 220{ "layer3+4", BOND_XMIT_POLICY_LAYER34},
219{ "layer2+3", BOND_XMIT_POLICY_LAYER23}, 221{ "layer2+3", BOND_XMIT_POLICY_LAYER23},
222{ "encap2+3", BOND_XMIT_POLICY_ENCAP23},
223{ "encap3+4", BOND_XMIT_POLICY_ENCAP34},
220{ NULL, -1}, 224{ NULL, -1},
221}; 225};
222 226
@@ -332,10 +336,11 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
332 __be16 proto, u16 vid) 336 __be16 proto, u16 vid)
333{ 337{
334 struct bonding *bond = netdev_priv(bond_dev); 338 struct bonding *bond = netdev_priv(bond_dev);
335 struct slave *slave; 339 struct slave *slave, *rollback_slave;
340 struct list_head *iter;
336 int res; 341 int res;
337 342
338 bond_for_each_slave(bond, slave) { 343 bond_for_each_slave(bond, slave, iter) {
339 res = vlan_vid_add(slave->dev, proto, vid); 344 res = vlan_vid_add(slave->dev, proto, vid);
340 if (res) 345 if (res)
341 goto unwind; 346 goto unwind;
@@ -344,9 +349,13 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
344 return 0; 349 return 0;
345 350
346unwind: 351unwind:
347 /* unwind from the slave that failed */ 352 /* unwind to the slave that failed */
348 bond_for_each_slave_continue_reverse(bond, slave) 353 bond_for_each_slave(bond, rollback_slave, iter) {
349 vlan_vid_del(slave->dev, proto, vid); 354 if (rollback_slave == slave)
355 break;
356
357 vlan_vid_del(rollback_slave->dev, proto, vid);
358 }
350 359
351 return res; 360 return res;
352} 361}
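Note: the unwind path above no longer iterates backwards from the failure point; it walks the list again from the head and undoes each slave until it reaches the one that failed. A minimal sketch of that rollback shape, with stub add/del functions standing in for vlan_vid_add()/vlan_vid_del():

#include <stdio.h>

#define N 4

static int add_vid(int i)
{
    printf("add vid on slave %d\n", i);
    return i == 2 ? -1 : 0;    /* pretend slave 2 fails */
}

static void del_vid(int i)
{
    printf("del vid on slave %d\n", i);
}

int main(void)
{
    int i, failed = -1;

    for (i = 0; i < N; i++) {
        if (add_vid(i) < 0) {
            failed = i;
            break;
        }
    }
    if (failed >= 0) {
        /* unwind to the slave that failed */
        for (i = 0; i < N; i++) {
            if (i == failed)
                break;
            del_vid(i);
        }
        return 1;
    }
    return 0;
}

The forward walk with an early break replaces bond_for_each_slave_continue_reverse(), which this diff drops along with the old internal slave list.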
@@ -360,9 +369,10 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
360 __be16 proto, u16 vid) 369 __be16 proto, u16 vid)
361{ 370{
362 struct bonding *bond = netdev_priv(bond_dev); 371 struct bonding *bond = netdev_priv(bond_dev);
372 struct list_head *iter;
363 struct slave *slave; 373 struct slave *slave;
364 374
365 bond_for_each_slave(bond, slave) 375 bond_for_each_slave(bond, slave, iter)
366 vlan_vid_del(slave->dev, proto, vid); 376 vlan_vid_del(slave->dev, proto, vid);
367 377
368 if (bond_is_lb(bond)) 378 if (bond_is_lb(bond))
@@ -382,15 +392,16 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
382 */ 392 */
383static int bond_set_carrier(struct bonding *bond) 393static int bond_set_carrier(struct bonding *bond)
384{ 394{
395 struct list_head *iter;
385 struct slave *slave; 396 struct slave *slave;
386 397
387 if (list_empty(&bond->slave_list)) 398 if (!bond_has_slaves(bond))
388 goto down; 399 goto down;
389 400
390 if (bond->params.mode == BOND_MODE_8023AD) 401 if (bond->params.mode == BOND_MODE_8023AD)
391 return bond_3ad_set_carrier(bond); 402 return bond_3ad_set_carrier(bond);
392 403
393 bond_for_each_slave(bond, slave) { 404 bond_for_each_slave(bond, slave, iter) {
394 if (slave->link == BOND_LINK_UP) { 405 if (slave->link == BOND_LINK_UP) {
395 if (!netif_carrier_ok(bond->dev)) { 406 if (!netif_carrier_ok(bond->dev)) {
396 netif_carrier_on(bond->dev); 407 netif_carrier_on(bond->dev);
@@ -522,7 +533,9 @@ static int bond_check_dev_link(struct bonding *bond,
522 */ 533 */
523static int bond_set_promiscuity(struct bonding *bond, int inc) 534static int bond_set_promiscuity(struct bonding *bond, int inc)
524{ 535{
536 struct list_head *iter;
525 int err = 0; 537 int err = 0;
538
526 if (USES_PRIMARY(bond->params.mode)) { 539 if (USES_PRIMARY(bond->params.mode)) {
527 /* write lock already acquired */ 540 /* write lock already acquired */
528 if (bond->curr_active_slave) { 541 if (bond->curr_active_slave) {
@@ -532,7 +545,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
532 } else { 545 } else {
533 struct slave *slave; 546 struct slave *slave;
534 547
535 bond_for_each_slave(bond, slave) { 548 bond_for_each_slave(bond, slave, iter) {
536 err = dev_set_promiscuity(slave->dev, inc); 549 err = dev_set_promiscuity(slave->dev, inc);
537 if (err) 550 if (err)
538 return err; 551 return err;
@@ -546,7 +559,9 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
546 */ 559 */
547static int bond_set_allmulti(struct bonding *bond, int inc) 560static int bond_set_allmulti(struct bonding *bond, int inc)
548{ 561{
562 struct list_head *iter;
549 int err = 0; 563 int err = 0;
564
550 if (USES_PRIMARY(bond->params.mode)) { 565 if (USES_PRIMARY(bond->params.mode)) {
551 /* write lock already acquired */ 566 /* write lock already acquired */
552 if (bond->curr_active_slave) { 567 if (bond->curr_active_slave) {
@@ -556,7 +571,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
556 } else { 571 } else {
557 struct slave *slave; 572 struct slave *slave;
558 573
559 bond_for_each_slave(bond, slave) { 574 bond_for_each_slave(bond, slave, iter) {
560 err = dev_set_allmulti(slave->dev, inc); 575 err = dev_set_allmulti(slave->dev, inc);
561 if (err) 576 if (err)
562 return err; 577 return err;
@@ -774,43 +789,24 @@ static bool bond_should_change_active(struct bonding *bond)
774/** 789/**
775 * find_best_interface - select the best available slave to be the active one 790 * find_best_interface - select the best available slave to be the active one
776 * @bond: our bonding struct 791 * @bond: our bonding struct
777 *
778 * Warning: Caller must hold curr_slave_lock for writing.
779 */ 792 */
780static struct slave *bond_find_best_slave(struct bonding *bond) 793static struct slave *bond_find_best_slave(struct bonding *bond)
781{ 794{
782 struct slave *new_active, *old_active; 795 struct slave *slave, *bestslave = NULL;
783 struct slave *bestslave = NULL; 796 struct list_head *iter;
784 int mintime = bond->params.updelay; 797 int mintime = bond->params.updelay;
785 int i;
786
787 new_active = bond->curr_active_slave;
788 798
789 if (!new_active) { /* there were no active slaves left */ 799 if (bond->primary_slave && bond->primary_slave->link == BOND_LINK_UP &&
790 new_active = bond_first_slave(bond); 800 bond_should_change_active(bond))
791 if (!new_active) 801 return bond->primary_slave;
792 return NULL; /* still no slave, return NULL */ 802
793 } 803 bond_for_each_slave(bond, slave, iter) {
794 804 if (slave->link == BOND_LINK_UP)
795 if ((bond->primary_slave) && 805 return slave;
796 bond->primary_slave->link == BOND_LINK_UP && 806 if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
797 bond_should_change_active(bond)) { 807 slave->delay < mintime) {
798 new_active = bond->primary_slave; 808 mintime = slave->delay;
799 } 809 bestslave = slave;
800
801 /* remember where to stop iterating over the slaves */
802 old_active = new_active;
803
804 bond_for_each_slave_from(bond, new_active, i, old_active) {
805 if (new_active->link == BOND_LINK_UP) {
806 return new_active;
807 } else if (new_active->link == BOND_LINK_BACK &&
808 IS_UP(new_active->dev)) {
809 /* link up, but waiting for stabilization */
810 if (new_active->delay < mintime) {
811 mintime = new_active->delay;
812 bestslave = new_active;
813 }
814 } 810 }
815 } 811 }
816 812
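Note: the simplified bond_find_best_slave() above returns the primary if it is up and allowed to take over, otherwise the first slave whose link is up, otherwise the link-back slave with the smallest remaining updelay. A standalone sketch of that last part of the search (the primary short-circuit and IS_UP() check are omitted; states and numbers are illustrative):

#include <stdio.h>

enum link { LINK_DOWN, LINK_BACK, LINK_UP };

struct member {
    const char *name;
    enum link link;
    int delay;    /* remaining updelay ticks when LINK_BACK */
};

static const struct member *find_best(const struct member *m, int count,
                                      int updelay)
{
    const struct member *best = NULL;
    int mintime = updelay;
    int i;

    for (i = 0; i < count; i++) {
        if (m[i].link == LINK_UP)
            return &m[i];            /* first fully-up slave wins */
        if (m[i].link == LINK_BACK && m[i].delay < mintime) {
            mintime = m[i].delay;    /* closest to coming up */
            best = &m[i];
        }
    }
    return best;
}

int main(void)
{
    const struct member m[] = {
        { "eth0", LINK_DOWN, 0 },
        { "eth1", LINK_BACK, 3 },
        { "eth2", LINK_BACK, 1 },
    };
    const struct member *best = find_best(m, 3, 10);

    printf("best: %s\n", best ? best->name : "(none)");
    return 0;
}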
@@ -971,35 +967,6 @@ void bond_select_active_slave(struct bonding *bond)
971 } 967 }
972} 968}
973 969
974/*--------------------------- slave list handling ---------------------------*/
975
976/*
977 * This function attaches the slave to the end of list.
978 *
979 * bond->lock held for writing by caller.
980 */
981static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
982{
983 list_add_tail_rcu(&new_slave->list, &bond->slave_list);
984 bond->slave_cnt++;
985}
986
987/*
988 * This function detaches the slave from the list.
989 * WARNING: no check is made to verify if the slave effectively
990 * belongs to <bond>.
991 * Nothing is freed on return, structures are just unchained.
992 * If any slave pointer in bond was pointing to <slave>,
993 * it should be changed by the calling function.
994 *
995 * bond->lock held for writing by caller.
996 */
997static void bond_detach_slave(struct bonding *bond, struct slave *slave)
998{
999 list_del_rcu(&slave->list);
1000 bond->slave_cnt--;
1001}
1002
1003#ifdef CONFIG_NET_POLL_CONTROLLER 970#ifdef CONFIG_NET_POLL_CONTROLLER
1004static inline int slave_enable_netpoll(struct slave *slave) 971static inline int slave_enable_netpoll(struct slave *slave)
1005{ 972{
@@ -1046,9 +1013,10 @@ static void bond_poll_controller(struct net_device *bond_dev)
1046static void bond_netpoll_cleanup(struct net_device *bond_dev) 1013static void bond_netpoll_cleanup(struct net_device *bond_dev)
1047{ 1014{
1048 struct bonding *bond = netdev_priv(bond_dev); 1015 struct bonding *bond = netdev_priv(bond_dev);
1016 struct list_head *iter;
1049 struct slave *slave; 1017 struct slave *slave;
1050 1018
1051 bond_for_each_slave(bond, slave) 1019 bond_for_each_slave(bond, slave, iter)
1052 if (IS_UP(slave->dev)) 1020 if (IS_UP(slave->dev))
1053 slave_disable_netpoll(slave); 1021 slave_disable_netpoll(slave);
1054} 1022}
@@ -1056,10 +1024,11 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
1056static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp) 1024static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
1057{ 1025{
1058 struct bonding *bond = netdev_priv(dev); 1026 struct bonding *bond = netdev_priv(dev);
1027 struct list_head *iter;
1059 struct slave *slave; 1028 struct slave *slave;
1060 int err = 0; 1029 int err = 0;
1061 1030
1062 bond_for_each_slave(bond, slave) { 1031 bond_for_each_slave(bond, slave, iter) {
1063 err = slave_enable_netpoll(slave); 1032 err = slave_enable_netpoll(slave);
1064 if (err) { 1033 if (err) {
1065 bond_netpoll_cleanup(dev); 1034 bond_netpoll_cleanup(dev);
@@ -1087,10 +1056,11 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
1087 netdev_features_t features) 1056 netdev_features_t features)
1088{ 1057{
1089 struct bonding *bond = netdev_priv(dev); 1058 struct bonding *bond = netdev_priv(dev);
1059 struct list_head *iter;
1090 netdev_features_t mask; 1060 netdev_features_t mask;
1091 struct slave *slave; 1061 struct slave *slave;
1092 1062
1093 if (list_empty(&bond->slave_list)) { 1063 if (!bond_has_slaves(bond)) {
1094 /* Disable adding VLANs to empty bond. But why? --mq */ 1064 /* Disable adding VLANs to empty bond. But why? --mq */
1095 features |= NETIF_F_VLAN_CHALLENGED; 1065 features |= NETIF_F_VLAN_CHALLENGED;
1096 return features; 1066 return features;
@@ -1100,7 +1070,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
1100 features &= ~NETIF_F_ONE_FOR_ALL; 1070 features &= ~NETIF_F_ONE_FOR_ALL;
1101 features |= NETIF_F_ALL_FOR_ALL; 1071 features |= NETIF_F_ALL_FOR_ALL;
1102 1072
1103 bond_for_each_slave(bond, slave) { 1073 bond_for_each_slave(bond, slave, iter) {
1104 features = netdev_increment_features(features, 1074 features = netdev_increment_features(features,
1105 slave->dev->features, 1075 slave->dev->features,
1106 mask); 1076 mask);
@@ -1118,16 +1088,17 @@ static void bond_compute_features(struct bonding *bond)
1118{ 1088{
1119 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; 1089 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
1120 netdev_features_t vlan_features = BOND_VLAN_FEATURES; 1090 netdev_features_t vlan_features = BOND_VLAN_FEATURES;
1091 struct net_device *bond_dev = bond->dev;
1092 struct list_head *iter;
1093 struct slave *slave;
1121 unsigned short max_hard_header_len = ETH_HLEN; 1094 unsigned short max_hard_header_len = ETH_HLEN;
1122 unsigned int gso_max_size = GSO_MAX_SIZE; 1095 unsigned int gso_max_size = GSO_MAX_SIZE;
1123 struct net_device *bond_dev = bond->dev;
1124 u16 gso_max_segs = GSO_MAX_SEGS; 1096 u16 gso_max_segs = GSO_MAX_SEGS;
1125 struct slave *slave;
1126 1097
1127 if (list_empty(&bond->slave_list)) 1098 if (!bond_has_slaves(bond))
1128 goto done; 1099 goto done;
1129 1100
1130 bond_for_each_slave(bond, slave) { 1101 bond_for_each_slave(bond, slave, iter) {
1131 vlan_features = netdev_increment_features(vlan_features, 1102 vlan_features = netdev_increment_features(vlan_features,
1132 slave->dev->vlan_features, BOND_VLAN_FEATURES); 1103 slave->dev->vlan_features, BOND_VLAN_FEATURES);
1133 1104
@@ -1233,11 +1204,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1233} 1204}
1234 1205
1235static int bond_master_upper_dev_link(struct net_device *bond_dev, 1206static int bond_master_upper_dev_link(struct net_device *bond_dev,
1236 struct net_device *slave_dev) 1207 struct net_device *slave_dev,
1208 struct slave *slave)
1237{ 1209{
1238 int err; 1210 int err;
1239 1211
1240 err = netdev_master_upper_dev_link(slave_dev, bond_dev); 1212 err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
1241 if (err) 1213 if (err)
1242 return err; 1214 return err;
1243 slave_dev->flags |= IFF_SLAVE; 1215 slave_dev->flags |= IFF_SLAVE;
@@ -1258,7 +1230,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1258{ 1230{
1259 struct bonding *bond = netdev_priv(bond_dev); 1231 struct bonding *bond = netdev_priv(bond_dev);
1260 const struct net_device_ops *slave_ops = slave_dev->netdev_ops; 1232 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1261 struct slave *new_slave = NULL; 1233 struct slave *new_slave = NULL, *prev_slave;
1262 struct sockaddr addr; 1234 struct sockaddr addr;
1263 int link_reporting; 1235 int link_reporting;
1264 int res = 0, i; 1236 int res = 0, i;
@@ -1313,7 +1285,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1313 * bond ether type mutual exclusion - don't allow slaves of dissimilar 1285 * bond ether type mutual exclusion - don't allow slaves of dissimilar
1314 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond 1286 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
1315 */ 1287 */
1316 if (list_empty(&bond->slave_list)) { 1288 if (!bond_has_slaves(bond)) {
1317 if (bond_dev->type != slave_dev->type) { 1289 if (bond_dev->type != slave_dev->type) {
1318 pr_debug("%s: change device type from %d to %d\n", 1290 pr_debug("%s: change device type from %d to %d\n",
1319 bond_dev->name, 1291 bond_dev->name,
@@ -1352,7 +1324,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1352 } 1324 }
1353 1325
1354 if (slave_ops->ndo_set_mac_address == NULL) { 1326 if (slave_ops->ndo_set_mac_address == NULL) {
1355 if (list_empty(&bond->slave_list)) { 1327 if (!bond_has_slaves(bond)) {
1356 pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.", 1328 pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
1357 bond_dev->name); 1329 bond_dev->name);
1358 bond->params.fail_over_mac = BOND_FOM_ACTIVE; 1330 bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1368,7 +1340,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1368 1340
1369 /* If this is the first slave, then we need to set the master's hardware 1341 /* If this is the first slave, then we need to set the master's hardware
1370 * address to be the same as the slave's. */ 1342 * address to be the same as the slave's. */
1371 if (list_empty(&bond->slave_list) && 1343 if (!bond_has_slaves(bond) &&
1372 bond->dev->addr_assign_type == NET_ADDR_RANDOM) 1344 bond->dev->addr_assign_type == NET_ADDR_RANDOM)
1373 bond_set_dev_addr(bond->dev, slave_dev); 1345 bond_set_dev_addr(bond->dev, slave_dev);
1374 1346
@@ -1377,7 +1349,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1377 res = -ENOMEM; 1349 res = -ENOMEM;
1378 goto err_undo_flags; 1350 goto err_undo_flags;
1379 } 1351 }
1380 INIT_LIST_HEAD(&new_slave->list);
1381 /* 1352 /*
1382 * Set the new_slave's queue_id to be zero. Queue ID mapping 1353 * Set the new_slave's queue_id to be zero. Queue ID mapping
1383 * is set via sysfs or module option if desired. 1354 * is set via sysfs or module option if desired.
@@ -1413,17 +1384,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1413 } 1384 }
1414 } 1385 }
1415 1386
1416 res = bond_master_upper_dev_link(bond_dev, slave_dev);
1417 if (res) {
1418 pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
1419 goto err_restore_mac;
1420 }
1421
1422 /* open the slave since the application closed it */ 1387 /* open the slave since the application closed it */
1423 res = dev_open(slave_dev); 1388 res = dev_open(slave_dev);
1424 if (res) { 1389 if (res) {
1425 pr_debug("Opening slave %s failed\n", slave_dev->name); 1390 pr_debug("Opening slave %s failed\n", slave_dev->name);
1426 goto err_unset_master; 1391 goto err_restore_mac;
1427 } 1392 }
1428 1393
1429 new_slave->bond = bond; 1394 new_slave->bond = bond;
@@ -1479,21 +1444,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1479 goto err_close; 1444 goto err_close;
1480 } 1445 }
1481 1446
1482 write_lock_bh(&bond->lock); 1447 prev_slave = bond_last_slave(bond);
1483
1484 bond_attach_slave(bond, new_slave);
1485 1448
1486 new_slave->delay = 0; 1449 new_slave->delay = 0;
1487 new_slave->link_failure_count = 0; 1450 new_slave->link_failure_count = 0;
1488 1451
1489 write_unlock_bh(&bond->lock);
1490
1491 bond_compute_features(bond);
1492
1493 bond_update_speed_duplex(new_slave); 1452 bond_update_speed_duplex(new_slave);
1494 1453
1495 read_lock(&bond->lock);
1496
1497 new_slave->last_arp_rx = jiffies - 1454 new_slave->last_arp_rx = jiffies -
1498 (msecs_to_jiffies(bond->params.arp_interval) + 1); 1455 (msecs_to_jiffies(bond->params.arp_interval) + 1);
1499 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) 1456 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
@@ -1554,12 +1511,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1554 } 1511 }
1555 } 1512 }
1556 1513
1557 write_lock_bh(&bond->curr_slave_lock);
1558
1559 switch (bond->params.mode) { 1514 switch (bond->params.mode) {
1560 case BOND_MODE_ACTIVEBACKUP: 1515 case BOND_MODE_ACTIVEBACKUP:
1561 bond_set_slave_inactive_flags(new_slave); 1516 bond_set_slave_inactive_flags(new_slave);
1562 bond_select_active_slave(bond);
1563 break; 1517 break;
1564 case BOND_MODE_8023AD: 1518 case BOND_MODE_8023AD:
1565 /* in 802.3ad mode, the internal mechanism 1519 /* in 802.3ad mode, the internal mechanism
@@ -1568,16 +1522,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1568 */ 1522 */
1569 bond_set_slave_inactive_flags(new_slave); 1523 bond_set_slave_inactive_flags(new_slave);
1570 /* if this is the first slave */ 1524 /* if this is the first slave */
1571 if (bond_first_slave(bond) == new_slave) { 1525 if (!prev_slave) {
1572 SLAVE_AD_INFO(new_slave).id = 1; 1526 SLAVE_AD_INFO(new_slave).id = 1;
1573 /* Initialize AD with the number of times that the AD timer is called in 1 second 1527 /* Initialize AD with the number of times that the AD timer is called in 1 second
1574 * can be called only after the mac address of the bond is set 1528 * can be called only after the mac address of the bond is set
1575 */ 1529 */
1576 bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL); 1530 bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
1577 } else { 1531 } else {
1578 struct slave *prev_slave;
1579
1580 prev_slave = bond_prev_slave(bond, new_slave);
1581 SLAVE_AD_INFO(new_slave).id = 1532 SLAVE_AD_INFO(new_slave).id =
1582 SLAVE_AD_INFO(prev_slave).id + 1; 1533 SLAVE_AD_INFO(prev_slave).id + 1;
1583 } 1534 }
@@ -1588,7 +1539,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1588 case BOND_MODE_ALB: 1539 case BOND_MODE_ALB:
1589 bond_set_active_slave(new_slave); 1540 bond_set_active_slave(new_slave);
1590 bond_set_slave_inactive_flags(new_slave); 1541 bond_set_slave_inactive_flags(new_slave);
1591 bond_select_active_slave(bond);
1592 break; 1542 break;
1593 default: 1543 default:
1594 pr_debug("This slave is always active in trunk mode\n"); 1544 pr_debug("This slave is always active in trunk mode\n");
@@ -1606,10 +1556,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1606 break; 1556 break;
1607 } /* switch(bond_mode) */ 1557 } /* switch(bond_mode) */
1608 1558
1609 write_unlock_bh(&bond->curr_slave_lock);
1610
1611 bond_set_carrier(bond);
1612
1613#ifdef CONFIG_NET_POLL_CONTROLLER 1559#ifdef CONFIG_NET_POLL_CONTROLLER
1614 slave_dev->npinfo = bond->dev->npinfo; 1560 slave_dev->npinfo = bond->dev->npinfo;
1615 if (slave_dev->npinfo) { 1561 if (slave_dev->npinfo) {
@@ -1624,17 +1570,29 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1624 } 1570 }
1625#endif 1571#endif
1626 1572
1627 read_unlock(&bond->lock);
1628
1629 res = bond_create_slave_symlinks(bond_dev, slave_dev);
1630 if (res)
1631 goto err_detach;
1632
1633 res = netdev_rx_handler_register(slave_dev, bond_handle_frame, 1573 res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
1634 new_slave); 1574 new_slave);
1635 if (res) { 1575 if (res) {
1636 pr_debug("Error %d calling netdev_rx_handler_register\n", res); 1576 pr_debug("Error %d calling netdev_rx_handler_register\n", res);
1637 goto err_dest_symlinks; 1577 goto err_detach;
1578 }
1579
1580 res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave);
1581 if (res) {
1582 pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
1583 goto err_unregister;
1584 }
1585
1586 bond->slave_cnt++;
1587 bond_compute_features(bond);
1588 bond_set_carrier(bond);
1589
1590 if (USES_PRIMARY(bond->params.mode)) {
1591 read_lock(&bond->lock);
1592 write_lock_bh(&bond->curr_slave_lock);
1593 bond_select_active_slave(bond);
1594 write_unlock_bh(&bond->curr_slave_lock);
1595 read_unlock(&bond->lock);
1638 } 1596 }
1639 1597
1640 pr_info("%s: enslaving %s as a%s interface with a%s link.\n", 1598 pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1646,8 +1604,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1646 return 0; 1604 return 0;
1647 1605
1648/* Undo stages on error */ 1606/* Undo stages on error */
1649err_dest_symlinks: 1607err_unregister:
1650 bond_destroy_slave_symlinks(bond_dev, slave_dev); 1608 netdev_rx_handler_unregister(slave_dev);
1651 1609
1652err_detach: 1610err_detach:
1653 if (!USES_PRIMARY(bond->params.mode)) 1611 if (!USES_PRIMARY(bond->params.mode))
@@ -1655,7 +1613,6 @@ err_detach:
1655 1613
1656 vlan_vids_del_by_dev(slave_dev, bond_dev); 1614 vlan_vids_del_by_dev(slave_dev, bond_dev);
1657 write_lock_bh(&bond->lock); 1615 write_lock_bh(&bond->lock);
1658 bond_detach_slave(bond, new_slave);
1659 if (bond->primary_slave == new_slave) 1616 if (bond->primary_slave == new_slave)
1660 bond->primary_slave = NULL; 1617 bond->primary_slave = NULL;
1661 if (bond->curr_active_slave == new_slave) { 1618 if (bond->curr_active_slave == new_slave) {
@@ -1675,9 +1632,6 @@ err_close:
1675 slave_dev->priv_flags &= ~IFF_BONDING; 1632 slave_dev->priv_flags &= ~IFF_BONDING;
1676 dev_close(slave_dev); 1633 dev_close(slave_dev);
1677 1634
1678err_unset_master:
1679 bond_upper_dev_unlink(bond_dev, slave_dev);
1680
1681err_restore_mac: 1635err_restore_mac:
1682 if (!bond->params.fail_over_mac) { 1636 if (!bond->params.fail_over_mac) {
1683 /* XXX TODO - fom follow mode needs to change master's 1637 /* XXX TODO - fom follow mode needs to change master's
@@ -1696,9 +1650,8 @@ err_free:
1696 kfree(new_slave); 1650 kfree(new_slave);
1697 1651
1698err_undo_flags: 1652err_undo_flags:
1699 bond_compute_features(bond);
1700 /* Enslave of first slave has failed and we need to fix master's mac */ 1653 /* Enslave of first slave has failed and we need to fix master's mac */
1701 if (list_empty(&bond->slave_list) && 1654 if (!bond_has_slaves(bond) &&
1702 ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr)) 1655 ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
1703 eth_hw_addr_random(bond_dev); 1656 eth_hw_addr_random(bond_dev);
1704 1657
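The reshuffled setup above also reorders the error path: bond_enslave() now acquires its resources roughly as allocate the slave structure, open the slave device, register the rx handler, then link it as the master's lower device, and each goto label unwinds only the steps that had already succeeded, in reverse. A minimal, runnable C sketch of that acquire-then-unwind pattern; the resource names here are illustrative, not the driver's.

#include <stdio.h>

static int grab(const char *what, int fail)	/* pretend acquisition step */
{
	if (fail) {
		fprintf(stderr, "failed to grab %s\n", what);
		return -1;
	}
	printf("grabbed %s\n", what);
	return 0;
}

static int enslave_like(int fail_at)
{
	if (grab("memory", fail_at == 1))
		goto err_out;
	if (grab("device", fail_at == 2))
		goto err_free;
	if (grab("rx handler", fail_at == 3))
		goto err_close;
	if (grab("upper link", fail_at == 4))
		goto err_unregister;
	return 0;				/* every step succeeded */

	/* unwind in reverse order of acquisition */
err_unregister:
	printf("put rx handler\n");
err_close:
	printf("put device\n");
err_free:
	printf("put memory\n");
err_out:
	return -1;
}

int main(void)
{
	enslave_like(3);			/* fails at the rx handler step */
	return 0;
}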
@@ -1749,6 +1702,11 @@ static int __bond_release_one(struct net_device *bond_dev,
1749 } 1702 }
1750 1703
1751 write_unlock_bh(&bond->lock); 1704 write_unlock_bh(&bond->lock);
1705
1706 /* release the slave from its bond */
1707 bond->slave_cnt--;
1708
1709 bond_upper_dev_unlink(bond_dev, slave_dev);
1752 /* unregister rx_handler early so bond_handle_frame wouldn't be called 1710 /* unregister rx_handler early so bond_handle_frame wouldn't be called
1753 * for this slave anymore. 1711 * for this slave anymore.
1754 */ 1712 */
@@ -1772,12 +1730,9 @@ static int __bond_release_one(struct net_device *bond_dev,
1772 1730
1773 bond->current_arp_slave = NULL; 1731 bond->current_arp_slave = NULL;
1774 1732
1775 /* release the slave from its bond */
1776 bond_detach_slave(bond, slave);
1777
1778 if (!all && !bond->params.fail_over_mac) { 1733 if (!all && !bond->params.fail_over_mac) {
1779 if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) && 1734 if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
1780 !list_empty(&bond->slave_list)) 1735 bond_has_slaves(bond))
1781 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n", 1736 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
1782 bond_dev->name, slave_dev->name, 1737 bond_dev->name, slave_dev->name,
1783 slave->perm_hwaddr, 1738 slave->perm_hwaddr,
@@ -1820,7 +1775,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1820 write_lock_bh(&bond->lock); 1775 write_lock_bh(&bond->lock);
1821 } 1776 }
1822 1777
1823 if (list_empty(&bond->slave_list)) { 1778 if (!bond_has_slaves(bond)) {
1824 bond_set_carrier(bond); 1779 bond_set_carrier(bond);
1825 eth_hw_addr_random(bond_dev); 1780 eth_hw_addr_random(bond_dev);
1826 1781
@@ -1836,7 +1791,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1836 unblock_netpoll_tx(); 1791 unblock_netpoll_tx();
1837 synchronize_rcu(); 1792 synchronize_rcu();
1838 1793
1839 if (list_empty(&bond->slave_list)) { 1794 if (!bond_has_slaves(bond)) {
1840 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); 1795 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
1841 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); 1796 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
1842 } 1797 }
@@ -1848,8 +1803,6 @@ static int __bond_release_one(struct net_device *bond_dev,
1848 bond_dev->name, slave_dev->name, bond_dev->name); 1803 bond_dev->name, slave_dev->name, bond_dev->name);
1849 1804
1850 /* must do this from outside any spinlocks */ 1805 /* must do this from outside any spinlocks */
1851 bond_destroy_slave_symlinks(bond_dev, slave_dev);
1852
1853 vlan_vids_del_by_dev(slave_dev, bond_dev); 1806 vlan_vids_del_by_dev(slave_dev, bond_dev);
1854 1807
 1855 /* If the mode USES_PRIMARY, then this case was handled above by 1808 /* If the mode USES_PRIMARY, then this case was handled above by
@@ -1873,8 +1826,6 @@ static int __bond_release_one(struct net_device *bond_dev,
1873 bond_hw_addr_flush(bond_dev, slave_dev); 1826 bond_hw_addr_flush(bond_dev, slave_dev);
1874 } 1827 }
1875 1828
1876 bond_upper_dev_unlink(bond_dev, slave_dev);
1877
1878 slave_disable_netpoll(slave); 1829 slave_disable_netpoll(slave);
1879 1830
1880 /* close slave before restoring its mac address */ 1831 /* close slave before restoring its mac address */
@@ -1913,7 +1864,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
1913 int ret; 1864 int ret;
1914 1865
1915 ret = bond_release(bond_dev, slave_dev); 1866 ret = bond_release(bond_dev, slave_dev);
1916 if (ret == 0 && list_empty(&bond->slave_list)) { 1867 if (ret == 0 && !bond_has_slaves(bond)) {
1917 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1868 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1918 pr_info("%s: destroying bond %s.\n", 1869 pr_info("%s: destroying bond %s.\n",
1919 bond_dev->name, bond_dev->name); 1870 bond_dev->name, bond_dev->name);
@@ -1922,61 +1873,6 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
1922 return ret; 1873 return ret;
1923} 1874}
1924 1875
1925/*
1926 * This function changes the active slave to slave <slave_dev>.
1927 * It returns -EINVAL in the following cases.
1928 * - <slave_dev> is not found in the list.
1929 * - There is not active slave now.
1930 * - <slave_dev> is already active.
1931 * - The link state of <slave_dev> is not BOND_LINK_UP.
1932 * - <slave_dev> is not running.
1933 * In these cases, this function does nothing.
1934 * In the other cases, current_slave pointer is changed and 0 is returned.
1935 */
1936static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev)
1937{
1938 struct bonding *bond = netdev_priv(bond_dev);
1939 struct slave *old_active = NULL;
1940 struct slave *new_active = NULL;
1941 int res = 0;
1942
1943 if (!USES_PRIMARY(bond->params.mode))
1944 return -EINVAL;
1945
1946 /* Verify that bond_dev is indeed the master of slave_dev */
1947 if (!(slave_dev->flags & IFF_SLAVE) ||
1948 !netdev_has_upper_dev(slave_dev, bond_dev))
1949 return -EINVAL;
1950
1951 read_lock(&bond->lock);
1952
1953 old_active = bond->curr_active_slave;
1954 new_active = bond_get_slave_by_dev(bond, slave_dev);
1955 /*
1956 * Changing to the current active: do nothing; return success.
1957 */
1958 if (new_active && new_active == old_active) {
1959 read_unlock(&bond->lock);
1960 return 0;
1961 }
1962
1963 if (new_active &&
1964 old_active &&
1965 new_active->link == BOND_LINK_UP &&
1966 IS_UP(new_active->dev)) {
1967 block_netpoll_tx();
1968 write_lock_bh(&bond->curr_slave_lock);
1969 bond_change_active_slave(bond, new_active);
1970 write_unlock_bh(&bond->curr_slave_lock);
1971 unblock_netpoll_tx();
1972 } else
1973 res = -EINVAL;
1974
1975 read_unlock(&bond->lock);
1976
1977 return res;
1978}
1979
1980static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) 1876static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
1981{ 1877{
1982 struct bonding *bond = netdev_priv(bond_dev); 1878 struct bonding *bond = netdev_priv(bond_dev);
@@ -1994,11 +1890,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
1994static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info) 1890static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
1995{ 1891{
1996 struct bonding *bond = netdev_priv(bond_dev); 1892 struct bonding *bond = netdev_priv(bond_dev);
1893 struct list_head *iter;
1997 int i = 0, res = -ENODEV; 1894 int i = 0, res = -ENODEV;
1998 struct slave *slave; 1895 struct slave *slave;
1999 1896
2000 read_lock(&bond->lock); 1897 read_lock(&bond->lock);
2001 bond_for_each_slave(bond, slave) { 1898 bond_for_each_slave(bond, slave, iter) {
2002 if (i++ == (int)info->slave_id) { 1899 if (i++ == (int)info->slave_id) {
2003 res = 0; 1900 res = 0;
2004 strcpy(info->slave_name, slave->dev->name); 1901 strcpy(info->slave_name, slave->dev->name);
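bond_slave_info_query() now walks the slave list with an explicit iterator cursor and counts entries until it reaches the requested slave_id. A small, runnable C sketch of the same find-the-Nth-entry walk, using a hypothetical struct slave that carries only a name and a next pointer.

#include <stdio.h>

struct slave {
	const char *name;
	struct slave *next;
};

/* Walk the list and return the Nth entry's name; -1 if the id is out of range. */
static int slave_info_query(struct slave *head, int slave_id, const char **name)
{
	struct slave *s;
	int i = 0;

	for (s = head; s; s = s->next)
		if (i++ == slave_id) {
			*name = s->name;
			return 0;
		}
	return -1;
}

int main(void)
{
	struct slave c = { "eth2", NULL }, b = { "eth1", &c }, a = { "eth0", &b };
	const char *name;

	if (!slave_info_query(&a, 2, &name))
		printf("slave 2 is %s\n", name);
	return 0;
}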
@@ -2019,12 +1916,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
2019static int bond_miimon_inspect(struct bonding *bond) 1916static int bond_miimon_inspect(struct bonding *bond)
2020{ 1917{
2021 int link_state, commit = 0; 1918 int link_state, commit = 0;
1919 struct list_head *iter;
2022 struct slave *slave; 1920 struct slave *slave;
2023 bool ignore_updelay; 1921 bool ignore_updelay;
2024 1922
2025 ignore_updelay = !bond->curr_active_slave ? true : false; 1923 ignore_updelay = !bond->curr_active_slave ? true : false;
2026 1924
2027 bond_for_each_slave(bond, slave) { 1925 bond_for_each_slave(bond, slave, iter) {
2028 slave->new_link = BOND_LINK_NOCHANGE; 1926 slave->new_link = BOND_LINK_NOCHANGE;
2029 1927
2030 link_state = bond_check_dev_link(bond, slave->dev, 0); 1928 link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2118,9 +2016,10 @@ static int bond_miimon_inspect(struct bonding *bond)
2118 2016
2119static void bond_miimon_commit(struct bonding *bond) 2017static void bond_miimon_commit(struct bonding *bond)
2120{ 2018{
2019 struct list_head *iter;
2121 struct slave *slave; 2020 struct slave *slave;
2122 2021
2123 bond_for_each_slave(bond, slave) { 2022 bond_for_each_slave(bond, slave, iter) {
2124 switch (slave->new_link) { 2023 switch (slave->new_link) {
2125 case BOND_LINK_NOCHANGE: 2024 case BOND_LINK_NOCHANGE:
2126 continue; 2025 continue;
@@ -2225,7 +2124,7 @@ void bond_mii_monitor(struct work_struct *work)
2225 2124
2226 delay = msecs_to_jiffies(bond->params.miimon); 2125 delay = msecs_to_jiffies(bond->params.miimon);
2227 2126
2228 if (list_empty(&bond->slave_list)) 2127 if (!bond_has_slaves(bond))
2229 goto re_arm; 2128 goto re_arm;
2230 2129
2231 should_notify_peers = bond_should_notify_peers(bond); 2130 should_notify_peers = bond_should_notify_peers(bond);
@@ -2274,7 +2173,7 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2274 return true; 2173 return true;
2275 2174
2276 rcu_read_lock(); 2175 rcu_read_lock();
2277 netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) { 2176 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
2278 if (ip == bond_confirm_addr(upper, 0, ip)) { 2177 if (ip == bond_confirm_addr(upper, 0, ip)) {
2279 ret = true; 2178 ret = true;
2280 break; 2179 break;
@@ -2349,10 +2248,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2349 * 2248 *
2350 * TODO: QinQ? 2249 * TODO: QinQ?
2351 */ 2250 */
2352 netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) { 2251 netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
2252 vlan_iter) {
2353 if (!is_vlan_dev(vlan_upper)) 2253 if (!is_vlan_dev(vlan_upper))
2354 continue; 2254 continue;
2355 netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) { 2255 netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
2256 iter) {
2356 if (upper == rt->dst.dev) { 2257 if (upper == rt->dst.dev) {
2357 vlan_id = vlan_dev_vlan_id(vlan_upper); 2258 vlan_id = vlan_dev_vlan_id(vlan_upper);
2358 rcu_read_unlock(); 2259 rcu_read_unlock();
@@ -2365,7 +2266,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2365 * our upper vlans, then just search for any dev that 2266 * our upper vlans, then just search for any dev that
2366 * matches, and in case it's a vlan - save the id 2267 * matches, and in case it's a vlan - save the id
2367 */ 2268 */
2368 netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) { 2269 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
2369 if (upper == rt->dst.dev) { 2270 if (upper == rt->dst.dev) {
2370 /* if it's a vlan - get its VID */ 2271 /* if it's a vlan - get its VID */
2371 if (is_vlan_dev(upper)) 2272 if (is_vlan_dev(upper))
@@ -2512,11 +2413,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2512 struct bonding *bond = container_of(work, struct bonding, 2413 struct bonding *bond = container_of(work, struct bonding,
2513 arp_work.work); 2414 arp_work.work);
2514 struct slave *slave, *oldcurrent; 2415 struct slave *slave, *oldcurrent;
2416 struct list_head *iter;
2515 int do_failover = 0; 2417 int do_failover = 0;
2516 2418
2517 read_lock(&bond->lock); 2419 read_lock(&bond->lock);
2518 2420
2519 if (list_empty(&bond->slave_list)) 2421 if (!bond_has_slaves(bond))
2520 goto re_arm; 2422 goto re_arm;
2521 2423
2522 oldcurrent = bond->curr_active_slave; 2424 oldcurrent = bond->curr_active_slave;
@@ -2528,7 +2430,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2528 * TODO: what about up/down delay in arp mode? it wasn't here before 2430 * TODO: what about up/down delay in arp mode? it wasn't here before
2529 * so it can wait 2431 * so it can wait
2530 */ 2432 */
2531 bond_for_each_slave(bond, slave) { 2433 bond_for_each_slave(bond, slave, iter) {
2532 unsigned long trans_start = dev_trans_start(slave->dev); 2434 unsigned long trans_start = dev_trans_start(slave->dev);
2533 2435
2534 if (slave->link != BOND_LINK_UP) { 2436 if (slave->link != BOND_LINK_UP) {
@@ -2619,10 +2521,11 @@ re_arm:
2619static int bond_ab_arp_inspect(struct bonding *bond) 2521static int bond_ab_arp_inspect(struct bonding *bond)
2620{ 2522{
2621 unsigned long trans_start, last_rx; 2523 unsigned long trans_start, last_rx;
2524 struct list_head *iter;
2622 struct slave *slave; 2525 struct slave *slave;
2623 int commit = 0; 2526 int commit = 0;
2624 2527
2625 bond_for_each_slave(bond, slave) { 2528 bond_for_each_slave(bond, slave, iter) {
2626 slave->new_link = BOND_LINK_NOCHANGE; 2529 slave->new_link = BOND_LINK_NOCHANGE;
2627 last_rx = slave_last_rx(bond, slave); 2530 last_rx = slave_last_rx(bond, slave);
2628 2531
@@ -2689,9 +2592,10 @@ static int bond_ab_arp_inspect(struct bonding *bond)
2689static void bond_ab_arp_commit(struct bonding *bond) 2592static void bond_ab_arp_commit(struct bonding *bond)
2690{ 2593{
2691 unsigned long trans_start; 2594 unsigned long trans_start;
2595 struct list_head *iter;
2692 struct slave *slave; 2596 struct slave *slave;
2693 2597
2694 bond_for_each_slave(bond, slave) { 2598 bond_for_each_slave(bond, slave, iter) {
2695 switch (slave->new_link) { 2599 switch (slave->new_link) {
2696 case BOND_LINK_NOCHANGE: 2600 case BOND_LINK_NOCHANGE:
2697 continue; 2601 continue;
@@ -2762,8 +2666,9 @@ do_failover:
2762 */ 2666 */
2763static void bond_ab_arp_probe(struct bonding *bond) 2667static void bond_ab_arp_probe(struct bonding *bond)
2764{ 2668{
2765 struct slave *slave, *next_slave; 2669 struct slave *slave, *before = NULL, *new_slave = NULL;
2766 int i; 2670 struct list_head *iter;
2671 bool found = false;
2767 2672
2768 read_lock(&bond->curr_slave_lock); 2673 read_lock(&bond->curr_slave_lock);
2769 2674
@@ -2793,18 +2698,12 @@ static void bond_ab_arp_probe(struct bonding *bond)
2793 2698
2794 bond_set_slave_inactive_flags(bond->current_arp_slave); 2699 bond_set_slave_inactive_flags(bond->current_arp_slave);
2795 2700
2796 /* search for next candidate */ 2701 bond_for_each_slave(bond, slave, iter) {
2797 next_slave = bond_next_slave(bond, bond->current_arp_slave); 2702 if (!found && !before && IS_UP(slave->dev))
2798 bond_for_each_slave_from(bond, slave, i, next_slave) { 2703 before = slave;
2799 if (IS_UP(slave->dev)) {
2800 slave->link = BOND_LINK_BACK;
2801 bond_set_slave_active_flags(slave);
2802 bond_arp_send_all(bond, slave);
2803 slave->jiffies = jiffies;
2804 bond->current_arp_slave = slave;
2805 break;
2806 }
2807 2704
2705 if (found && !new_slave && IS_UP(slave->dev))
2706 new_slave = slave;
2808 /* if the link state is up at this point, we 2707 /* if the link state is up at this point, we
2809 * mark it down - this can happen if we have 2708 * mark it down - this can happen if we have
2810 * simultaneous link failures and 2709 * simultaneous link failures and
@@ -2812,7 +2711,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
2812 * one the current slave so it is still marked 2711 * one the current slave so it is still marked
2813 * up when it is actually down 2712 * up when it is actually down
2814 */ 2713 */
2815 if (slave->link == BOND_LINK_UP) { 2714 if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
2816 slave->link = BOND_LINK_DOWN; 2715 slave->link = BOND_LINK_DOWN;
2817 if (slave->link_failure_count < UINT_MAX) 2716 if (slave->link_failure_count < UINT_MAX)
2818 slave->link_failure_count++; 2717 slave->link_failure_count++;
@@ -2822,7 +2721,22 @@ static void bond_ab_arp_probe(struct bonding *bond)
2822 pr_info("%s: backup interface %s is now down.\n", 2721 pr_info("%s: backup interface %s is now down.\n",
2823 bond->dev->name, slave->dev->name); 2722 bond->dev->name, slave->dev->name);
2824 } 2723 }
2724 if (slave == bond->current_arp_slave)
2725 found = true;
2825 } 2726 }
2727
2728 if (!new_slave && before)
2729 new_slave = before;
2730
2731 if (!new_slave)
2732 return;
2733
2734 new_slave->link = BOND_LINK_BACK;
2735 bond_set_slave_active_flags(new_slave);
2736 bond_arp_send_all(bond, new_slave);
2737 new_slave->jiffies = jiffies;
2738 bond->current_arp_slave = new_slave;
2739
2826} 2740}
2827 2741
2828void bond_activebackup_arp_mon(struct work_struct *work) 2742void bond_activebackup_arp_mon(struct work_struct *work)
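The rewritten bond_ab_arp_probe() replaces the old start-at-next-slave loop with a single pass that remembers two candidates: the first up slave seen before the current ARP slave (the wrap-around fallback) and the first up slave seen after it (preferred). A runnable C sketch of that one-pass circular selection; the plain array below stands in for the slave list.

#include <stdio.h>
#include <stdbool.h>

/* Pick the next "up" entry after index cur, wrapping around, in one pass:
 * remember the first up entry seen before cur (fallback) and the first up
 * entry seen after it (preferred). Returns -1 if nothing is up. */
static int next_up(const bool up[], int n, int cur)
{
	int before = -1, after = -1;
	bool found = false;
	int i;

	for (i = 0; i < n; i++) {
		if (!found && before < 0 && up[i])
			before = i;
		if (found && after < 0 && up[i])
			after = i;
		if (i == cur)
			found = true;
	}
	return after >= 0 ? after : before;
}

int main(void)
{
	bool up[] = { true, false, false, true, false };

	printf("next candidate after 3: %d\n", next_up(up, 5, 3)); /* wraps to 0 */
	return 0;
}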
@@ -2836,7 +2750,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
2836 2750
2837 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 2751 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
2838 2752
2839 if (list_empty(&bond->slave_list)) 2753 if (!bond_has_slaves(bond))
2840 goto re_arm; 2754 goto re_arm;
2841 2755
2842 should_notify_peers = bond_should_notify_peers(bond); 2756 should_notify_peers = bond_should_notify_peers(bond);
@@ -3033,99 +2947,85 @@ static struct notifier_block bond_netdev_notifier = {
3033 2947
3034/*---------------------------- Hashing Policies -----------------------------*/ 2948/*---------------------------- Hashing Policies -----------------------------*/
3035 2949
3036/* 2950/* L2 hash helper */
3037 * Hash for the output device based upon layer 2 data 2951static inline u32 bond_eth_hash(struct sk_buff *skb)
3038 */
3039static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3040{ 2952{
3041 struct ethhdr *data = (struct ethhdr *)skb->data; 2953 struct ethhdr *data = (struct ethhdr *)skb->data;
3042 2954
3043 if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto)) 2955 if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
3044 return (data->h_dest[5] ^ data->h_source[5]) % count; 2956 return data->h_dest[5] ^ data->h_source[5];
3045 2957
3046 return 0; 2958 return 0;
3047} 2959}
3048 2960
3049/* 2961/* Extract the appropriate headers based on bond's xmit policy */
3050 * Hash for the output device based upon layer 2 and layer 3 data. If 2962static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
3051 * the packet is not IP, fall back on bond_xmit_hash_policy_l2() 2963 struct flow_keys *fk)
3052 */
3053static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
3054{ 2964{
3055 const struct ethhdr *data; 2965 const struct ipv6hdr *iph6;
3056 const struct iphdr *iph; 2966 const struct iphdr *iph;
3057 const struct ipv6hdr *ipv6h; 2967 int noff, proto = -1;
3058 u32 v6hash;
3059 const __be32 *s, *d;
3060 2968
3061 if (skb->protocol == htons(ETH_P_IP) && 2969 if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
3062 pskb_network_may_pull(skb, sizeof(*iph))) { 2970 return skb_flow_dissect(skb, fk);
2971
2972 fk->ports = 0;
2973 noff = skb_network_offset(skb);
2974 if (skb->protocol == htons(ETH_P_IP)) {
2975 if (!pskb_may_pull(skb, noff + sizeof(*iph)))
2976 return false;
3063 iph = ip_hdr(skb); 2977 iph = ip_hdr(skb);
3064 data = (struct ethhdr *)skb->data; 2978 fk->src = iph->saddr;
3065 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ 2979 fk->dst = iph->daddr;
3066 (data->h_dest[5] ^ data->h_source[5])) % count; 2980 noff += iph->ihl << 2;
3067 } else if (skb->protocol == htons(ETH_P_IPV6) && 2981 if (!ip_is_fragment(iph))
3068 pskb_network_may_pull(skb, sizeof(*ipv6h))) { 2982 proto = iph->protocol;
3069 ipv6h = ipv6_hdr(skb); 2983 } else if (skb->protocol == htons(ETH_P_IPV6)) {
3070 data = (struct ethhdr *)skb->data; 2984 if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
3071 s = &ipv6h->saddr.s6_addr32[0]; 2985 return false;
3072 d = &ipv6h->daddr.s6_addr32[0]; 2986 iph6 = ipv6_hdr(skb);
3073 v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); 2987 fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
3074 v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8); 2988 fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
3075 return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count; 2989 noff += sizeof(*iph6);
3076 } 2990 proto = iph6->nexthdr;
3077 2991 } else {
3078 return bond_xmit_hash_policy_l2(skb, count); 2992 return false;
2993 }
2994 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
2995 fk->ports = skb_flow_get_ports(skb, noff, proto);
2996
2997 return true;
3079} 2998}
3080 2999
3081/* 3000/**
3082 * Hash for the output device based upon layer 3 and layer 4 data. If 3001 * bond_xmit_hash - generate a hash value based on the xmit policy
3083 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is 3002 * @bond: bonding device
3084 * altogether not IP, fall back on bond_xmit_hash_policy_l2() 3003 * @skb: buffer to use for headers
3004 * @count: modulo value
3005 *
3006 * This function will extract the necessary headers from the skb buffer and use
3007 * them to generate a hash based on the xmit_policy set in the bonding device
3008 * which will be reduced modulo count before returning.
3085 */ 3009 */
3086static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) 3010int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
3087{ 3011{
3088 u32 layer4_xor = 0; 3012 struct flow_keys flow;
3089 const struct iphdr *iph; 3013 u32 hash;
3090 const struct ipv6hdr *ipv6h;
3091 const __be32 *s, *d;
3092 const __be16 *l4 = NULL;
3093 __be16 _l4[2];
3094 int noff = skb_network_offset(skb);
3095 int poff;
3096
3097 if (skb->protocol == htons(ETH_P_IP) &&
3098 pskb_may_pull(skb, noff + sizeof(*iph))) {
3099 iph = ip_hdr(skb);
3100 poff = proto_ports_offset(iph->protocol);
3101 3014
3102 if (!ip_is_fragment(iph) && poff >= 0) { 3015 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
3103 l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff, 3016 !bond_flow_dissect(bond, skb, &flow))
3104 sizeof(_l4), &_l4); 3017 return bond_eth_hash(skb) % count;
3105 if (l4) 3018
3106 layer4_xor = ntohs(l4[0] ^ l4[1]); 3019 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
3107 } 3020 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
3108 return (layer4_xor ^ 3021 hash = bond_eth_hash(skb);
3109 ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; 3022 else
3110 } else if (skb->protocol == htons(ETH_P_IPV6) && 3023 hash = (__force u32)flow.ports;
3111 pskb_may_pull(skb, noff + sizeof(*ipv6h))) { 3024 hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
3112 ipv6h = ipv6_hdr(skb); 3025 hash ^= (hash >> 16);
3113 poff = proto_ports_offset(ipv6h->nexthdr); 3026 hash ^= (hash >> 8);
3114 if (poff >= 0) {
3115 l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
3116 sizeof(_l4), &_l4);
3117 if (l4)
3118 layer4_xor = ntohs(l4[0] ^ l4[1]);
3119 }
3120 s = &ipv6h->saddr.s6_addr32[0];
3121 d = &ipv6h->daddr.s6_addr32[0];
3122 layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
3123 layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
3124 (layer4_xor >> 8);
3125 return layer4_xor % count;
3126 }
3127 3027
3128 return bond_xmit_hash_policy_l2(skb, count); 3028 return hash % count;
3129} 3029}
3130 3030
3131/*-------------------------- Device entry points ----------------------------*/ 3031/*-------------------------- Device entry points ----------------------------*/
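The consolidated bond_xmit_hash() above starts from either the port word or an L2 hash depending on the policy, XORs in the source and destination addresses, folds the upper bits down with two shifts, and reduces the result modulo the slave count. A runnable C sketch of just that mixing step; the addresses and ports are made-up sample values.

#include <stdio.h>
#include <stdint.h>

/* Fold a 3-tuple the way the new bond_xmit_hash() combines flow keys, then
 * reduce modulo the number of slaves. */
static uint32_t xmit_hash(uint32_t saddr, uint32_t daddr, uint32_t ports,
			  unsigned int count)
{
	uint32_t hash = ports;

	hash ^= saddr ^ daddr;
	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash % count;
}

int main(void)
{
	/* 192.0.2.1 -> 198.51.100.7, source/destination port packed in one word */
	uint32_t h = xmit_hash(0xC0000201, 0xC6336407, (49152u << 16) | 80, 3);

	printf("flow maps to slave %u of 3\n", (unsigned)h);
	return 0;
}

Folding the high bits down before the modulo keeps entropy from the upper address bytes even when the slave count is small.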
@@ -3155,13 +3055,14 @@ static void bond_work_cancel_all(struct bonding *bond)
3155static int bond_open(struct net_device *bond_dev) 3055static int bond_open(struct net_device *bond_dev)
3156{ 3056{
3157 struct bonding *bond = netdev_priv(bond_dev); 3057 struct bonding *bond = netdev_priv(bond_dev);
3058 struct list_head *iter;
3158 struct slave *slave; 3059 struct slave *slave;
3159 3060
3160 /* reset slave->backup and slave->inactive */ 3061 /* reset slave->backup and slave->inactive */
3161 read_lock(&bond->lock); 3062 read_lock(&bond->lock);
3162 if (!list_empty(&bond->slave_list)) { 3063 if (bond_has_slaves(bond)) {
3163 read_lock(&bond->curr_slave_lock); 3064 read_lock(&bond->curr_slave_lock);
3164 bond_for_each_slave(bond, slave) { 3065 bond_for_each_slave(bond, slave, iter) {
3165 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) 3066 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
3166 && (slave != bond->curr_active_slave)) { 3067 && (slave != bond->curr_active_slave)) {
3167 bond_set_slave_inactive_flags(slave); 3068 bond_set_slave_inactive_flags(slave);
@@ -3221,12 +3122,13 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
3221{ 3122{
3222 struct bonding *bond = netdev_priv(bond_dev); 3123 struct bonding *bond = netdev_priv(bond_dev);
3223 struct rtnl_link_stats64 temp; 3124 struct rtnl_link_stats64 temp;
3125 struct list_head *iter;
3224 struct slave *slave; 3126 struct slave *slave;
3225 3127
3226 memset(stats, 0, sizeof(*stats)); 3128 memset(stats, 0, sizeof(*stats));
3227 3129
3228 read_lock_bh(&bond->lock); 3130 read_lock_bh(&bond->lock);
3229 bond_for_each_slave(bond, slave) { 3131 bond_for_each_slave(bond, slave, iter) {
3230 const struct rtnl_link_stats64 *sstats = 3132 const struct rtnl_link_stats64 *sstats =
3231 dev_get_stats(slave->dev, &temp); 3133 dev_get_stats(slave->dev, &temp);
3232 3134
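bond_get_stats() keeps its shape: it zeroes the aggregate and then, under the bond lock, adds each slave's counters into it field by field. A runnable C sketch of the accumulation itself, using a cut-down stand-in for rtnl_link_stats64.

#include <stdio.h>
#include <stdint.h>

struct link_stats {			/* stand-in for rtnl_link_stats64 */
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint64_t rx_errors;
};

/* Accumulate per-slave counters into the bond's aggregate. */
static void bond_sum_stats(struct link_stats *total,
			   const struct link_stats *slaves, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		total->rx_packets += slaves[i].rx_packets;
		total->tx_packets += slaves[i].tx_packets;
		total->rx_errors  += slaves[i].rx_errors;
	}
}

int main(void)
{
	struct link_stats slaves[2] = { { 100, 80, 1 }, { 40, 60, 0 } };
	struct link_stats total = { 0, 0, 0 };

	bond_sum_stats(&total, slaves, 2);
	printf("rx %llu tx %llu err %llu\n",
	       (unsigned long long)total.rx_packets,
	       (unsigned long long)total.tx_packets,
	       (unsigned long long)total.rx_errors);
	return 0;
}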
@@ -3263,6 +3165,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
3263 3165
3264static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) 3166static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
3265{ 3167{
3168 struct bonding *bond = netdev_priv(bond_dev);
3266 struct net_device *slave_dev = NULL; 3169 struct net_device *slave_dev = NULL;
3267 struct ifbond k_binfo; 3170 struct ifbond k_binfo;
3268 struct ifbond __user *u_binfo = NULL; 3171 struct ifbond __user *u_binfo = NULL;
@@ -3293,7 +3196,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3293 3196
3294 3197
3295 if (mii->reg_num == 1) { 3198 if (mii->reg_num == 1) {
3296 struct bonding *bond = netdev_priv(bond_dev);
3297 mii->val_out = 0; 3199 mii->val_out = 0;
3298 read_lock(&bond->lock); 3200 read_lock(&bond->lock);
3299 read_lock(&bond->curr_slave_lock); 3201 read_lock(&bond->curr_slave_lock);
@@ -3365,7 +3267,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3365 break; 3267 break;
3366 case BOND_CHANGE_ACTIVE_OLD: 3268 case BOND_CHANGE_ACTIVE_OLD:
3367 case SIOCBONDCHANGEACTIVE: 3269 case SIOCBONDCHANGEACTIVE:
3368 res = bond_ioctl_change_active(bond_dev, slave_dev); 3270 res = bond_option_active_slave_set(bond, slave_dev);
3369 break; 3271 break;
3370 default: 3272 default:
3371 res = -EOPNOTSUPP; 3273 res = -EOPNOTSUPP;
@@ -3393,22 +3295,24 @@ static void bond_change_rx_flags(struct net_device *bond_dev, int change)
3393static void bond_set_rx_mode(struct net_device *bond_dev) 3295static void bond_set_rx_mode(struct net_device *bond_dev)
3394{ 3296{
3395 struct bonding *bond = netdev_priv(bond_dev); 3297 struct bonding *bond = netdev_priv(bond_dev);
3298 struct list_head *iter;
3396 struct slave *slave; 3299 struct slave *slave;
3397 3300
3398 ASSERT_RTNL();
3399 3301
3302 rcu_read_lock();
3400 if (USES_PRIMARY(bond->params.mode)) { 3303 if (USES_PRIMARY(bond->params.mode)) {
3401 slave = rtnl_dereference(bond->curr_active_slave); 3304 slave = rcu_dereference(bond->curr_active_slave);
3402 if (slave) { 3305 if (slave) {
3403 dev_uc_sync(slave->dev, bond_dev); 3306 dev_uc_sync(slave->dev, bond_dev);
3404 dev_mc_sync(slave->dev, bond_dev); 3307 dev_mc_sync(slave->dev, bond_dev);
3405 } 3308 }
3406 } else { 3309 } else {
3407 bond_for_each_slave(bond, slave) { 3310 bond_for_each_slave_rcu(bond, slave, iter) {
3408 dev_uc_sync_multiple(slave->dev, bond_dev); 3311 dev_uc_sync_multiple(slave->dev, bond_dev);
3409 dev_mc_sync_multiple(slave->dev, bond_dev); 3312 dev_mc_sync_multiple(slave->dev, bond_dev);
3410 } 3313 }
3411 } 3314 }
3315 rcu_read_unlock();
3412} 3316}
3413 3317
3414static int bond_neigh_init(struct neighbour *n) 3318static int bond_neigh_init(struct neighbour *n)
@@ -3471,7 +3375,8 @@ static int bond_neigh_setup(struct net_device *dev,
3471static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) 3375static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
3472{ 3376{
3473 struct bonding *bond = netdev_priv(bond_dev); 3377 struct bonding *bond = netdev_priv(bond_dev);
3474 struct slave *slave; 3378 struct slave *slave, *rollback_slave;
3379 struct list_head *iter;
3475 int res = 0; 3380 int res = 0;
3476 3381
3477 pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond, 3382 pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
@@ -3492,10 +3397,9 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
3492 * call to the base driver. 3397 * call to the base driver.
3493 */ 3398 */
3494 3399
3495 bond_for_each_slave(bond, slave) { 3400 bond_for_each_slave(bond, slave, iter) {
3496 pr_debug("s %p s->p %p c_m %p\n", 3401 pr_debug("s %p c_m %p\n",
3497 slave, 3402 slave,
3498 bond_prev_slave(bond, slave),
3499 slave->dev->netdev_ops->ndo_change_mtu); 3403 slave->dev->netdev_ops->ndo_change_mtu);
3500 3404
3501 res = dev_set_mtu(slave->dev, new_mtu); 3405 res = dev_set_mtu(slave->dev, new_mtu);
@@ -3520,13 +3424,16 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
3520 3424
3521unwind: 3425unwind:
3522 /* unwind from head to the slave that failed */ 3426 /* unwind from head to the slave that failed */
3523 bond_for_each_slave_continue_reverse(bond, slave) { 3427 bond_for_each_slave(bond, rollback_slave, iter) {
3524 int tmp_res; 3428 int tmp_res;
3525 3429
3526 tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu); 3430 if (rollback_slave == slave)
3431 break;
3432
3433 tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
3527 if (tmp_res) { 3434 if (tmp_res) {
3528 pr_debug("unwind err %d dev %s\n", 3435 pr_debug("unwind err %d dev %s\n",
3529 tmp_res, slave->dev->name); 3436 tmp_res, rollback_slave->dev->name);
3530 } 3437 }
3531 } 3438 }
3532 3439
@@ -3543,8 +3450,9 @@ unwind:
3543static int bond_set_mac_address(struct net_device *bond_dev, void *addr) 3450static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3544{ 3451{
3545 struct bonding *bond = netdev_priv(bond_dev); 3452 struct bonding *bond = netdev_priv(bond_dev);
3453 struct slave *slave, *rollback_slave;
3546 struct sockaddr *sa = addr, tmp_sa; 3454 struct sockaddr *sa = addr, tmp_sa;
3547 struct slave *slave; 3455 struct list_head *iter;
3548 int res = 0; 3456 int res = 0;
3549 3457
3550 if (bond->params.mode == BOND_MODE_ALB) 3458 if (bond->params.mode == BOND_MODE_ALB)
@@ -3578,7 +3486,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3578 * call to the base driver. 3486 * call to the base driver.
3579 */ 3487 */
3580 3488
3581 bond_for_each_slave(bond, slave) { 3489 bond_for_each_slave(bond, slave, iter) {
3582 const struct net_device_ops *slave_ops = slave->dev->netdev_ops; 3490 const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
3583 pr_debug("slave %p %s\n", slave, slave->dev->name); 3491 pr_debug("slave %p %s\n", slave, slave->dev->name);
3584 3492
@@ -3610,13 +3518,16 @@ unwind:
3610 tmp_sa.sa_family = bond_dev->type; 3518 tmp_sa.sa_family = bond_dev->type;
3611 3519
3612 /* unwind from head to the slave that failed */ 3520 /* unwind from head to the slave that failed */
3613 bond_for_each_slave_continue_reverse(bond, slave) { 3521 bond_for_each_slave(bond, rollback_slave, iter) {
3614 int tmp_res; 3522 int tmp_res;
3615 3523
3616 tmp_res = dev_set_mac_address(slave->dev, &tmp_sa); 3524 if (rollback_slave == slave)
3525 break;
3526
3527 tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
3617 if (tmp_res) { 3528 if (tmp_res) {
3618 pr_debug("unwind err %d dev %s\n", 3529 pr_debug("unwind err %d dev %s\n",
3619 tmp_res, slave->dev->name); 3530 tmp_res, rollback_slave->dev->name);
3620 } 3531 }
3621 } 3532 }
3622 3533
@@ -3635,11 +3546,12 @@ unwind:
3635 */ 3546 */
3636void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id) 3547void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
3637{ 3548{
3549 struct list_head *iter;
3638 struct slave *slave; 3550 struct slave *slave;
3639 int i = slave_id; 3551 int i = slave_id;
3640 3552
3641 /* Here we start from the slave with slave_id */ 3553 /* Here we start from the slave with slave_id */
3642 bond_for_each_slave_rcu(bond, slave) { 3554 bond_for_each_slave_rcu(bond, slave, iter) {
3643 if (--i < 0) { 3555 if (--i < 0) {
3644 if (slave_can_tx(slave)) { 3556 if (slave_can_tx(slave)) {
3645 bond_dev_queue_xmit(bond, skb, slave->dev); 3557 bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -3650,7 +3562,7 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
3650 3562
3651 /* Here we start from the first slave up to slave_id */ 3563 /* Here we start from the first slave up to slave_id */
3652 i = slave_id; 3564 i = slave_id;
3653 bond_for_each_slave_rcu(bond, slave) { 3565 bond_for_each_slave_rcu(bond, slave, iter) {
3654 if (--i < 0) 3566 if (--i < 0)
3655 break; 3567 break;
3656 if (slave_can_tx(slave)) { 3568 if (slave_can_tx(slave)) {
@@ -3707,8 +3619,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
3707 return NETDEV_TX_OK; 3619 return NETDEV_TX_OK;
3708} 3620}
3709 3621
3710/* 3622/* In bond_xmit_xor() , we determine the output device by using a pre-
3711 * In bond_xmit_xor() , we determine the output device by using a pre-
3712 * determined xmit_hash_policy(), If the selected device is not enabled, 3623 * determined xmit_hash_policy(), If the selected device is not enabled,
3713 * find the next active slave. 3624 * find the next active slave.
3714 */ 3625 */
@@ -3716,8 +3627,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
3716{ 3627{
3717 struct bonding *bond = netdev_priv(bond_dev); 3628 struct bonding *bond = netdev_priv(bond_dev);
3718 3629
3719 bond_xmit_slave_id(bond, skb, 3630 bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
3720 bond->xmit_hash_policy(skb, bond->slave_cnt));
3721 3631
3722 return NETDEV_TX_OK; 3632 return NETDEV_TX_OK;
3723} 3633}
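bond_xmit_xor() now feeds bond_xmit_hash() reduced modulo the slave count into bond_xmit_slave_id(), which scans from that index to the end of the list and then wraps from the head, taking the first slave that can transmit. A runnable C sketch of the two-pass circular scan; the can_tx array stands in for per-slave state.

#include <stdio.h>
#include <stdbool.h>

/* Start at slave_id and take the first transmit-capable slave, wrapping to
 * the head if needed. Returns -1 when no slave can transmit. */
static int pick_tx_slave(const bool can_tx[], int count, int slave_id)
{
	int i;

	for (i = slave_id; i < count; i++)	/* slave_id .. end */
		if (can_tx[i])
			return i;
	for (i = 0; i < slave_id; i++)		/* head .. slave_id-1 */
		if (can_tx[i])
			return i;
	return -1;
}

int main(void)
{
	bool can_tx[] = { true, false, false, true };
	int hash = 2;				/* e.g. bond_xmit_hash() % count */

	printf("hash 2 -> slave %d\n", pick_tx_slave(can_tx, 4, hash));
	return 0;
}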
@@ -3727,8 +3637,9 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
3727{ 3637{
3728 struct bonding *bond = netdev_priv(bond_dev); 3638 struct bonding *bond = netdev_priv(bond_dev);
3729 struct slave *slave = NULL; 3639 struct slave *slave = NULL;
3640 struct list_head *iter;
3730 3641
3731 bond_for_each_slave_rcu(bond, slave) { 3642 bond_for_each_slave_rcu(bond, slave, iter) {
3732 if (bond_is_last_slave(bond, slave)) 3643 if (bond_is_last_slave(bond, slave))
3733 break; 3644 break;
3734 if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) { 3645 if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
@@ -3753,22 +3664,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
3753 3664
3754/*------------------------- Device initialization ---------------------------*/ 3665/*------------------------- Device initialization ---------------------------*/
3755 3666
3756static void bond_set_xmit_hash_policy(struct bonding *bond)
3757{
3758 switch (bond->params.xmit_policy) {
3759 case BOND_XMIT_POLICY_LAYER23:
3760 bond->xmit_hash_policy = bond_xmit_hash_policy_l23;
3761 break;
3762 case BOND_XMIT_POLICY_LAYER34:
3763 bond->xmit_hash_policy = bond_xmit_hash_policy_l34;
3764 break;
3765 case BOND_XMIT_POLICY_LAYER2:
3766 default:
3767 bond->xmit_hash_policy = bond_xmit_hash_policy_l2;
3768 break;
3769 }
3770}
3771
3772/* 3667/*
3773 * Lookup the slave that corresponds to a qid 3668 * Lookup the slave that corresponds to a qid
3774 */ 3669 */
@@ -3777,13 +3672,14 @@ static inline int bond_slave_override(struct bonding *bond,
3777{ 3672{
3778 struct slave *slave = NULL; 3673 struct slave *slave = NULL;
3779 struct slave *check_slave; 3674 struct slave *check_slave;
3675 struct list_head *iter;
3780 int res = 1; 3676 int res = 1;
3781 3677
3782 if (!skb->queue_mapping) 3678 if (!skb->queue_mapping)
3783 return 1; 3679 return 1;
3784 3680
3785 /* Find out if any slaves have the same mapping as this skb. */ 3681 /* Find out if any slaves have the same mapping as this skb. */
3786 bond_for_each_slave_rcu(bond, check_slave) { 3682 bond_for_each_slave_rcu(bond, check_slave, iter) {
3787 if (check_slave->queue_id == skb->queue_mapping) { 3683 if (check_slave->queue_id == skb->queue_mapping) {
3788 slave = check_slave; 3684 slave = check_slave;
3789 break; 3685 break;
@@ -3869,7 +3765,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
3869 return NETDEV_TX_BUSY; 3765 return NETDEV_TX_BUSY;
3870 3766
3871 rcu_read_lock(); 3767 rcu_read_lock();
3872 if (!list_empty(&bond->slave_list)) 3768 if (bond_has_slaves(bond))
3873 ret = __bond_start_xmit(skb, dev); 3769 ret = __bond_start_xmit(skb, dev);
3874 else 3770 else
3875 kfree_skb(skb); 3771 kfree_skb(skb);
@@ -3878,43 +3774,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
3878 return ret; 3774 return ret;
3879} 3775}
3880 3776
3881/*
3882 * set bond mode specific net device operations
3883 */
3884void bond_set_mode_ops(struct bonding *bond, int mode)
3885{
3886 struct net_device *bond_dev = bond->dev;
3887
3888 switch (mode) {
3889 case BOND_MODE_ROUNDROBIN:
3890 break;
3891 case BOND_MODE_ACTIVEBACKUP:
3892 break;
3893 case BOND_MODE_XOR:
3894 bond_set_xmit_hash_policy(bond);
3895 break;
3896 case BOND_MODE_BROADCAST:
3897 break;
3898 case BOND_MODE_8023AD:
3899 bond_set_xmit_hash_policy(bond);
3900 break;
3901 case BOND_MODE_ALB:
3902 /* FALLTHRU */
3903 case BOND_MODE_TLB:
3904 break;
3905 default:
3906 /* Should never happen, mode already checked */
3907 pr_err("%s: Error: Unknown bonding mode %d\n",
3908 bond_dev->name, mode);
3909 break;
3910 }
3911}
3912
3913static int bond_ethtool_get_settings(struct net_device *bond_dev, 3777static int bond_ethtool_get_settings(struct net_device *bond_dev,
3914 struct ethtool_cmd *ecmd) 3778 struct ethtool_cmd *ecmd)
3915{ 3779{
3916 struct bonding *bond = netdev_priv(bond_dev); 3780 struct bonding *bond = netdev_priv(bond_dev);
3917 unsigned long speed = 0; 3781 unsigned long speed = 0;
3782 struct list_head *iter;
3918 struct slave *slave; 3783 struct slave *slave;
3919 3784
3920 ecmd->duplex = DUPLEX_UNKNOWN; 3785 ecmd->duplex = DUPLEX_UNKNOWN;
@@ -3926,7 +3791,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
3926 * this is an accurate maximum. 3791 * this is an accurate maximum.
3927 */ 3792 */
3928 read_lock(&bond->lock); 3793 read_lock(&bond->lock);
3929 bond_for_each_slave(bond, slave) { 3794 bond_for_each_slave(bond, slave, iter) {
3930 if (SLAVE_IS_OK(slave)) { 3795 if (SLAVE_IS_OK(slave)) {
3931 if (slave->speed != SPEED_UNKNOWN) 3796 if (slave->speed != SPEED_UNKNOWN)
3932 speed += slave->speed; 3797 speed += slave->speed;
@@ -3994,14 +3859,13 @@ static void bond_destructor(struct net_device *bond_dev)
3994 free_netdev(bond_dev); 3859 free_netdev(bond_dev);
3995} 3860}
3996 3861
3997static void bond_setup(struct net_device *bond_dev) 3862void bond_setup(struct net_device *bond_dev)
3998{ 3863{
3999 struct bonding *bond = netdev_priv(bond_dev); 3864 struct bonding *bond = netdev_priv(bond_dev);
4000 3865
4001 /* initialize rwlocks */ 3866 /* initialize rwlocks */
4002 rwlock_init(&bond->lock); 3867 rwlock_init(&bond->lock);
4003 rwlock_init(&bond->curr_slave_lock); 3868 rwlock_init(&bond->curr_slave_lock);
4004 INIT_LIST_HEAD(&bond->slave_list);
4005 bond->params = bonding_defaults; 3869 bond->params = bonding_defaults;
4006 3870
4007 /* Initialize pointers */ 3871 /* Initialize pointers */
@@ -4011,7 +3875,6 @@ static void bond_setup(struct net_device *bond_dev)
4011 ether_setup(bond_dev); 3875 ether_setup(bond_dev);
4012 bond_dev->netdev_ops = &bond_netdev_ops; 3876 bond_dev->netdev_ops = &bond_netdev_ops;
4013 bond_dev->ethtool_ops = &bond_ethtool_ops; 3877 bond_dev->ethtool_ops = &bond_ethtool_ops;
4014 bond_set_mode_ops(bond, bond->params.mode);
4015 3878
4016 bond_dev->destructor = bond_destructor; 3879 bond_dev->destructor = bond_destructor;
4017 3880
@@ -4057,12 +3920,13 @@ static void bond_setup(struct net_device *bond_dev)
4057static void bond_uninit(struct net_device *bond_dev) 3920static void bond_uninit(struct net_device *bond_dev)
4058{ 3921{
4059 struct bonding *bond = netdev_priv(bond_dev); 3922 struct bonding *bond = netdev_priv(bond_dev);
4060 struct slave *slave, *tmp_slave; 3923 struct list_head *iter;
3924 struct slave *slave;
4061 3925
4062 bond_netpoll_cleanup(bond_dev); 3926 bond_netpoll_cleanup(bond_dev);
4063 3927
4064 /* Release the bonded slaves */ 3928 /* Release the bonded slaves */
4065 list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list) 3929 bond_for_each_slave(bond, slave, iter)
4066 __bond_release_one(bond_dev, slave->dev, true); 3930 __bond_release_one(bond_dev, slave->dev, true);
4067 pr_info("%s: released all slaves\n", bond_dev->name); 3931 pr_info("%s: released all slaves\n", bond_dev->name);
4068 3932
@@ -4495,32 +4359,11 @@ static int bond_init(struct net_device *bond_dev)
4495 return 0; 4359 return 0;
4496} 4360}
4497 4361
4498static int bond_validate(struct nlattr *tb[], struct nlattr *data[]) 4362unsigned int bond_get_num_tx_queues(void)
4499{
4500 if (tb[IFLA_ADDRESS]) {
4501 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
4502 return -EINVAL;
4503 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
4504 return -EADDRNOTAVAIL;
4505 }
4506 return 0;
4507}
4508
4509static unsigned int bond_get_num_tx_queues(void)
4510{ 4363{
4511 return tx_queues; 4364 return tx_queues;
4512} 4365}
4513 4366
4514static struct rtnl_link_ops bond_link_ops __read_mostly = {
4515 .kind = "bond",
4516 .priv_size = sizeof(struct bonding),
4517 .setup = bond_setup,
4518 .validate = bond_validate,
4519 .get_num_tx_queues = bond_get_num_tx_queues,
4520 .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
4521 as for TX queues */
4522};
4523
4524/* Create a new bond based on the specified name and bonding parameters. 4367/* Create a new bond based on the specified name and bonding parameters.
4525 * If name is NULL, obtain a suitable "bond%d" name for us. 4368 * If name is NULL, obtain a suitable "bond%d" name for us.
4526 * Caller must NOT hold rtnl_lock; we need to release it here before we 4369 * Caller must NOT hold rtnl_lock; we need to release it here before we
@@ -4607,7 +4450,7 @@ static int __init bonding_init(void)
4607 if (res) 4450 if (res)
4608 goto out; 4451 goto out;
4609 4452
4610 res = rtnl_link_register(&bond_link_ops); 4453 res = bond_netlink_init();
4611 if (res) 4454 if (res)
4612 goto err_link; 4455 goto err_link;
4613 4456
@@ -4623,7 +4466,7 @@ static int __init bonding_init(void)
4623out: 4466out:
4624 return res; 4467 return res;
4625err: 4468err:
4626 rtnl_link_unregister(&bond_link_ops); 4469 bond_netlink_fini();
4627err_link: 4470err_link:
4628 unregister_pernet_subsys(&bond_net_ops); 4471 unregister_pernet_subsys(&bond_net_ops);
4629 goto out; 4472 goto out;
@@ -4636,7 +4479,7 @@ static void __exit bonding_exit(void)
4636 4479
4637 bond_destroy_debugfs(); 4480 bond_destroy_debugfs();
4638 4481
4639 rtnl_link_unregister(&bond_link_ops); 4482 bond_netlink_fini();
4640 unregister_pernet_subsys(&bond_net_ops); 4483 unregister_pernet_subsys(&bond_net_ops);
4641 4484
4642#ifdef CONFIG_NET_POLL_CONTROLLER 4485#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4653,4 +4496,3 @@ MODULE_LICENSE("GPL");
4653MODULE_VERSION(DRV_VERSION); 4496MODULE_VERSION(DRV_VERSION);
4654MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION); 4497MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
4655MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others"); 4498MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
4656MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
new file mode 100644
index 000000000000..7661261de2f0
--- /dev/null
+++ b/drivers/net/bonding/bond_netlink.c
@@ -0,0 +1,131 @@
1/*
2 * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
3 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/module.h>
14#include <linux/errno.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/if_link.h>
18#include <linux/if_ether.h>
19#include <net/netlink.h>
20#include <net/rtnetlink.h>
21#include "bonding.h"
22
23static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
24 [IFLA_BOND_MODE] = { .type = NLA_U8 },
25 [IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
26};
27
28static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
29{
30 if (tb[IFLA_ADDRESS]) {
31 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
32 return -EINVAL;
33 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
34 return -EADDRNOTAVAIL;
35 }
36 return 0;
37}
38
39static int bond_changelink(struct net_device *bond_dev,
40 struct nlattr *tb[], struct nlattr *data[])
41{
42 struct bonding *bond = netdev_priv(bond_dev);
43 int err;
44
45 if (data && data[IFLA_BOND_MODE]) {
46 int mode = nla_get_u8(data[IFLA_BOND_MODE]);
47
48 err = bond_option_mode_set(bond, mode);
49 if (err)
50 return err;
51 }
52 if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
53 int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
54 struct net_device *slave_dev;
55
56 if (ifindex == 0) {
57 slave_dev = NULL;
58 } else {
59 slave_dev = __dev_get_by_index(dev_net(bond_dev),
60 ifindex);
61 if (!slave_dev)
62 return -ENODEV;
63 }
64 err = bond_option_active_slave_set(bond, slave_dev);
65 if (err)
66 return err;
67 }
68 return 0;
69}
70
71static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
72 struct nlattr *tb[], struct nlattr *data[])
73{
74 int err;
75
76 err = bond_changelink(bond_dev, tb, data);
77 if (err < 0)
78 return err;
79
80 return register_netdevice(bond_dev);
81}
82
83static size_t bond_get_size(const struct net_device *bond_dev)
84{
 85 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
 86 nla_total_size(sizeof(u32)); /* IFLA_BOND_ACTIVE_SLAVE */
87}
88
89static int bond_fill_info(struct sk_buff *skb,
90 const struct net_device *bond_dev)
91{
92 struct bonding *bond = netdev_priv(bond_dev);
93 struct net_device *slave_dev = bond_option_active_slave_get(bond);
94
95 if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
96 (slave_dev &&
97 nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
98 goto nla_put_failure;
99 return 0;
100
101nla_put_failure:
102 return -EMSGSIZE;
103}
104
105struct rtnl_link_ops bond_link_ops __read_mostly = {
106 .kind = "bond",
107 .priv_size = sizeof(struct bonding),
108 .setup = bond_setup,
109 .maxtype = IFLA_BOND_MAX,
110 .policy = bond_policy,
111 .validate = bond_validate,
112 .newlink = bond_newlink,
113 .changelink = bond_changelink,
114 .get_size = bond_get_size,
115 .fill_info = bond_fill_info,
116 .get_num_tx_queues = bond_get_num_tx_queues,
117 .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
118 as for TX queues */
119};
120
121int __init bond_netlink_init(void)
122{
123 return rtnl_link_register(&bond_link_ops);
124}
125
126void bond_netlink_fini(void)
127{
128 rtnl_link_unregister(&bond_link_ops);
129}
130
131MODULE_ALIAS_RTNL_LINK("bond");
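bond_netlink.c registers an nla_policy table so the rtnetlink core can reject IFLA_BOND_MODE and IFLA_BOND_ACTIVE_SLAVE attributes of the wrong size before bond_changelink() ever runs. The sketch below is not the kernel netlink API; it is a small, runnable C illustration of the same table-driven length check, with invented attribute and struct names.

#include <stdio.h>
#include <stddef.h>

enum { ATTR_MODE, ATTR_ACTIVE_SLAVE, ATTR_MAX };

struct attr {			/* a received attribute: type + payload length */
	int type;
	size_t len;
};

/* Expected payload size per attribute type, mirroring the nla_policy idea. */
static const size_t policy[ATTR_MAX] = {
	[ATTR_MODE]	    = sizeof(unsigned char),	/* NLA_U8  */
	[ATTR_ACTIVE_SLAVE] = sizeof(unsigned int),	/* NLA_U32 */
};

static int validate(const struct attr *attrs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (attrs[i].type < 0 || attrs[i].type >= ATTR_MAX)
			return -1;			/* unknown attribute */
		if (attrs[i].len != policy[attrs[i].type])
			return -1;			/* wrong payload size */
	}
	return 0;
}

int main(void)
{
	struct attr msg[] = { { ATTR_MODE, 1 }, { ATTR_ACTIVE_SLAVE, 4 } };

	printf("message %s\n", validate(msg, 2) ? "rejected" : "accepted");
	return 0;
}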
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
new file mode 100644
index 000000000000..9a5223c7b4d1
--- /dev/null
+++ b/drivers/net/bonding/bond_options.c
@@ -0,0 +1,142 @@
1/*
2 * drivers/net/bond/bond_options.c - bonding options
3 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/errno.h>
14#include <linux/if.h>
15#include <linux/netdevice.h>
16#include <linux/rwlock.h>
17#include <linux/rcupdate.h>
18#include "bonding.h"
19
20static bool bond_mode_is_valid(int mode)
21{
22 int i;
23
24 for (i = 0; bond_mode_tbl[i].modename; i++);
25
26 return mode >= 0 && mode < i;
27}
28
29int bond_option_mode_set(struct bonding *bond, int mode)
30{
31 if (!bond_mode_is_valid(mode)) {
32 pr_err("invalid mode value %d.\n", mode);
33 return -EINVAL;
34 }
35
36 if (bond->dev->flags & IFF_UP) {
37 pr_err("%s: unable to update mode because interface is up.\n",
38 bond->dev->name);
39 return -EPERM;
40 }
41
42 if (bond_has_slaves(bond)) {
43 pr_err("%s: unable to update mode because bond has slaves.\n",
44 bond->dev->name);
45 return -EPERM;
46 }
47
48 if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) {
49 pr_err("%s: %s mode is incompatible with arp monitoring.\n",
50 bond->dev->name, bond_mode_tbl[mode].modename);
51 return -EINVAL;
52 }
53
54 /* don't cache arp_validate between modes */
55 bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
56 bond->params.mode = mode;
57 return 0;
58}
59
60static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
61 struct slave *slave)
62{
63 return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
64}
65
66struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
67{
68 struct slave *slave = rcu_dereference(bond->curr_active_slave);
69
70 return __bond_option_active_slave_get(bond, slave);
71}
72
73struct net_device *bond_option_active_slave_get(struct bonding *bond)
74{
75 return __bond_option_active_slave_get(bond, bond->curr_active_slave);
76}
77
78int bond_option_active_slave_set(struct bonding *bond,
79 struct net_device *slave_dev)
80{
81 int ret = 0;
82
83 if (slave_dev) {
84 if (!netif_is_bond_slave(slave_dev)) {
85 pr_err("Device %s is not bonding slave.\n",
86 slave_dev->name);
87 return -EINVAL;
88 }
89
90 if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
91 pr_err("%s: Device %s is not our slave.\n",
92 bond->dev->name, slave_dev->name);
93 return -EINVAL;
94 }
95 }
96
97 if (!USES_PRIMARY(bond->params.mode)) {
98 pr_err("%s: Unable to change active slave; %s is in mode %d\n",
99 bond->dev->name, bond->dev->name, bond->params.mode);
100 return -EINVAL;
101 }
102
103 block_netpoll_tx();
104 read_lock(&bond->lock);
105 write_lock_bh(&bond->curr_slave_lock);
106
107 /* check to see if we are clearing active */
108 if (!slave_dev) {
109 pr_info("%s: Clearing current active slave.\n",
110 bond->dev->name);
111 rcu_assign_pointer(bond->curr_active_slave, NULL);
112 bond_select_active_slave(bond);
113 } else {
114 struct slave *old_active = bond->curr_active_slave;
115 struct slave *new_active = bond_slave_get_rtnl(slave_dev);
116
117 BUG_ON(!new_active);
118
119 if (new_active == old_active) {
120 /* do nothing */
121 pr_info("%s: %s is already the current active slave.\n",
122 bond->dev->name, new_active->dev->name);
123 } else {
124 if (old_active && (new_active->link == BOND_LINK_UP) &&
125 IS_UP(new_active->dev)) {
126 pr_info("%s: Setting %s as active slave.\n",
127 bond->dev->name, new_active->dev->name);
128 bond_change_active_slave(bond, new_active);
129 } else {
130 pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
131 bond->dev->name, new_active->dev->name,
132 new_active->dev->name);
133 ret = -EINVAL;
134 }
135 }
136 }
137
138 write_unlock_bh(&bond->curr_slave_lock);
139 read_unlock(&bond->lock);
140 unblock_netpoll_tx();
141 return ret;
142}
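Note: bond_options.c pulls the mode and active-slave handling out of the sysfs store functions so the same validation can be shared with the netlink path. A hedged sketch of a caller is below; example_change_mode() is a hypothetical function, and it assumes, as the sysfs code later in this patch does, that RTNL is already held when the option setters run.

#include <linux/rtnetlink.h>
#include "bonding.h"

/* hypothetical caller, for illustration only */
static int example_change_mode(struct bonding *bond, int mode)
{
	int err;

	ASSERT_RTNL();			/* option setters are called under RTNL */

	err = bond_option_mode_set(bond, mode);
	if (err)
		return err;

	pr_info("%s: mode set to %s (%d)\n", bond->dev->name,
		bond_mode_tbl[mode].modename, mode);
	return 0;
}

Note that bond_option_mode_set() itself rejects a mode change while the device is up, while slaves are attached, or when a load-balance mode conflicts with ARP monitoring, so callers do not need to repeat those checks.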
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 20a6ee25bb63..fb868d6c22da 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -10,8 +10,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
10 __acquires(&bond->lock) 10 __acquires(&bond->lock)
11{ 11{
12 struct bonding *bond = seq->private; 12 struct bonding *bond = seq->private;
13 loff_t off = 0; 13 struct list_head *iter;
14 struct slave *slave; 14 struct slave *slave;
15 loff_t off = 0;
15 16
16 /* make sure the bond won't be taken away */ 17 /* make sure the bond won't be taken away */
17 rcu_read_lock(); 18 rcu_read_lock();
@@ -20,7 +21,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
20 if (*pos == 0) 21 if (*pos == 0)
21 return SEQ_START_TOKEN; 22 return SEQ_START_TOKEN;
22 23
23 bond_for_each_slave(bond, slave) 24 bond_for_each_slave(bond, slave, iter)
24 if (++off == *pos) 25 if (++off == *pos)
25 return slave; 26 return slave;
26 27
@@ -30,17 +31,25 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
30static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) 31static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
31{ 32{
32 struct bonding *bond = seq->private; 33 struct bonding *bond = seq->private;
33 struct slave *slave = v; 34 struct list_head *iter;
35 struct slave *slave;
36 bool found = false;
34 37
35 ++*pos; 38 ++*pos;
36 if (v == SEQ_START_TOKEN) 39 if (v == SEQ_START_TOKEN)
37 return bond_first_slave(bond); 40 return bond_first_slave(bond);
38 41
39 if (bond_is_last_slave(bond, slave)) 42 if (bond_is_last_slave(bond, v))
40 return NULL; 43 return NULL;
41 slave = bond_next_slave(bond, slave);
42 44
43 return slave; 45 bond_for_each_slave(bond, slave, iter) {
46 if (found)
47 return slave;
48 if (slave == v)
49 found = true;
50 }
51
52 return NULL;
44} 53}
45 54
46static void bond_info_seq_stop(struct seq_file *seq, void *v) 55static void bond_info_seq_stop(struct seq_file *seq, void *v)
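Note: the /proc iterator above no longer uses bond_next_slave(); it re-walks the list with the three-argument bond_for_each_slave() introduced later in this series, because the slave list now lives in the net_device adjacency lists and has no stable next/prev helpers. The sketch below shows the general shape of that iteration; example_count_slaves() is a hypothetical helper, not part of the patch.

#include "bonding.h"

/* hypothetical helper showing the explicit iterator-cursor pattern */
static int example_count_slaves(struct bonding *bond)
{
	struct list_head *iter;	/* cursor supplied by the caller */
	struct slave *slave;
	int count = 0;

	/* expands to netdev_for_each_lower_private((bond)->dev, slave, iter) */
	bond_for_each_slave(bond, slave, iter)
		count++;

	return count;
}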
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index c29b836749b6..47749c970a01 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -168,41 +168,6 @@ static const struct class_attribute class_attr_bonding_masters = {
168 .namespace = bonding_namespace, 168 .namespace = bonding_namespace,
169}; 169};
170 170
171int bond_create_slave_symlinks(struct net_device *master,
172 struct net_device *slave)
173{
174 char linkname[IFNAMSIZ+7];
175 int ret = 0;
176
177 /* first, create a link from the slave back to the master */
178 ret = sysfs_create_link(&(slave->dev.kobj), &(master->dev.kobj),
179 "master");
180 if (ret)
181 return ret;
182 /* next, create a link from the master to the slave */
183 sprintf(linkname, "slave_%s", slave->name);
184 ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
185 linkname);
186
187 /* free the master link created earlier in case of error */
188 if (ret)
189 sysfs_remove_link(&(slave->dev.kobj), "master");
190
191 return ret;
192
193}
194
195void bond_destroy_slave_symlinks(struct net_device *master,
196 struct net_device *slave)
197{
198 char linkname[IFNAMSIZ+7];
199
200 sysfs_remove_link(&(slave->dev.kobj), "master");
201 sprintf(linkname, "slave_%s", slave->name);
202 sysfs_remove_link(&(master->dev.kobj), linkname);
203}
204
205
206/* 171/*
207 * Show the slaves in the current bond. 172 * Show the slaves in the current bond.
208 */ 173 */
@@ -210,11 +175,14 @@ static ssize_t bonding_show_slaves(struct device *d,
210 struct device_attribute *attr, char *buf) 175 struct device_attribute *attr, char *buf)
211{ 176{
212 struct bonding *bond = to_bond(d); 177 struct bonding *bond = to_bond(d);
178 struct list_head *iter;
213 struct slave *slave; 179 struct slave *slave;
214 int res = 0; 180 int res = 0;
215 181
216 read_lock(&bond->lock); 182 if (!rtnl_trylock())
217 bond_for_each_slave(bond, slave) { 183 return restart_syscall();
184
185 bond_for_each_slave(bond, slave, iter) {
218 if (res > (PAGE_SIZE - IFNAMSIZ)) { 186 if (res > (PAGE_SIZE - IFNAMSIZ)) {
219 /* not enough space for another interface name */ 187 /* not enough space for another interface name */
220 if ((PAGE_SIZE - res) > 10) 188 if ((PAGE_SIZE - res) > 10)
@@ -224,7 +192,9 @@ static ssize_t bonding_show_slaves(struct device *d,
224 } 192 }
225 res += sprintf(buf + res, "%s ", slave->dev->name); 193 res += sprintf(buf + res, "%s ", slave->dev->name);
226 } 194 }
227 read_unlock(&bond->lock); 195
196 rtnl_unlock();
197
228 if (res) 198 if (res)
229 buf[res-1] = '\n'; /* eat the leftover space */ 199 buf[res-1] = '\n'; /* eat the leftover space */
230 200
@@ -313,50 +283,26 @@ static ssize_t bonding_store_mode(struct device *d,
313 struct device_attribute *attr, 283 struct device_attribute *attr,
314 const char *buf, size_t count) 284 const char *buf, size_t count)
315{ 285{
316 int new_value, ret = count; 286 int new_value, ret;
317 struct bonding *bond = to_bond(d); 287 struct bonding *bond = to_bond(d);
318 288
319 if (!rtnl_trylock())
320 return restart_syscall();
321
322 if (bond->dev->flags & IFF_UP) {
323 pr_err("unable to update mode of %s because interface is up.\n",
324 bond->dev->name);
325 ret = -EPERM;
326 goto out;
327 }
328
329 if (!list_empty(&bond->slave_list)) {
330 pr_err("unable to update mode of %s because it has slaves.\n",
331 bond->dev->name);
332 ret = -EPERM;
333 goto out;
334 }
335
336 new_value = bond_parse_parm(buf, bond_mode_tbl); 289 new_value = bond_parse_parm(buf, bond_mode_tbl);
337 if (new_value < 0) { 290 if (new_value < 0) {
338 pr_err("%s: Ignoring invalid mode value %.*s.\n", 291 pr_err("%s: Ignoring invalid mode value %.*s.\n",
339 bond->dev->name, (int)strlen(buf) - 1, buf); 292 bond->dev->name, (int)strlen(buf) - 1, buf);
340 ret = -EINVAL; 293 return -EINVAL;
341 goto out;
342 } 294 }
343 if ((new_value == BOND_MODE_ALB || 295 if (!rtnl_trylock())
344 new_value == BOND_MODE_TLB) && 296 return restart_syscall();
345 bond->params.arp_interval) { 297
346 pr_err("%s: %s mode is incompatible with arp monitoring.\n", 298 ret = bond_option_mode_set(bond, new_value);
347 bond->dev->name, bond_mode_tbl[new_value].modename); 299 if (!ret) {
348 ret = -EINVAL; 300 pr_info("%s: setting mode to %s (%d).\n",
349 goto out; 301 bond->dev->name, bond_mode_tbl[new_value].modename,
302 new_value);
303 ret = count;
350 } 304 }
351 305
352 /* don't cache arp_validate between modes */
353 bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
354 bond->params.mode = new_value;
355 bond_set_mode_ops(bond, bond->params.mode);
356 pr_info("%s: setting mode to %s (%d).\n",
357 bond->dev->name, bond_mode_tbl[new_value].modename,
358 new_value);
359out:
360 rtnl_unlock(); 306 rtnl_unlock();
361 return ret; 307 return ret;
362} 308}
@@ -392,7 +338,6 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
392 ret = -EINVAL; 338 ret = -EINVAL;
393 } else { 339 } else {
394 bond->params.xmit_policy = new_value; 340 bond->params.xmit_policy = new_value;
395 bond_set_mode_ops(bond, bond->params.mode);
396 pr_info("%s: setting xmit hash policy to %s (%d).\n", 341 pr_info("%s: setting xmit hash policy to %s (%d).\n",
397 bond->dev->name, 342 bond->dev->name,
398 xmit_hashtype_tbl[new_value].modename, new_value); 343 xmit_hashtype_tbl[new_value].modename, new_value);
@@ -522,7 +467,7 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
522 if (!rtnl_trylock()) 467 if (!rtnl_trylock())
523 return restart_syscall(); 468 return restart_syscall();
524 469
525 if (!list_empty(&bond->slave_list)) { 470 if (bond_has_slaves(bond)) {
526 pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n", 471 pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
527 bond->dev->name); 472 bond->dev->name);
528 ret = -EPERM; 473 ret = -EPERM;
@@ -656,11 +601,15 @@ static ssize_t bonding_store_arp_targets(struct device *d,
656 const char *buf, size_t count) 601 const char *buf, size_t count)
657{ 602{
658 struct bonding *bond = to_bond(d); 603 struct bonding *bond = to_bond(d);
604 struct list_head *iter;
659 struct slave *slave; 605 struct slave *slave;
660 __be32 newtarget, *targets; 606 __be32 newtarget, *targets;
661 unsigned long *targets_rx; 607 unsigned long *targets_rx;
662 int ind, i, j, ret = -EINVAL; 608 int ind, i, j, ret = -EINVAL;
663 609
610 if (!rtnl_trylock())
611 return restart_syscall();
612
664 targets = bond->params.arp_targets; 613 targets = bond->params.arp_targets;
665 newtarget = in_aton(buf + 1); 614 newtarget = in_aton(buf + 1);
666 /* look for adds */ 615 /* look for adds */
@@ -688,7 +637,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
688 &newtarget); 637 &newtarget);
689 /* not to race with bond_arp_rcv */ 638 /* not to race with bond_arp_rcv */
690 write_lock_bh(&bond->lock); 639 write_lock_bh(&bond->lock);
691 bond_for_each_slave(bond, slave) 640 bond_for_each_slave(bond, slave, iter)
692 slave->target_last_arp_rx[ind] = jiffies; 641 slave->target_last_arp_rx[ind] = jiffies;
693 targets[ind] = newtarget; 642 targets[ind] = newtarget;
694 write_unlock_bh(&bond->lock); 643 write_unlock_bh(&bond->lock);
@@ -714,7 +663,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
714 &newtarget); 663 &newtarget);
715 664
716 write_lock_bh(&bond->lock); 665 write_lock_bh(&bond->lock);
717 bond_for_each_slave(bond, slave) { 666 bond_for_each_slave(bond, slave, iter) {
718 targets_rx = slave->target_last_arp_rx; 667 targets_rx = slave->target_last_arp_rx;
719 j = ind; 668 j = ind;
720 for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++) 669 for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -734,6 +683,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
734 683
735 ret = count; 684 ret = count;
736out: 685out:
686 rtnl_unlock();
737 return ret; 687 return ret;
738} 688}
739static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets); 689static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
@@ -1111,6 +1061,7 @@ static ssize_t bonding_store_primary(struct device *d,
1111 const char *buf, size_t count) 1061 const char *buf, size_t count)
1112{ 1062{
1113 struct bonding *bond = to_bond(d); 1063 struct bonding *bond = to_bond(d);
1064 struct list_head *iter;
1114 char ifname[IFNAMSIZ]; 1065 char ifname[IFNAMSIZ];
1115 struct slave *slave; 1066 struct slave *slave;
1116 1067
@@ -1138,7 +1089,7 @@ static ssize_t bonding_store_primary(struct device *d,
1138 goto out; 1089 goto out;
1139 } 1090 }
1140 1091
1141 bond_for_each_slave(bond, slave) { 1092 bond_for_each_slave(bond, slave, iter) {
1142 if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { 1093 if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
1143 pr_info("%s: Setting %s as primary slave.\n", 1094 pr_info("%s: Setting %s as primary slave.\n",
1144 bond->dev->name, slave->dev->name); 1095 bond->dev->name, slave->dev->name);
@@ -1268,13 +1219,13 @@ static ssize_t bonding_show_active_slave(struct device *d,
1268 char *buf) 1219 char *buf)
1269{ 1220{
1270 struct bonding *bond = to_bond(d); 1221 struct bonding *bond = to_bond(d);
1271 struct slave *curr; 1222 struct net_device *slave_dev;
1272 int count = 0; 1223 int count = 0;
1273 1224
1274 rcu_read_lock(); 1225 rcu_read_lock();
1275 curr = rcu_dereference(bond->curr_active_slave); 1226 slave_dev = bond_option_active_slave_get_rcu(bond);
1276 if (USES_PRIMARY(bond->params.mode) && curr) 1227 if (slave_dev)
1277 count = sprintf(buf, "%s\n", curr->dev->name); 1228 count = sprintf(buf, "%s\n", slave_dev->name);
1278 rcu_read_unlock(); 1229 rcu_read_unlock();
1279 1230
1280 return count; 1231 return count;
@@ -1284,80 +1235,33 @@ static ssize_t bonding_store_active_slave(struct device *d,
1284 struct device_attribute *attr, 1235 struct device_attribute *attr,
1285 const char *buf, size_t count) 1236 const char *buf, size_t count)
1286{ 1237{
1287 struct slave *slave, *old_active, *new_active; 1238 int ret;
1288 struct bonding *bond = to_bond(d); 1239 struct bonding *bond = to_bond(d);
1289 char ifname[IFNAMSIZ]; 1240 char ifname[IFNAMSIZ];
1241 struct net_device *dev;
1290 1242
1291 if (!rtnl_trylock()) 1243 if (!rtnl_trylock())
1292 return restart_syscall(); 1244 return restart_syscall();
1293 1245
1294 old_active = new_active = NULL;
1295 block_netpoll_tx();
1296 read_lock(&bond->lock);
1297 write_lock_bh(&bond->curr_slave_lock);
1298
1299 if (!USES_PRIMARY(bond->params.mode)) {
1300 pr_info("%s: Unable to change active slave; %s is in mode %d\n",
1301 bond->dev->name, bond->dev->name, bond->params.mode);
1302 goto out;
1303 }
1304
1305 sscanf(buf, "%15s", ifname); /* IFNAMSIZ */ 1246 sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
1306
1307 /* check to see if we are clearing active */
1308 if (!strlen(ifname) || buf[0] == '\n') { 1247 if (!strlen(ifname) || buf[0] == '\n') {
1309 pr_info("%s: Clearing current active slave.\n", 1248 dev = NULL;
1310 bond->dev->name); 1249 } else {
1311 rcu_assign_pointer(bond->curr_active_slave, NULL); 1250 dev = __dev_get_by_name(dev_net(bond->dev), ifname);
1312 bond_select_active_slave(bond); 1251 if (!dev) {
1313 goto out; 1252 ret = -ENODEV;
1314 } 1253 goto out;
1315
1316 bond_for_each_slave(bond, slave) {
1317 if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
1318 old_active = bond->curr_active_slave;
1319 new_active = slave;
1320 if (new_active == old_active) {
1321 /* do nothing */
1322 pr_info("%s: %s is already the current"
1323 " active slave.\n",
1324 bond->dev->name,
1325 slave->dev->name);
1326 goto out;
1327 } else {
1328 if ((new_active) &&
1329 (old_active) &&
1330 (new_active->link == BOND_LINK_UP) &&
1331 IS_UP(new_active->dev)) {
1332 pr_info("%s: Setting %s as active"
1333 " slave.\n",
1334 bond->dev->name,
1335 slave->dev->name);
1336 bond_change_active_slave(bond,
1337 new_active);
1338 } else {
1339 pr_info("%s: Could not set %s as"
1340 " active slave; either %s is"
1341 " down or the link is down.\n",
1342 bond->dev->name,
1343 slave->dev->name,
1344 slave->dev->name);
1345 }
1346 goto out;
1347 }
1348 } 1254 }
1349 } 1255 }
1350 1256
1351 pr_info("%s: Unable to set %.*s as active slave.\n", 1257 ret = bond_option_active_slave_set(bond, dev);
1352 bond->dev->name, (int)strlen(buf) - 1, buf); 1258 if (!ret)
1353 out: 1259 ret = count;
1354 write_unlock_bh(&bond->curr_slave_lock);
1355 read_unlock(&bond->lock);
1356 unblock_netpoll_tx();
1357 1260
1261 out:
1358 rtnl_unlock(); 1262 rtnl_unlock();
1359 1263
1360 return count; 1264 return ret;
1361 1265
1362} 1266}
1363static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, 1267static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
@@ -1493,14 +1397,14 @@ static ssize_t bonding_show_queue_id(struct device *d,
1493 char *buf) 1397 char *buf)
1494{ 1398{
1495 struct bonding *bond = to_bond(d); 1399 struct bonding *bond = to_bond(d);
1400 struct list_head *iter;
1496 struct slave *slave; 1401 struct slave *slave;
1497 int res = 0; 1402 int res = 0;
1498 1403
1499 if (!rtnl_trylock()) 1404 if (!rtnl_trylock())
1500 return restart_syscall(); 1405 return restart_syscall();
1501 1406
1502 read_lock(&bond->lock); 1407 bond_for_each_slave(bond, slave, iter) {
1503 bond_for_each_slave(bond, slave) {
1504 if (res > (PAGE_SIZE - IFNAMSIZ - 6)) { 1408 if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
1505 /* not enough space for another interface_name:queue_id pair */ 1409 /* not enough space for another interface_name:queue_id pair */
1506 if ((PAGE_SIZE - res) > 10) 1410 if ((PAGE_SIZE - res) > 10)
@@ -1511,9 +1415,9 @@ static ssize_t bonding_show_queue_id(struct device *d,
1511 res += sprintf(buf + res, "%s:%d ", 1415 res += sprintf(buf + res, "%s:%d ",
1512 slave->dev->name, slave->queue_id); 1416 slave->dev->name, slave->queue_id);
1513 } 1417 }
1514 read_unlock(&bond->lock);
1515 if (res) 1418 if (res)
1516 buf[res-1] = '\n'; /* eat the leftover space */ 1419 buf[res-1] = '\n'; /* eat the leftover space */
1420
1517 rtnl_unlock(); 1421 rtnl_unlock();
1518 1422
1519 return res; 1423 return res;
@@ -1529,6 +1433,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
1529{ 1433{
1530 struct slave *slave, *update_slave; 1434 struct slave *slave, *update_slave;
1531 struct bonding *bond = to_bond(d); 1435 struct bonding *bond = to_bond(d);
1436 struct list_head *iter;
1532 u16 qid; 1437 u16 qid;
1533 int ret = count; 1438 int ret = count;
1534 char *delim; 1439 char *delim;
@@ -1561,11 +1466,9 @@ static ssize_t bonding_store_queue_id(struct device *d,
1561 if (!sdev) 1466 if (!sdev)
1562 goto err_no_cmd; 1467 goto err_no_cmd;
1563 1468
1564 read_lock(&bond->lock);
1565
1566 /* Search for thes slave and check for duplicate qids */ 1469 /* Search for thes slave and check for duplicate qids */
1567 update_slave = NULL; 1470 update_slave = NULL;
1568 bond_for_each_slave(bond, slave) { 1471 bond_for_each_slave(bond, slave, iter) {
1569 if (sdev == slave->dev) 1472 if (sdev == slave->dev)
1570 /* 1473 /*
1571 * We don't need to check the matching 1474 * We don't need to check the matching
@@ -1573,23 +1476,20 @@ static ssize_t bonding_store_queue_id(struct device *d,
1573 */ 1476 */
1574 update_slave = slave; 1477 update_slave = slave;
1575 else if (qid && qid == slave->queue_id) { 1478 else if (qid && qid == slave->queue_id) {
1576 goto err_no_cmd_unlock; 1479 goto err_no_cmd;
1577 } 1480 }
1578 } 1481 }
1579 1482
1580 if (!update_slave) 1483 if (!update_slave)
1581 goto err_no_cmd_unlock; 1484 goto err_no_cmd;
1582 1485
1583 /* Actually set the qids for the slave */ 1486 /* Actually set the qids for the slave */
1584 update_slave->queue_id = qid; 1487 update_slave->queue_id = qid;
1585 1488
1586 read_unlock(&bond->lock);
1587out: 1489out:
1588 rtnl_unlock(); 1490 rtnl_unlock();
1589 return ret; 1491 return ret;
1590 1492
1591err_no_cmd_unlock:
1592 read_unlock(&bond->lock);
1593err_no_cmd: 1493err_no_cmd:
1594 pr_info("invalid input for queue_id set for %s.\n", 1494 pr_info("invalid input for queue_id set for %s.\n",
1595 bond->dev->name); 1495 bond->dev->name);
@@ -1619,8 +1519,12 @@ static ssize_t bonding_store_slaves_active(struct device *d,
1619{ 1519{
1620 struct bonding *bond = to_bond(d); 1520 struct bonding *bond = to_bond(d);
1621 int new_value, ret = count; 1521 int new_value, ret = count;
1522 struct list_head *iter;
1622 struct slave *slave; 1523 struct slave *slave;
1623 1524
1525 if (!rtnl_trylock())
1526 return restart_syscall();
1527
1624 if (sscanf(buf, "%d", &new_value) != 1) { 1528 if (sscanf(buf, "%d", &new_value) != 1) {
1625 pr_err("%s: no all_slaves_active value specified.\n", 1529 pr_err("%s: no all_slaves_active value specified.\n",
1626 bond->dev->name); 1530 bond->dev->name);
@@ -1640,8 +1544,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
1640 goto out; 1544 goto out;
1641 } 1545 }
1642 1546
1643 read_lock(&bond->lock); 1547 bond_for_each_slave(bond, slave, iter) {
1644 bond_for_each_slave(bond, slave) {
1645 if (!bond_is_active_slave(slave)) { 1548 if (!bond_is_active_slave(slave)) {
1646 if (new_value) 1549 if (new_value)
1647 slave->inactive = 0; 1550 slave->inactive = 0;
@@ -1649,8 +1552,8 @@ static ssize_t bonding_store_slaves_active(struct device *d,
1649 slave->inactive = 1; 1552 slave->inactive = 1;
1650 } 1553 }
1651 } 1554 }
1652 read_unlock(&bond->lock);
1653out: 1555out:
1556 rtnl_unlock();
1654 return ret; 1557 return ret;
1655} 1558}
1656static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, 1559static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
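Note: across bond_sysfs.c the conversion is the same: read_lock(&bond->lock) around slave-list walks is replaced by taking RTNL with rtnl_trylock(), returning restart_syscall() when the lock is contended so the write(2) is retried. A sketch of that store-handler shape follows; example_store() and its printout are placeholders, and to_bond() refers to the local macro defined at the top of bond_sysfs.c.

#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include "bonding.h"

/* placeholder store handler illustrating the locking pattern */
static ssize_t example_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct bonding *bond = to_bond(d);
	struct list_head *iter;
	struct slave *slave;

	if (!rtnl_trylock())		/* cannot sleep waiting for RTNL here */
		return restart_syscall();

	bond_for_each_slave(bond, slave, iter)
		pr_info("%s: has slave %s\n", bond->dev->name, slave->dev->name);

	rtnl_unlock();
	return count;
}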
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 03cf3fd14490..046a60535e04 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -58,6 +58,11 @@
58#define TX_QUEUE_OVERRIDE(mode) \ 58#define TX_QUEUE_OVERRIDE(mode) \
59 (((mode) == BOND_MODE_ACTIVEBACKUP) || \ 59 (((mode) == BOND_MODE_ACTIVEBACKUP) || \
60 ((mode) == BOND_MODE_ROUNDROBIN)) 60 ((mode) == BOND_MODE_ROUNDROBIN))
61
62#define BOND_MODE_IS_LB(mode) \
63 (((mode) == BOND_MODE_TLB) || \
64 ((mode) == BOND_MODE_ALB))
65
61/* 66/*
62 * Less bad way to call ioctl from within the kernel; this needs to be 67 * Less bad way to call ioctl from within the kernel; this needs to be
63 * done some other way to get the call out of interrupt context. 68 * done some other way to get the call out of interrupt context.
@@ -72,63 +77,37 @@
72 res; }) 77 res; })
73 78
74/* slave list primitives */ 79/* slave list primitives */
75#define bond_to_slave(ptr) list_entry(ptr, struct slave, list) 80#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
81
82#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))
76 83
77/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */ 84/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
78#define bond_first_slave(bond) \ 85#define bond_first_slave(bond) \
79 list_first_entry_or_null(&(bond)->slave_list, struct slave, list) 86 (bond_has_slaves(bond) ? \
87 netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
88 NULL)
80#define bond_last_slave(bond) \ 89#define bond_last_slave(bond) \
81 (list_empty(&(bond)->slave_list) ? NULL : \ 90 (bond_has_slaves(bond) ? \
82 bond_to_slave((bond)->slave_list.prev)) 91 netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
92 NULL)
83 93
84#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list) 94#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
85#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list) 95#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
86
87/* Since bond_first/last_slave can return NULL, these can return NULL too */
88#define bond_next_slave(bond, pos) \
89 (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
90 bond_to_slave((pos)->list.next))
91
92#define bond_prev_slave(bond, pos) \
93 (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
94 bond_to_slave((pos)->list.prev))
95
96/**
97 * bond_for_each_slave_from - iterate the slaves list from a starting point
98 * @bond: the bond holding this list.
99 * @pos: current slave.
100 * @cnt: counter for max number of moves
101 * @start: starting point.
102 *
103 * Caller must hold bond->lock
104 */
105#define bond_for_each_slave_from(bond, pos, cnt, start) \
106 for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
107 cnt++, pos = bond_next_slave(bond, pos))
108 96
109/** 97/**
110 * bond_for_each_slave - iterate over all slaves 98 * bond_for_each_slave - iterate over all slaves
111 * @bond: the bond holding this list 99 * @bond: the bond holding this list
112 * @pos: current slave 100 * @pos: current slave
101 * @iter: list_head * iterator
113 * 102 *
114 * Caller must hold bond->lock 103 * Caller must hold bond->lock
115 */ 104 */
116#define bond_for_each_slave(bond, pos) \ 105#define bond_for_each_slave(bond, pos, iter) \
117 list_for_each_entry(pos, &(bond)->slave_list, list) 106 netdev_for_each_lower_private((bond)->dev, pos, iter)
118 107
119/* Caller must have rcu_read_lock */ 108/* Caller must have rcu_read_lock */
120#define bond_for_each_slave_rcu(bond, pos) \ 109#define bond_for_each_slave_rcu(bond, pos, iter) \
121 list_for_each_entry_rcu(pos, &(bond)->slave_list, list) 110 netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
122
123/**
124 * bond_for_each_slave_reverse - iterate in reverse from a given position
125 * @bond: the bond holding this list
126 * @pos: slave to continue from
127 *
128 * Caller must hold bond->lock
129 */
130#define bond_for_each_slave_continue_reverse(bond, pos) \
131 list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
132 111
133#ifdef CONFIG_NET_POLL_CONTROLLER 112#ifdef CONFIG_NET_POLL_CONTROLLER
134extern atomic_t netpoll_block_tx; 113extern atomic_t netpoll_block_tx;
@@ -188,7 +167,6 @@ struct bond_parm_tbl {
188 167
189struct slave { 168struct slave {
190 struct net_device *dev; /* first - useful for panic debug */ 169 struct net_device *dev; /* first - useful for panic debug */
191 struct list_head list;
192 struct bonding *bond; /* our master */ 170 struct bonding *bond; /* our master */
193 int delay; 171 int delay;
194 unsigned long jiffies; 172 unsigned long jiffies;
@@ -228,7 +206,6 @@ struct slave {
228 */ 206 */
229struct bonding { 207struct bonding {
230 struct net_device *dev; /* first - useful for panic debug */ 208 struct net_device *dev; /* first - useful for panic debug */
231 struct list_head slave_list;
232 struct slave *curr_active_slave; 209 struct slave *curr_active_slave;
233 struct slave *current_arp_slave; 210 struct slave *current_arp_slave;
234 struct slave *primary_slave; 211 struct slave *primary_slave;
@@ -245,7 +222,6 @@ struct bonding {
245 char proc_file_name[IFNAMSIZ]; 222 char proc_file_name[IFNAMSIZ];
246#endif /* CONFIG_PROC_FS */ 223#endif /* CONFIG_PROC_FS */
247 struct list_head bond_list; 224 struct list_head bond_list;
248 int (*xmit_hash_policy)(struct sk_buff *, int);
249 u16 rr_tx_counter; 225 u16 rr_tx_counter;
250 struct ad_bond_info ad_info; 226 struct ad_bond_info ad_info;
251 struct alb_bond_info alb_info; 227 struct alb_bond_info alb_info;
@@ -276,13 +252,7 @@ struct bonding {
276static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, 252static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
277 struct net_device *slave_dev) 253 struct net_device *slave_dev)
278{ 254{
279 struct slave *slave = NULL; 255 return netdev_lower_dev_get_private(bond->dev, slave_dev);
280
281 bond_for_each_slave(bond, slave)
282 if (slave->dev == slave_dev)
283 return slave;
284
285 return NULL;
286} 256}
287 257
288static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) 258static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -294,8 +264,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
294 264
295static inline bool bond_is_lb(const struct bonding *bond) 265static inline bool bond_is_lb(const struct bonding *bond)
296{ 266{
297 return (bond->params.mode == BOND_MODE_TLB || 267 return BOND_MODE_IS_LB(bond->params.mode);
298 bond->params.mode == BOND_MODE_ALB);
299} 268}
300 269
301static inline void bond_set_active_slave(struct slave *slave) 270static inline void bond_set_active_slave(struct slave *slave)
@@ -432,21 +401,18 @@ static inline bool slave_can_tx(struct slave *slave)
432struct bond_net; 401struct bond_net;
433 402
434int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); 403int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
435struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
436int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 404int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
437void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id); 405void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
438int bond_create(struct net *net, const char *name); 406int bond_create(struct net *net, const char *name);
439int bond_create_sysfs(struct bond_net *net); 407int bond_create_sysfs(struct bond_net *net);
440void bond_destroy_sysfs(struct bond_net *net); 408void bond_destroy_sysfs(struct bond_net *net);
441void bond_prepare_sysfs_group(struct bonding *bond); 409void bond_prepare_sysfs_group(struct bonding *bond);
442int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
443void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
444int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); 410int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
445int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); 411int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
446void bond_mii_monitor(struct work_struct *); 412void bond_mii_monitor(struct work_struct *);
447void bond_loadbalance_arp_mon(struct work_struct *); 413void bond_loadbalance_arp_mon(struct work_struct *);
448void bond_activebackup_arp_mon(struct work_struct *); 414void bond_activebackup_arp_mon(struct work_struct *);
449void bond_set_mode_ops(struct bonding *bond, int mode); 415int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
450int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl); 416int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
451void bond_select_active_slave(struct bonding *bond); 417void bond_select_active_slave(struct bonding *bond);
452void bond_change_active_slave(struct bonding *bond, struct slave *new_active); 418void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
@@ -456,6 +422,14 @@ void bond_debug_register(struct bonding *bond);
456void bond_debug_unregister(struct bonding *bond); 422void bond_debug_unregister(struct bonding *bond);
457void bond_debug_reregister(struct bonding *bond); 423void bond_debug_reregister(struct bonding *bond);
458const char *bond_mode_name(int mode); 424const char *bond_mode_name(int mode);
425void bond_setup(struct net_device *bond_dev);
426unsigned int bond_get_num_tx_queues(void);
427int bond_netlink_init(void);
428void bond_netlink_fini(void);
429int bond_option_mode_set(struct bonding *bond, int mode);
430int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
431struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
432struct net_device *bond_option_active_slave_get(struct bonding *bond);
459 433
460struct bond_net { 434struct bond_net {
461 struct net * net; /* Associated network namespace */ 435 struct net * net; /* Associated network namespace */
@@ -492,9 +466,24 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
492static inline struct slave *bond_slave_has_mac(struct bonding *bond, 466static inline struct slave *bond_slave_has_mac(struct bonding *bond,
493 const u8 *mac) 467 const u8 *mac)
494{ 468{
469 struct list_head *iter;
495 struct slave *tmp; 470 struct slave *tmp;
496 471
497 bond_for_each_slave(bond, tmp) 472 bond_for_each_slave(bond, tmp, iter)
473 if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
474 return tmp;
475
476 return NULL;
477}
478
479/* Caller must hold rcu_read_lock() for read */
480static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
481 const u8 *mac)
482{
483 struct list_head *iter;
484 struct slave *tmp;
485
486 bond_for_each_slave_rcu(bond, tmp, iter)
498 if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) 487 if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
499 return tmp; 488 return tmp;
500 489
@@ -528,4 +517,7 @@ extern const struct bond_parm_tbl fail_over_mac_tbl[];
528extern const struct bond_parm_tbl pri_reselect_tbl[]; 517extern const struct bond_parm_tbl pri_reselect_tbl[];
529extern struct bond_parm_tbl ad_select_tbl[]; 518extern struct bond_parm_tbl ad_select_tbl[];
530 519
520/* exported from bond_netlink.c */
521extern struct rtnl_link_ops bond_link_ops;
522
531#endif /* _LINUX_BONDING_H */ 523#endif /* _LINUX_BONDING_H */
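Note: with struct slave's own list_head and bond->slave_list removed, the slave list in bonding.h is now derived from the master net_device's lower-device adjacency list: bond_has_slaves(), bond_first_slave() and the iteration macros are thin wrappers over the netdev_* adjacency helpers, and per-device lookup becomes netdev_lower_dev_get_private(). The sketch below shows the new RCU MAC-lookup helper in use under its documented locking; example_mac_is_enslaved() is a hypothetical function.

#include <linux/types.h>
#include <linux/rcupdate.h>
#include "bonding.h"

/* hypothetical helper; the non-RCU variant instead requires RTNL or bond->lock */
static bool example_mac_is_enslaved(struct bonding *bond, const u8 *mac)
{
	struct slave *slave;
	bool found;

	rcu_read_lock();
	slave = bond_slave_has_mac_rcu(bond, mac);
	found = slave != NULL;
	rcu_read_unlock();

	return found;
}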
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 693d8ffe4653..cf0f63e14e53 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1347,7 +1347,7 @@ static int at91_can_probe(struct platform_device *pdev)
1347 priv->reg_base = addr; 1347 priv->reg_base = addr;
1348 priv->devtype_data = *devtype_data; 1348 priv->devtype_data = *devtype_data;
1349 priv->clk = clk; 1349 priv->clk = clk;
1350 priv->pdata = pdev->dev.platform_data; 1350 priv->pdata = dev_get_platdata(&pdev->dev);
1351 priv->mb0_id = 0x7ff; 1351 priv->mb0_id = 0x7ff;
1352 1352
1353 netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv)); 1353 netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
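Note: this and several of the CAN hunks below replace direct pdev->dev.platform_data dereferences with the dev_get_platdata() accessor; behaviour is unchanged, it is simply the preferred way to reach platform data. A sketch of the pattern in a generic probe follows; struct example_platform_data and example_probe() are placeholders, not taken from any driver in this patch.

#include <linux/errno.h>
#include <linux/platform_device.h>

struct example_platform_data {		/* placeholder platform data */
	unsigned int clock_freq;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_platform_data *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no platform data provided\n");
		return -EINVAL;
	}
	/* use pdata->clock_freq exactly as pdev->dev.platform_data was used */
	return 0;
}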
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index a2700d25ff0e..8a0b515b33ea 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -539,7 +539,7 @@ static int bfin_can_probe(struct platform_device *pdev)
539 struct resource *res_mem, *rx_irq, *tx_irq, *err_irq; 539 struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
540 unsigned short *pdata; 540 unsigned short *pdata;
541 541
542 pdata = pdev->dev.platform_data; 542 pdata = dev_get_platdata(&pdev->dev);
543 if (!pdata) { 543 if (!pdata) {
544 dev_err(&pdev->dev, "No platform data provided!\n"); 544 dev_err(&pdev->dev, "No platform data provided!\n");
545 err = -EINVAL; 545 err = -EINVAL;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index b374be7891a2..bce0be54c2f5 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -160,7 +160,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
160 return 0; 160 return 0;
161 161
162out_free_c_can: 162out_free_c_can:
163 pci_set_drvdata(pdev, NULL);
164 free_c_can_dev(dev); 163 free_c_can_dev(dev);
165out_iounmap: 164out_iounmap:
166 pci_iounmap(pdev, addr); 165 pci_iounmap(pdev, addr);
@@ -181,7 +180,6 @@ static void c_can_pci_remove(struct pci_dev *pdev)
181 180
182 unregister_c_can_dev(dev); 181 unregister_c_can_dev(dev);
183 182
184 pci_set_drvdata(pdev, NULL);
185 free_c_can_dev(dev); 183 free_c_can_dev(dev);
186 184
187 pci_iounmap(pdev, priv->base); 185 pci_iounmap(pdev, priv->base);
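Note: the pci_set_drvdata(pdev, NULL) calls dropped here and in several PCI hunks below rely on the driver core clearing drvdata once the device is unbound (including after a failed probe), so NULLing it by hand is redundant. A sketch of the resulting remove path is below; example_pci_remove() and its teardown order are illustrative only, not copied from any one driver in this patch.

#include <linux/pci.h>
#include <linux/netdevice.h>

/* illustrative remove path; the core clears drvdata after unbinding */
static void example_pci_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	unregister_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
	/* no pci_set_drvdata(pdev, NULL) needed here */
}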
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 294ced3cc227..d66ac265269c 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -322,7 +322,7 @@ static struct platform_driver c_can_plat_driver = {
322 .driver = { 322 .driver = {
323 .name = KBUILD_MODNAME, 323 .name = KBUILD_MODNAME,
324 .owner = THIS_MODULE, 324 .owner = THIS_MODULE,
325 .of_match_table = of_match_ptr(c_can_of_table), 325 .of_match_table = c_can_of_table,
326 }, 326 },
327 .probe = c_can_plat_probe, 327 .probe = c_can_plat_probe,
328 .remove = c_can_plat_remove, 328 .remove = c_can_plat_remove,
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 034bdd816a60..ad76734b3ecc 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -152,7 +152,7 @@ static int cc770_get_platform_data(struct platform_device *pdev,
152 struct cc770_priv *priv) 152 struct cc770_priv *priv)
153{ 153{
154 154
155 struct cc770_platform_data *pdata = pdev->dev.platform_data; 155 struct cc770_platform_data *pdata = dev_get_platdata(&pdev->dev);
156 156
157 priv->can.clock.freq = pdata->osc_freq; 157 priv->can.clock.freq = pdata->osc_freq;
158 if (priv->cpu_interface & CPUIF_DSC) 158 if (priv->cpu_interface & CPUIF_DSC)
@@ -203,7 +203,7 @@ static int cc770_platform_probe(struct platform_device *pdev)
203 203
204 if (pdev->dev.of_node) 204 if (pdev->dev.of_node)
205 err = cc770_get_of_node_data(pdev, priv); 205 err = cc770_get_of_node_data(pdev, priv);
206 else if (pdev->dev.platform_data) 206 else if (dev_get_platdata(&pdev->dev))
207 err = cc770_get_platform_data(pdev, priv); 207 err = cc770_get_platform_data(pdev, priv);
208 else 208 else
209 err = -ENODEV; 209 err = -ENODEV;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 8f5ce747feb5..ae08cf129ebb 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1068,7 +1068,7 @@ static int flexcan_probe(struct platform_device *pdev)
1068 priv->dev = dev; 1068 priv->dev = dev;
1069 priv->clk_ipg = clk_ipg; 1069 priv->clk_ipg = clk_ipg;
1070 priv->clk_per = clk_per; 1070 priv->clk_per = clk_per;
1071 priv->pdata = pdev->dev.platform_data; 1071 priv->pdata = dev_get_platdata(&pdev->dev);
1072 priv->devtype_data = devtype_data; 1072 priv->devtype_data = devtype_data;
1073 1073
1074 priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); 1074 priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 36bd6fa1c7f3..ab5909a7bae9 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1769,7 +1769,7 @@ static int ican3_probe(struct platform_device *pdev)
1769 struct device *dev; 1769 struct device *dev;
1770 int ret; 1770 int ret;
1771 1771
1772 pdata = pdev->dev.platform_data; 1772 pdata = dev_get_platdata(&pdev->dev);
1773 if (!pdata) 1773 if (!pdata)
1774 return -ENXIO; 1774 return -ENXIO;
1775 1775
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index fe7dd696957e..08ac401e0214 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -999,7 +999,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
999{ 999{
1000 struct net_device *net; 1000 struct net_device *net;
1001 struct mcp251x_priv *priv; 1001 struct mcp251x_priv *priv;
1002 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 1002 struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
1003 int ret = -ENODEV; 1003 int ret = -ENODEV;
1004 1004
1005 if (!pdata) 1005 if (!pdata)
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 9c24d60a23b1..e98abb97a050 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -297,8 +297,8 @@ struct mscan_priv {
297 struct napi_struct napi; 297 struct napi_struct napi;
298}; 298};
299 299
300extern struct net_device *alloc_mscandev(void); 300struct net_device *alloc_mscandev(void);
301extern int register_mscandev(struct net_device *dev, int mscan_clksrc); 301int register_mscandev(struct net_device *dev, int mscan_clksrc);
302extern void unregister_mscandev(struct net_device *dev); 302void unregister_mscandev(struct net_device *dev);
303 303
304#endif /* __MSCAN_H__ */ 304#endif /* __MSCAN_H__ */
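Note: the header hunks in mscan.h, softing.h, 8390.h, bfin_mac.h and 7990.h all make the same change: the extern storage-class specifier is dropped from function prototypes, where it is redundant because function declarations have external linkage by default. The two declarations below are equivalent; example_fn() is a placeholder name.

struct net_device;				/* forward declaration */

extern int example_fn(struct net_device *dev);	/* old style */
int example_fn(struct net_device *dev);		/* new style, same linkage */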
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 5c314a961970..5f0e9b3bfa7b 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -964,7 +964,6 @@ static void pch_can_remove(struct pci_dev *pdev)
964 pci_disable_msi(priv->dev); 964 pci_disable_msi(priv->dev);
965 pci_release_regions(pdev); 965 pci_release_regions(pdev);
966 pci_disable_device(pdev); 966 pci_disable_device(pdev);
967 pci_set_drvdata(pdev, NULL);
968 pch_can_reset(priv); 967 pch_can_reset(priv);
969 pci_iounmap(pdev, priv->regs); 968 pci_iounmap(pdev, priv->regs);
970 free_candev(priv->ndev); 969 free_candev(priv->ndev);
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 3752342a678a..835921388e7b 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -207,7 +207,6 @@ static void ems_pci_del_card(struct pci_dev *pdev)
207 kfree(card); 207 kfree(card);
208 208
209 pci_disable_device(pdev); 209 pci_disable_device(pdev);
210 pci_set_drvdata(pdev, NULL);
211} 210}
212 211
213static void ems_pci_card_reset(struct ems_pci_card *card) 212static void ems_pci_card_reset(struct ems_pci_card *card)
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 217585b97cd3..087b13bd300e 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -387,7 +387,6 @@ static void kvaser_pci_remove_one(struct pci_dev *pdev)
387 387
388 pci_release_regions(pdev); 388 pci_release_regions(pdev);
389 pci_disable_device(pdev); 389 pci_disable_device(pdev);
390 pci_set_drvdata(pdev, NULL);
391} 390}
392 391
393static struct pci_driver kvaser_pci_driver = { 392static struct pci_driver kvaser_pci_driver = {
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 6b6f0ad75090..065ca49eb45e 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -744,8 +744,6 @@ static void peak_pci_remove(struct pci_dev *pdev)
744 pci_iounmap(pdev, cfg_base); 744 pci_iounmap(pdev, cfg_base);
745 pci_release_regions(pdev); 745 pci_release_regions(pdev);
746 pci_disable_device(pdev); 746 pci_disable_device(pdev);
747
748 pci_set_drvdata(pdev, NULL);
749} 747}
750 748
751static struct pci_driver peak_pci_driver = { 749static struct pci_driver peak_pci_driver = {
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index c52c1e96bf90..f9b4f81cd86a 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -477,7 +477,6 @@ static void plx_pci_del_card(struct pci_dev *pdev)
477 kfree(card); 477 kfree(card);
478 478
479 pci_disable_device(pdev); 479 pci_disable_device(pdev);
480 pci_set_drvdata(pdev, NULL);
481} 480}
482 481
483/* 482/*
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 8e259c541036..29f9b6321187 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -76,7 +76,7 @@ static int sp_probe(struct platform_device *pdev)
76 struct resource *res_mem, *res_irq; 76 struct resource *res_mem, *res_irq;
77 struct sja1000_platform_data *pdata; 77 struct sja1000_platform_data *pdata;
78 78
79 pdata = pdev->dev.platform_data; 79 pdata = dev_get_platdata(&pdev->dev);
80 if (!pdata) { 80 if (!pdata) {
81 dev_err(&pdev->dev, "No platform data provided!\n"); 81 dev_err(&pdev->dev, "No platform data provided!\n");
82 err = -ENODEV; 82 err = -ENODEV;
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
index afd7d85b6915..35f062282dbd 100644
--- a/drivers/net/can/softing/softing.h
+++ b/drivers/net/can/softing/softing.h
@@ -71,34 +71,34 @@ struct softing {
71 } id; 71 } id;
72}; 72};
73 73
74extern int softing_default_output(struct net_device *netdev); 74int softing_default_output(struct net_device *netdev);
75 75
76extern ktime_t softing_raw2ktime(struct softing *card, u32 raw); 76ktime_t softing_raw2ktime(struct softing *card, u32 raw);
77 77
78extern int softing_chip_poweron(struct softing *card); 78int softing_chip_poweron(struct softing *card);
79 79
80extern int softing_bootloader_command(struct softing *card, int16_t cmd, 80int softing_bootloader_command(struct softing *card, int16_t cmd,
81 const char *msg); 81 const char *msg);
82 82
83/* Load firmware after reset */ 83/* Load firmware after reset */
84extern int softing_load_fw(const char *file, struct softing *card, 84int softing_load_fw(const char *file, struct softing *card,
85 __iomem uint8_t *virt, unsigned int size, int offset); 85 __iomem uint8_t *virt, unsigned int size, int offset);
86 86
87/* Load final application firmware after bootloader */ 87/* Load final application firmware after bootloader */
88extern int softing_load_app_fw(const char *file, struct softing *card); 88int softing_load_app_fw(const char *file, struct softing *card);
89 89
90/* 90/*
91 * enable or disable irq 91 * enable or disable irq
92 * only called with fw.lock locked 92 * only called with fw.lock locked
93 */ 93 */
94extern int softing_enable_irq(struct softing *card, int enable); 94int softing_enable_irq(struct softing *card, int enable);
95 95
96/* start/stop 1 bus on card */ 96/* start/stop 1 bus on card */
97extern int softing_startstop(struct net_device *netdev, int up); 97int softing_startstop(struct net_device *netdev, int up);
98 98
99/* netif_rx() */ 99/* netif_rx() */
100extern int softing_netdev_rx(struct net_device *netdev, 100int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
101 const struct can_frame *msg, ktime_t ktime); 101 ktime_t ktime);
102 102
103/* SOFTING DPRAM mappings */ 103/* SOFTING DPRAM mappings */
104#define DPRAM_RX 0x0000 104#define DPRAM_RX 0x0000
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 65eef1eea2e2..6cd5c01b624d 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -768,7 +768,7 @@ static int softing_pdev_remove(struct platform_device *pdev)
768 768
769static int softing_pdev_probe(struct platform_device *pdev) 769static int softing_pdev_probe(struct platform_device *pdev)
770{ 770{
771 const struct softing_platform_data *pdat = pdev->dev.platform_data; 771 const struct softing_platform_data *pdat = dev_get_platdata(&pdev->dev);
772 struct softing *card; 772 struct softing *card;
773 struct net_device *netdev; 773 struct net_device *netdev;
774 struct softing_priv *priv; 774 struct softing_priv *priv;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 3a349a22d5bc..beb5ef834f0f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -894,7 +894,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
894 void __iomem *addr; 894 void __iomem *addr;
895 int err = -ENODEV; 895 int err = -ENODEV;
896 896
897 pdata = pdev->dev.platform_data; 897 pdata = dev_get_platdata(&pdev->dev);
898 if (!pdata) { 898 if (!pdata) {
899 dev_err(&pdev->dev, "No platform data\n"); 899 dev_err(&pdev->dev, "No platform data\n");
900 goto probe_exit; 900 goto probe_exit;
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index f00c76377b44..65b735d4a6ad 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -35,7 +35,7 @@ config EL3
35 35
36config 3C515 36config 3C515
37 tristate "3c515 ISA \"Fast EtherLink\"" 37 tristate "3c515 ISA \"Fast EtherLink\""
38 depends on (ISA || EISA) && ISA_DMA_API 38 depends on ISA && ISA_DMA_API
39 ---help--- 39 ---help---
40 If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet 40 If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
41 network card, say Y and read the Ethernet-HOWTO, available from 41 network card, say Y and read the Ethernet-HOWTO, available from
@@ -70,7 +70,7 @@ config VORTEX
70 select MII 70 select MII
71 ---help--- 71 ---help---
72 This option enables driver support for a large number of 10Mbps and 72 This option enables driver support for a large number of 10Mbps and
73 10/100Mbps EISA, PCI and PCMCIA 3Com network cards: 73 10/100Mbps EISA, PCI and Cardbus 3Com network cards:
74 74
75 "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI 75 "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
76 "Boomerang" (EtherLink XL 3c900 or 3c905) PCI 76 "Boomerang" (EtherLink XL 3c900 or 3c905) PCI
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 144942f6372b..465cc7108d8a 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2525,7 +2525,6 @@ typhoon_remove_one(struct pci_dev *pdev)
2525 pci_release_regions(pdev); 2525 pci_release_regions(pdev);
2526 pci_clear_mwi(pdev); 2526 pci_clear_mwi(pdev);
2527 pci_disable_device(pdev); 2527 pci_disable_device(pdev);
2528 pci_set_drvdata(pdev, NULL);
2529 free_netdev(dev); 2528 free_netdev(dev);
2530} 2529}
2531 2530
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index ef325ffa1b5a..2923c51bb351 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -28,42 +28,42 @@ extern int ei_debug;
28#endif 28#endif
29 29
30#ifdef CONFIG_NET_POLL_CONTROLLER 30#ifdef CONFIG_NET_POLL_CONTROLLER
31extern void ei_poll(struct net_device *dev); 31void ei_poll(struct net_device *dev);
32extern void eip_poll(struct net_device *dev); 32void eip_poll(struct net_device *dev);
33#endif 33#endif
34 34
35 35
36/* Without I/O delay - non ISA or later chips */ 36/* Without I/O delay - non ISA or later chips */
37extern void NS8390_init(struct net_device *dev, int startp); 37void NS8390_init(struct net_device *dev, int startp);
38extern int ei_open(struct net_device *dev); 38int ei_open(struct net_device *dev);
39extern int ei_close(struct net_device *dev); 39int ei_close(struct net_device *dev);
40extern irqreturn_t ei_interrupt(int irq, void *dev_id); 40irqreturn_t ei_interrupt(int irq, void *dev_id);
41extern void ei_tx_timeout(struct net_device *dev); 41void ei_tx_timeout(struct net_device *dev);
42extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev); 42netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
43extern void ei_set_multicast_list(struct net_device *dev); 43void ei_set_multicast_list(struct net_device *dev);
44extern struct net_device_stats *ei_get_stats(struct net_device *dev); 44struct net_device_stats *ei_get_stats(struct net_device *dev);
45 45
46extern const struct net_device_ops ei_netdev_ops; 46extern const struct net_device_ops ei_netdev_ops;
47 47
48extern struct net_device *__alloc_ei_netdev(int size); 48struct net_device *__alloc_ei_netdev(int size);
49static inline struct net_device *alloc_ei_netdev(void) 49static inline struct net_device *alloc_ei_netdev(void)
50{ 50{
51 return __alloc_ei_netdev(0); 51 return __alloc_ei_netdev(0);
52} 52}
53 53
54/* With I/O delay form */ 54/* With I/O delay form */
55extern void NS8390p_init(struct net_device *dev, int startp); 55void NS8390p_init(struct net_device *dev, int startp);
56extern int eip_open(struct net_device *dev); 56int eip_open(struct net_device *dev);
57extern int eip_close(struct net_device *dev); 57int eip_close(struct net_device *dev);
58extern irqreturn_t eip_interrupt(int irq, void *dev_id); 58irqreturn_t eip_interrupt(int irq, void *dev_id);
59extern void eip_tx_timeout(struct net_device *dev); 59void eip_tx_timeout(struct net_device *dev);
60extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev); 60netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
61extern void eip_set_multicast_list(struct net_device *dev); 61void eip_set_multicast_list(struct net_device *dev);
62extern struct net_device_stats *eip_get_stats(struct net_device *dev); 62struct net_device_stats *eip_get_stats(struct net_device *dev);
63 63
64extern const struct net_device_ops eip_netdev_ops; 64extern const struct net_device_ops eip_netdev_ops;
65 65
66extern struct net_device *__alloc_eip_netdev(int size); 66struct net_device *__alloc_eip_netdev(int size);
67static inline struct net_device *alloc_eip_netdev(void) 67static inline struct net_device *alloc_eip_netdev(void)
68{ 68{
69 return __alloc_eip_netdev(0); 69 return __alloc_eip_netdev(0);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index f92f001551da..36fa577970bb 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -702,7 +702,7 @@ static int ax_init_dev(struct net_device *dev)
702 for (i = 0; i < 16; i++) 702 for (i = 0; i < 16; i++)
703 SA_prom[i] = SA_prom[i+i]; 703 SA_prom[i] = SA_prom[i+i];
704 704
705 memcpy(dev->dev_addr, SA_prom, 6); 705 memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
706 } 706 }
707 707
708#ifdef CONFIG_AX88796_93CX6 708#ifdef CONFIG_AX88796_93CX6
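Note: the MAC-address copies in this and the following Ethernet hunks switch from the magic length 6 to ETH_ALEN. A minimal sketch of the form used; example_set_mac() and the addr buffer are placeholders.

#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void example_set_mac(struct net_device *dev, const u8 *addr)
{
	/* ETH_ALEN is 6, from <linux/if_ether.h>; avoids the bare constant */
	memcpy(dev->dev_addr, addr, ETH_ALEN);
}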
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 92201080e07a..fc14a85e4d5f 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -389,9 +389,7 @@ err_out_free_netdev:
389 free_netdev (dev); 389 free_netdev (dev);
390err_out_free_res: 390err_out_free_res:
391 release_region (ioaddr, NE_IO_EXTENT); 391 release_region (ioaddr, NE_IO_EXTENT);
392 pci_set_drvdata (pdev, NULL);
393 return -ENODEV; 392 return -ENODEV;
394
395} 393}
396 394
397/* 395/*
@@ -655,7 +653,6 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev)
655 release_region(dev->base_addr, NE_IO_EXTENT); 653 release_region(dev->base_addr, NE_IO_EXTENT);
656 free_netdev(dev); 654 free_netdev(dev);
657 pci_disable_device(pdev); 655 pci_disable_device(pdev);
658 pci_set_drvdata(pdev, NULL);
659} 656}
660 657
661#ifdef CONFIG_PM 658#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8b04bfc20cfb..171d73c1d3c2 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -835,7 +835,6 @@ static int starfire_init_one(struct pci_dev *pdev,
835 return 0; 835 return 0;
836 836
837err_out_cleardev: 837err_out_cleardev:
838 pci_set_drvdata(pdev, NULL);
839 iounmap(base); 838 iounmap(base);
840err_out_free_res: 839err_out_free_res:
841 pci_release_regions (pdev); 840 pci_release_regions (pdev);
@@ -2012,7 +2011,6 @@ static void starfire_remove_one(struct pci_dev *pdev)
2012 iounmap(np->base); 2011 iounmap(np->base);
2013 pci_release_regions(pdev); 2012 pci_release_regions(pdev);
2014 2013
2015 pci_set_drvdata(pdev, NULL);
2016 free_netdev(dev); /* Will also free np!! */ 2014 free_netdev(dev); /* Will also free np!! */
2017} 2015}
2018 2016
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 7a07ee07906b..6dec86ac97cd 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -104,6 +104,6 @@ struct bfin_mac_local {
104#endif 104#endif
105}; 105};
106 106
107extern int bfin_get_ether_addr(char *addr); 107int bfin_get_ether_addr(char *addr);
108 108
109#endif 109#endif
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 0a5837b96421..ae33a99bf476 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -242,13 +242,13 @@ struct lance_private
242#define LANCE_ADDR(x) ((int)(x) & ~0xff000000) 242#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
243 243
244/* Now the prototypes we export */ 244/* Now the prototypes we export */
245extern int lance_open(struct net_device *dev); 245int lance_open(struct net_device *dev);
246extern int lance_close (struct net_device *dev); 246int lance_close (struct net_device *dev);
247extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev); 247int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
248extern void lance_set_multicast (struct net_device *dev); 248void lance_set_multicast (struct net_device *dev);
249extern void lance_tx_timeout(struct net_device *dev); 249void lance_tx_timeout(struct net_device *dev);
250#ifdef CONFIG_NET_POLL_CONTROLLER 250#ifdef CONFIG_NET_POLL_CONTROLLER
251extern void lance_poll(struct net_device *dev); 251void lance_poll(struct net_device *dev);
252#endif 252#endif
253 253
254#endif /* ndef _7990_H */ 254#endif /* ndef _7990_H */
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 1b1429d5d5c2..d042511bdc13 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1711,7 +1711,6 @@ static void amd8111e_remove_one(struct pci_dev *pdev)
 free_netdev(dev);
 pci_release_regions(pdev);
 pci_disable_device(pdev);
-pci_set_drvdata(pdev, NULL);
 }
 }
 static void amd8111e_config_ipg(struct net_device* dev)
@@ -1967,7 +1966,6 @@ err_free_reg:

 err_disable_pdev:
 pci_disable_device(pdev);
-pci_set_drvdata(pdev, NULL);
 return err;

 }
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 10ceca523fc0..e07ce5ff2d48 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -586,10 +586,10 @@ static unsigned long __init lance_probe1( struct net_device *dev,
 switch( lp->cardtype ) {
 case OLD_RIEBL:
 /* No ethernet address! (Set some default address) */
-memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
+memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
 break;
 case NEW_RIEBL:
-lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
+lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
 break;
 case PAM_CARD:
 i = IO->eeprom;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 91d52b495848..427c148bb643 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1138,7 +1138,7 @@ static int au1000_probe(struct platform_device *pdev)
 aup->phy1_search_mac0 = 1;
 } else {
 if (is_valid_ether_addr(pd->mac)) {
-memcpy(dev->dev_addr, pd->mac, 6);
+memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
 } else {
 /* Set a random MAC since no valid provided by platform_data. */
 eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 94edc9c6fbbf..cc35f6f4e703 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -344,8 +344,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
 }

 clen = len & 1;
-rtp = tp;
-rfp = fp;
+rtp = (unsigned char *)tp;
+rfp = (const unsigned char *)fp;
 while (clen--) {
 *rtp++ = *rfp++;
 }
@@ -372,8 +372,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
 * do the rest, if any.
 */
 clen = len & 15;
-rtp = (unsigned char *) tp;
-rfp = (unsigned char *) fp;
+rtp = (unsigned char *)tp;
+rfp = (const unsigned char *)fp;
 while (clen--) {
 *rtp++ = *rfp++;
 }
@@ -403,8 +403,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)

 clen = len & 1;

-rtp = tp;
-rfp = fp;
+rtp = (unsigned char *)tp;
+rfp = (const unsigned char *)fp;

 while (clen--) {
 *rtp++ = *rfp++;
@@ -433,8 +433,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
 * do the rest, if any.
 */
 clen = len & 15;
-rtp = (unsigned char *) tp;
-rfp = (unsigned char *) fp;
+rtp = (unsigned char *)tp;
+rfp = (const unsigned char *)fp;
 while (clen--) {
 *rtp++ = *rfp++;
 }
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 5c728436b85e..256f590f6bb1 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -754,7 +754,7 @@ lance_open(struct net_device *dev)
 int i;

 if (dev->irq == 0 ||
-request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
+request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
 return -EAGAIN;
 }

diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 2d8e28819779..38492e0b704e 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1675,7 +1675,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 pr_cont(" warning: CSR address invalid,\n");
 pr_info(" using instead PROM address of");
 }
-memcpy(dev->dev_addr, promaddr, 6);
+memcpy(dev->dev_addr, promaddr, ETH_ALEN);
 }
 }

@@ -2818,7 +2818,6 @@ static void pcnet32_remove_one(struct pci_dev *pdev)
 lp->init_block, lp->init_dma_addr);
 free_netdev(dev);
 pci_disable_device(pdev);
-pci_set_drvdata(pdev, NULL);
 }
 }

diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a597b766f080..daae0e016253 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1220,8 +1220,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
 if (skb != NULL) {
 data = skb_put(skb, ETHERMINPACKET);
 memset(data, 0, ETHERMINPACKET);
-memcpy(data, dev->dev_addr, 6);
-memcpy(data+6, dev->dev_addr, 6);
+memcpy(data, dev->dev_addr, ETH_ALEN);
+memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
 bmac_transmit_packet(skb, dev);
 }
 spin_unlock_irqrestore(&bp->lock, flags);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index fc95b235e210..5aa5e8146496 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1367,7 +1367,6 @@ static void alx_remove(struct pci_dev *pdev)

 pci_disable_pcie_error_reporting(pdev);
 pci_disable_device(pdev);
-pci_set_drvdata(pdev, NULL);

 free_netdev(alx->dev);
 }
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 0f0556526ba9..7f9369a3b378 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -600,7 +600,7 @@ struct atl1c_adapter {
 extern char atl1c_driver_name[];
 extern char atl1c_driver_version[];

-extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
-extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
-extern void atl1c_set_ethtool_ops(struct net_device *netdev);
+void atl1c_reinit_locked(struct atl1c_adapter *adapter);
+s32 atl1c_reset_hw(struct atl1c_hw *hw);
+void atl1c_set_ethtool_ops(struct net_device *netdev);
 #endif /* _ATL1C_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 3ef7092e3f1c..1cda49a28f7f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -153,7 +153,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
 bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
 {
 int i;
-int ret = false;
+bool ret = false;
 u32 otp_ctrl_data;
 u32 control;
 u32 data;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index b5fd934585e9..1b0fe2d04a0e 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -499,10 +499,10 @@ struct atl1e_adapter {
 extern char atl1e_driver_name[];
 extern char atl1e_driver_version[];

-extern void atl1e_check_options(struct atl1e_adapter *adapter);
-extern int atl1e_up(struct atl1e_adapter *adapter);
-extern void atl1e_down(struct atl1e_adapter *adapter);
-extern void atl1e_reinit_locked(struct atl1e_adapter *adapter);
-extern s32 atl1e_reset_hw(struct atl1e_hw *hw);
-extern void atl1e_set_ethtool_ops(struct net_device *netdev);
+void atl1e_check_options(struct atl1e_adapter *adapter);
+int atl1e_up(struct atl1e_adapter *adapter);
+void atl1e_down(struct atl1e_adapter *adapter);
+void atl1e_reinit_locked(struct atl1e_adapter *adapter);
+s32 atl1e_reset_hw(struct atl1e_hw *hw);
+void atl1e_set_ethtool_ops(struct net_device *netdev);
 #endif /* _ATL1_E_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1966444590f6..7a73f3a9fcb5 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -313,6 +313,34 @@ static void atl1e_set_multi(struct net_device *netdev)
 }
 }

+static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
+{
+
+if (features & NETIF_F_RXALL) {
+/* enable RX of ALL frames */
+*mac_ctrl_data |= MAC_CTRL_DBG;
+} else {
+/* disable RX of ALL frames */
+*mac_ctrl_data &= ~MAC_CTRL_DBG;
+}
+}
+
+static void atl1e_rx_mode(struct net_device *netdev,
+netdev_features_t features)
+{
+struct atl1e_adapter *adapter = netdev_priv(netdev);
+u32 mac_ctrl_data = 0;
+
+netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+atl1e_irq_disable(adapter);
+mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
+__atl1e_rx_mode(features, &mac_ctrl_data);
+AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+atl1e_irq_enable(adapter);
+}
+
+
 static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
@@ -394,6 +422,10 @@ static int atl1e_set_features(struct net_device *netdev,
 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 atl1e_vlan_mode(netdev, features);

+if (changed & NETIF_F_RXALL)
+atl1e_rx_mode(netdev, features);
+
+
 return 0;
 }

@@ -1057,7 +1089,8 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
 value |= MAC_CTRL_PROMIS_EN;
 if (netdev->flags & IFF_ALLMULTI)
 value |= MAC_CTRL_MC_ALL_EN;
-
+if (netdev->features & NETIF_F_RXALL)
+value |= MAC_CTRL_DBG;
 AT_WRITE_REG(hw, REG_MAC_CTRL, value);
 }

@@ -1405,7 +1438,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
 rx_page_desc[que].rx_nxseq++;

 /* error packet */
-if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
+if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
+!(netdev->features & NETIF_F_RXALL)) {
 if (prrs->err_flag & (RRS_ERR_BAD_CRC |
 RRS_ERR_DRIBBLE | RRS_ERR_CODE |
 RRS_ERR_TRUNC)) {
@@ -1418,7 +1452,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
 }

 packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
-RRS_PKT_SIZE_MASK) - 4; /* CRC */
+RRS_PKT_SIZE_MASK);
+if (likely(!(netdev->features & NETIF_F_RXFCS)))
+packet_size -= 4; /* CRC */
+
 skb = netdev_alloc_skb_ip_align(netdev, packet_size);
 if (skb == NULL)
 goto skip_pkt;
@@ -2245,7 +2282,8 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
 NETIF_F_HW_VLAN_CTAG_RX;
 netdev->features = netdev->hw_features | NETIF_F_LLTX |
 NETIF_F_HW_VLAN_CTAG_TX;
-
+/* not enabled by default */
+netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
 return 0;
 }

diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
index 3ebe19f7242b..2f27d4c4c3ad 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.h
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
@@ -42,7 +42,7 @@
 #include "atlx.h"

 #ifdef ETHTOOL_OPS_COMPAT
-extern int ethtool_ioctl(struct ifreq *ifr);
+int ethtool_ioctl(struct ifreq *ifr);
 #endif

 #define PCI_COMMAND_REGISTER PCI_COMMAND
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 9b017d9c58e9..079a597fa20c 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -596,6 +596,7 @@ static void b44_timer(unsigned long __opaque)
 static void b44_tx(struct b44 *bp)
 {
 u32 cur, cons;
+unsigned bytes_compl = 0, pkts_compl = 0;

 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
 cur /= sizeof(struct dma_desc);
@@ -612,9 +613,14 @@ static void b44_tx(struct b44 *bp)
 skb->len,
 DMA_TO_DEVICE);
 rp->skb = NULL;
+
+bytes_compl += skb->len;
+pkts_compl++;
+
 dev_kfree_skb_irq(skb);
 }

+netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
 bp->tx_cons = cons;
 if (netif_queue_stopped(bp->dev) &&
 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
@@ -1018,6 +1024,8 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 if (bp->flags & B44_FLAG_REORDER_BUG)
 br32(bp, B44_DMATX_PTR);

+netdev_sent_queue(dev, skb->len);
+
 if (TX_BUFFS_AVAIL(bp) < 1)
 netif_stop_queue(dev);

@@ -1416,6 +1424,8 @@ static void b44_init_hw(struct b44 *bp, int reset_kind)

 val = br32(bp, B44_ENET_CTRL);
 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+
+netdev_reset_queue(bp->dev);
 }

 static int b44_open(struct net_device *dev)
@@ -2101,7 +2111,7 @@ static int b44_get_invariants(struct b44 *bp)
 * valid PHY address. */
 bp->phy_addr &= 0x1F;

-memcpy(bp->dev->dev_addr, addr, 6);
+memcpy(bp->dev->dev_addr, addr, ETH_ALEN);

 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
 pr_err("Invalid MAC address found in EEPROM\n");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 249468f95365..7eca5a174733 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
 dma_desc->ctl0 = cpu_to_le32(ctl0);
 dma_desc->ctl1 = cpu_to_le32(ctl1);

+netdev_sent_queue(net_dev, skb->len);
+
 wmb();

 /* Increase ring->end to point empty slot. We tell hardware the first
@@ -178,6 +180,7 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 struct device *dma_dev = bgmac->core->dma_dev;
 int empty_slot;
 bool freed = false;
+unsigned bytes_compl = 0, pkts_compl = 0;

 /* The last slot that hardware didn't consume yet */
 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
@@ -195,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 slot->skb->len, DMA_TO_DEVICE);
 slot->dma_addr = 0;

+bytes_compl += slot->skb->len;
+pkts_compl++;
+
 /* Free memory! :) */
 dev_kfree_skb(slot->skb);
 slot->skb = NULL;
@@ -208,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 freed = true;
 }

+netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
 if (freed && netif_queue_stopped(bgmac->net_dev))
 netif_wake_queue(bgmac->net_dev);
 }
@@ -988,6 +996,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
 bgmac_miiconfig(bgmac);
 bgmac_phy_init(bgmac);

+netdev_reset_queue(bgmac->net_dev);
+
 bgmac->int_status = 0;
 }

diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e838a3f74b69..d9980ad00b4b 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5761,8 +5761,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 if (!skb)
 return -ENOMEM;
 packet = skb_put(skb, pkt_size);
-memcpy(packet, bp->dev->dev_addr, 6);
-memset(packet + 6, 0x0, 8);
+memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+memset(packet + ETH_ALEN, 0x0, 8);
 for (i = 14; i < pkt_size; i++)
 packet[i] = (unsigned char) (i & 0xff);

@@ -8413,7 +8413,6 @@ err_out_release:

 err_out_disable:
 pci_disable_device(pdev);
-pci_set_drvdata(pdev, NULL);

 err_out:
 return rc;
@@ -8514,7 +8513,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

 pci_set_drvdata(pdev, dev);

-memcpy(dev->dev_addr, bp->mac_addr, 6);
+memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);

 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
 NETIF_F_TSO | NETIF_F_TSO_ECN |
@@ -8546,7 +8545,6 @@ error:
 pci_iounmap(pdev, bp->regview);
 pci_release_regions(pdev);
 pci_disable_device(pdev);
-pci_set_drvdata(pdev, NULL);
 err_free:
 free_netdev(dev);
 return rc;
@@ -8578,7 +8576,6 @@ bnx2_remove_one(struct pci_dev *pdev)

 pci_release_regions(pdev);
 pci_disable_device(pdev);
-pci_set_drvdata(pdev, NULL);
 }

 static int
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index c5e375ddd6c0..4e01c57d8c8d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1546,6 +1546,7 @@ struct bnx2x {
 #define IS_VF_FLAG (1 << 22)
 #define INTERRUPTS_ENABLED_FLAG (1 << 23)
 #define BC_SUPPORTS_RMMOD_CMD (1 << 24)
+#define HAS_PHYS_PORT_ID (1 << 25)

 #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)

@@ -1876,6 +1877,8 @@ struct bnx2x {
 u32 dump_preset_idx;
 bool stats_started;
 struct semaphore stats_sema;
+
+u8 phys_port_id[ETH_ALEN];
 };

 /* Tx queues may be less or equal to Rx queues */
@@ -2232,7 +2235,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define BNX2X_NUM_TESTS_SF 7
 #define BNX2X_NUM_TESTS_MF 3
 #define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
-BNX2X_NUM_TESTS_SF)
+IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)

 #define BNX2X_PHY_LOOPBACK 0
 #define BNX2X_MAC_LOOPBACK 1
@@ -2492,12 +2495,6 @@ enum {

 #define NUM_MACS 8

-enum bnx2x_pci_bus_speed {
-BNX2X_PCI_LINK_SPEED_2500 = 2500,
-BNX2X_PCI_LINK_SPEED_5000 = 5000,
-BNX2X_PCI_LINK_SPEED_8000 = 8000
-};
-
 void bnx2x_set_local_cmng(struct bnx2x *bp);

 #define MCPR_SCRATCH_BASE(bp) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4ab4c89c60cd..6e46cff5236d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3256,14 +3256,16 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 if (prot == IPPROTO_TCP)
 rc |= XMIT_CSUM_TCP;

-if (skb_is_gso_v6(skb)) {
-rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
-if (rc & XMIT_CSUM_ENC)
-rc |= XMIT_GSO_ENC_V6;
-} else if (skb_is_gso(skb)) {
-rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
-if (rc & XMIT_CSUM_ENC)
-rc |= XMIT_GSO_ENC_V4;
+if (skb_is_gso(skb)) {
+if (skb_is_gso_v6(skb)) {
+rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+if (rc & XMIT_CSUM_ENC)
+rc |= XMIT_GSO_ENC_V6;
+} else {
+rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+if (rc & XMIT_CSUM_ENC)
+rc |= XMIT_GSO_ENC_V4;
+}
 }

 return rc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index e8efa1c93ffe..32d0f1435fb4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -639,6 +639,9 @@ static int bnx2x_get_regs_len(struct net_device *dev)
 struct bnx2x *bp = netdev_priv(dev);
 int regdump_len = 0;

+if (IS_VF(bp))
+return 0;
+
 regdump_len = __bnx2x_get_regs_len(bp);
 regdump_len *= 4;
 regdump_len += sizeof(struct dump_header);
@@ -2864,9 +2867,16 @@ static void bnx2x_self_test(struct net_device *dev,

 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));

+if (bnx2x_test_nvram(bp) != 0) {
+if (!IS_MF(bp))
+buf[4] = 1;
+else
+buf[0] = 1;
+etest->flags |= ETH_TEST_FL_FAILED;
+}
+
 if (!netif_running(dev)) {
-DP(BNX2X_MSG_ETHTOOL,
-"Can't perform self-test when interface is down\n");
+DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
 return;
 }

@@ -2928,13 +2938,7 @@ static void bnx2x_self_test(struct net_device *dev,
 /* wait until link state is restored */
 bnx2x_wait_for_link(bp, link_up, is_serdes);
 }
-if (bnx2x_test_nvram(bp) != 0) {
-if (!IS_MF(bp))
-buf[4] = 1;
-else
-buf[0] = 1;
-etest->flags |= ETH_TEST_FL_FAILED;
-}
+
 if (bnx2x_test_intr(bp) != 0) {
 if (!IS_MF(bp))
 buf[5] = 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 32767f6aa33f..cf1df8b62e2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -172,6 +172,7 @@ struct shared_hw_cfg { /* NVRAM Offset */
 #define SHARED_HW_CFG_LED_MAC4 0x000c0000
 #define SHARED_HW_CFG_LED_PHY8 0x000d0000
 #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+#define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000


 #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 51468227bf3b..20dcc02431ca 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3122,7 +3122,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params)
 }

 static int bnx2x_bsc_read(struct link_params *params,
-struct bnx2x_phy *phy,
+struct bnx2x *bp,
 u8 sl_devid,
 u16 sl_addr,
 u8 lc_addr,
@@ -3131,7 +3131,6 @@ static int bnx2x_bsc_read(struct link_params *params,
 {
 u32 val, i;
 int rc = 0;
-struct bnx2x *bp = params->bp;

 if (xfer_cnt > 16) {
 DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
@@ -6371,9 +6370,15 @@ int bnx2x_set_led(struct link_params *params,
 * intended override.
 */
 break;
-} else
+} else {
+u32 nig_led_mode = ((params->hw_led_mode <<
+SHARED_HW_CFG_LED_MODE_SHIFT) ==
+SHARED_HW_CFG_LED_EXTPHY2) ?
+(SHARED_HW_CFG_LED_PHY1 >>
+SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-hw_led_mode);
+nig_led_mode);
+}

 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
 /* Set blinking rate to ~15.9Hz */
@@ -7917,7 +7922,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 usleep_range(1000, 2000);
 bnx2x_warpcore_power_module(params, 1);
 }
-rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
+rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
 data_array);
 } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));

@@ -10653,10 +10658,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 0x40);

 } else {
+/* EXTPHY2 LED mode indicate that the 100M/1G/10G LED
+ * sources are all wired through LED1, rather than only
+ * 10G in other modes.
+ */
+val = ((params->hw_led_mode <<
+SHARED_HW_CFG_LED_MODE_SHIFT) ==
+SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
 bnx2x_cl45_write(bp, phy,
 MDIO_PMA_DEVAD,
 MDIO_PMA_REG_8481_LED1_MASK,
-0x80);
+val);

 /* Tell LED3 to blink on source */
 bnx2x_cl45_read(bp, phy,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b42f89ce02ef..bb2f20291509 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9916,7 +9916,7 @@ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
 {
 struct bnx2x_prev_path_list *tmp_list;
-int rc = false;
+bool rc = false;

 if (down_trylock(&bnx2x_prev_sem))
 return false;
@@ -11186,6 +11186,14 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 bnx2x_get_cnic_mac_hwinfo(bp);
 }

+if (!BP_NOMCP(bp)) {
+/* Read physical port identifier from shmem */
+val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
+bp->flags |= HAS_PHYS_PORT_ID;
+}
+
 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);

 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
@@ -11784,7 +11792,7 @@ static int bnx2x_open(struct net_device *dev)
 rc = bnx2x_nic_load(bp, LOAD_OPEN);
 if (rc)
 return rc;
-return bnx2x_open_epilog(bp);
+return 0;
 }

 /* called with rtnl_lock */
@@ -12082,6 +12090,20 @@ static int bnx2x_validate_addr(struct net_device *dev)
 return 0;
 }

+static int bnx2x_get_phys_port_id(struct net_device *netdev,
+struct netdev_phys_port_id *ppid)
+{
+struct bnx2x *bp = netdev_priv(netdev);
+
+if (!(bp->flags & HAS_PHYS_PORT_ID))
+return -EOPNOTSUPP;
+
+ppid->id_len = sizeof(bp->phys_port_id);
+memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
+
+return 0;
+}
+
 static const struct net_device_ops bnx2x_netdev_ops = {
 .ndo_open = bnx2x_open,
 .ndo_stop = bnx2x_close,
@@ -12111,6 +12133,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 #ifdef CONFIG_NET_RX_BUSY_POLL
 .ndo_busy_poll = bnx2x_low_latency_recv,
 #endif
+.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
 };

 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -12274,10 +12297,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
 if (!CHIP_IS_E1x(bp)) {
-dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
 dev->hw_enc_features =
 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+NETIF_F_GSO_IPIP |
+NETIF_F_GSO_SIT |
 NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
 }

@@ -12310,34 +12336,11 @@ err_out_release:

 err_out_disable:
 pci_disable_device(pdev);
-pci_set_drvdata(pdev, NULL);

 err_out:
 return rc;
 }

-static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
-enum bnx2x_pci_bus_speed *speed)
-{
-u32 link_speed, val = 0;
-
-pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
-*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
-
-link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
-
-switch (link_speed) {
-case 3:
-*speed = BNX2X_PCI_LINK_SPEED_8000;
-break;
-case 2:
-*speed = BNX2X_PCI_LINK_SPEED_5000;
-break;
-default:
-*speed = BNX2X_PCI_LINK_SPEED_2500;
-}
-}
-
 static int bnx2x_check_firmware(struct bnx2x *bp)
 {
 const struct firmware *firmware = bp->firmware;
@@ -12694,8 +12697,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 {
 struct net_device *dev = NULL;
 struct bnx2x *bp;
-int pcie_width;
-enum bnx2x_pci_bus_speed pcie_speed;
+enum pcie_link_width pcie_width;
+enum pci_bus_speed pcie_speed;
 int rc, max_non_def_sbs;
 int rx_count, tx_count, rss_count, doorbell_size;
 int max_cos_est;
@@ -12844,18 +12847,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 rtnl_unlock();
 }
-
-bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
-BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
-pcie_width, pcie_speed);
-
-BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
+pcie_speed == PCI_SPEED_UNKNOWN ||
+pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
+BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
+else
+BNX2X_DEV_INFO(
+"%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
 board_info[ent->driver_data].name,
 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
 pcie_width,
-pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
-pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
-pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
+pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
+pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
+pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
 "Unknown",
 dev->base_addr, bp->pdev->irq, dev->dev_addr);

@@ -12874,7 +12878,6 @@ init_one_exit:
 pci_release_regions(pdev);

 pci_disable_device(pdev);
-pci_set_drvdata(pdev, NULL);

 return rc;
 }
@@ -12957,7 +12960,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 pci_release_regions(pdev);

 pci_disable_device(pdev);
-pci_set_drvdata(pdev, NULL);
 }

 static void bnx2x_remove_one(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index bf08ad68b405..71fffad94aff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2802,7 +2802,7 @@ struct set_vf_state_cookie {
 u8 state;
 };

-void bnx2x_set_vf_state(void *cookie)
+static void bnx2x_set_vf_state(void *cookie)
 {
 struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;

@@ -3225,8 +3225,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
 pci_disable_sriov(bp->pdev);
 }

-int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
-struct pf_vf_bulletin_content **bulletin)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+struct bnx2x_virtf **vf,
+struct pf_vf_bulletin_content **bulletin)
 {
 if (bp->state != BNX2X_STATE_OPEN) {
 BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3642,29 +3643,6 @@ alloc_mem_err:
 return -ENOMEM;
 }

-int bnx2x_open_epilog(struct bnx2x *bp)
-{
-/* Enable sriov via delayed work. This must be done via delayed work
- * because it causes the probe of the vf devices to be run, which invoke
- * register_netdevice which must have rtnl lock taken. As we are holding
- * the lock right now, that could only work if the probe would not take
- * the lock. However, as the probe of the vf may be called from other
- * contexts as well (such as passthrough to vm fails) it can't assume
- * the lock is being held for it. Using delayed work here allows the
- * probe code to simply take the lock (i.e. wait for it to be released
- * if it is being held). We only want to do this if the number of VFs
- * was set before PF driver was loaded.
- */
-if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
-smp_mb__before_clear_bit();
-set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
-smp_mb__after_clear_bit();
-schedule_delayed_work(&bp->sp_rtnl_task, 0);
-}
-
-return 0;
-}
-
 void bnx2x_iov_channel_down(struct bnx2x *bp)
 {
 int vf_idx;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 059f0d460af2..1ff6a9366629 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -782,7 +782,6 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp)
 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
 void bnx2x_iov_channel_down(struct bnx2x *bp);
-int bnx2x_open_epilog(struct bnx2x *bp);

 #else /* CONFIG_BNX2X_SRIOV */

@@ -842,7 +841,6 @@ static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
 static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
-static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }

 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 28757dfacf0d..9199adf32d33 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -60,6 +60,30 @@ void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
 mutex_unlock(&bp->vf2pf_mutex);
 }

+/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */
+static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
+enum channel_tlvs req_tlv)
+{
+struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+do {
+if (tlv->type == req_tlv)
+return tlv;
+
+if (!tlv->length) {
+BNX2X_ERR("Found TLV with length 0\n");
+return NULL;
+}
+
+tlvs_list += tlv->length;
+tlv = (struct channel_tlv *)tlvs_list;
+} while (tlv->type != CHANNEL_TLV_LIST_END);
+
+DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
+
+return NULL;
+}
+
 /* list the types and lengths of the tlvs on the buffer */
 void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
 {
@@ -196,6 +220,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
 int rc = 0, attempts = 0;
 struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
 struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
 u32 vf_id;
 bool resources_acquired = false;

@@ -219,8 +244,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
 /* pf 2 vf bulletin board address */
 req->bulletin_addr = bp->pf2vf_bulletin_mapping;

+/* Request physical port identifier */
+bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
+
 /* add list termination tlv */
-bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+bnx2x_add_tlv(bp, req,
+req->first_tlv.tl.length + sizeof(struct channel_tlv),
+CHANNEL_TLV_LIST_END,
 sizeof(struct channel_list_end_tlv));

 /* output tlvs list */
@@ -287,6 +318,15 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
 }
 }

+/* Retrieve physical port id (if possible) */
+phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
+bnx2x_search_tlv_list(bp, resp,
+CHANNEL_TLV_PHYS_PORT_ID);
+if (phys_port_resp) {
+memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
+bp->flags |= HAS_PHYS_PORT_ID;
+}
+
 /* get HW info */
 bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
 bp->link_params.chip_id = bp->common.chip_id;
@@ -983,53 +1023,59 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
 return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
 }

-static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
+struct bnx2x_virtf *vf)
 {
 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
-u64 vf_addr;
-dma_addr_t pf_addr;
 u16 length, type;
-int rc;
-struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;

 /* prepare response */
 type = mbx->first_tlv.tl.type;
 length = type == CHANNEL_TLV_ACQUIRE ?
 sizeof(struct pfvf_acquire_resp_tlv) :
 sizeof(struct pfvf_general_resp_tlv);
-bnx2x_add_tlv(bp, resp, 0, type, length);
-resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
-bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
+bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
+bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
 sizeof(struct channel_list_end_tlv));
+}
+
+static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
+struct bnx2x_virtf *vf)
+{
+struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+dma_addr_t pf_addr;
+u64 vf_addr;
+int rc;
+
 bnx2x_dp_tlv_list(bp, resp);
 DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
 mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

+resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+
 /* send response */
 vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
 mbx->first_tlv.resp_msg_offset;
 pf_addr = mbx->msg_mapping +
 offsetof(struct bnx2x_vf_mbx_msg, resp);

-/* copy the response body, if there is one, before the header, as the vf
- * is sensitive to the header being written
+/* Copy the response buffer. The first u64 is written afterwards, as
+ * the vf is sensitive to the header being written
 */
-if (resp->hdr.tl.length > sizeof(u64)) {
-length = resp->hdr.tl.length - sizeof(u64);
-vf_addr += sizeof(u64);
-pf_addr += sizeof(u64);
-rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
-U64_HI(vf_addr),
-U64_LO(vf_addr),
-length/4);
-if (rc) {
-BNX2X_ERR("Failed to copy response body to VF %d\n",
-vf->abs_vfid);
-goto mbx_error;
-}
-vf_addr -= sizeof(u64);
-pf_addr -= sizeof(u64);
+vf_addr += sizeof(u64);
+pf_addr += sizeof(u64);
+rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+U64_HI(vf_addr),
+U64_LO(vf_addr),
+(sizeof(union pfvf_tlvs) - sizeof(u64))/4);
+if (rc) {
+BNX2X_ERR("Failed to copy response body to VF %d\n",
+vf->abs_vfid);
+goto mbx_error;
 }
+vf_addr -= sizeof(u64);
+pf_addr -= sizeof(u64);

 /* ack the FW */
 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
@@ -1060,6 +1106,36 @@ mbx_error:
 bnx2x_vf_release(bp, vf, false); /* non blocking */
 }

+static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
+struct bnx2x_virtf *vf)
+{
+bnx2x_vf_mbx_resp_single_tlv(bp, vf);
+bnx2x_vf_mbx_resp_send_msg(bp, vf);
+}
+
+static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
+struct bnx2x_virtf *vf,
+void *buffer,
+u16 *offset)
+{
+struct vfpf_port_phys_id_resp_tlv *port_id;
+
+if (!(bp->flags & HAS_PHYS_PORT_ID))
+return;
+
+bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
+sizeof(struct vfpf_port_phys_id_resp_tlv));
+
+port_id = (struct vfpf_port_phys_id_resp_tlv *)
+(((u8 *)buffer) + *offset);
+memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
+
+/* Offset should continue representing the offset to the tail
+ * of TLV data (outside this function scope)
+ */
+*offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
+}
+
 static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
 struct bnx2x_vf_mbx *mbx, int vfop_status)
 {
@@ -1067,6 +1143,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
 struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
 struct pf_vf_resc *resc = &resp->resc;
 u8 status = bnx2x_pfvf_status_codes(vfop_status);
+u16 length;

 memset(resp, 0, sizeof(*resp));

@@ -1140,9 +1217,24 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
 resc->hw_sbs[i].sb_qid);
 DP_CONT(BNX2X_MSG_IOV, "]\n");

+/* prepare response */
+length = sizeof(struct pfvf_acquire_resp_tlv);
+bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
+
+/* Handle possible VF requests for physical port identifiers.
+ * 'length' should continue to indicate the offset of the first empty
+ * place in the buffer (i.e., where next TLV should be inserted)
+ */
+if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+CHANNEL_TLV_PHYS_PORT_ID))
+bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+
+bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+sizeof(struct channel_list_end_tlv));
+
 /* send the response */
 vf->op_rc = vfop_status;
-bnx2x_vf_mbx_resp(bp, vf);
+bnx2x_vf_mbx_resp_send_msg(bp, vf);
 }

 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1874,6 +1966,9 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
 /* process the VF message header */
 mbx->first_tlv = mbx->msg->req.first_tlv;

+/* Clean response buffer to refrain from falsely seeing chains */
+memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
 /* dispatch the request (will prepare the response) */
 bnx2x_vf_mbx_request(bp, vf, mbx);
 goto mbx_done;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 1179fe06d0c7..208568bc7a71 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -188,6 +188,12 @@ struct pfvf_acquire_resp_tlv {
 } resc;
 };

+struct vfpf_port_phys_id_resp_tlv {
+struct channel_tlv tl;
+u8 id[ETH_ALEN];
+u8 padding[2];
+};
+
 #define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues
 * stats will be coalesced on
 * the leading RSS queue
@@ -398,6 +404,7 @@ enum channel_tlvs {
 CHANNEL_TLV_PF_SET_MAC,
 CHANNEL_TLV_PF_SET_VLAN,
 CHANNEL_TLV_UPDATE_RSS,
+CHANNEL_TLV_PHYS_PORT_ID,
 CHANNEL_TLV_MAX
 };

diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 99394bd49a13..f58a8b80302d 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -393,7 +393,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,

 csk->vlan_id = path_resp->vlan_id;

-memcpy(csk->ha, path_resp->mac_addr, 6);
+memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
 if (test_bit(SK_F_IPV6, &csk->flags))
 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
 sizeof(struct in6_addr));
@@ -5572,7 +5572,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
 cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

-memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);

 cp->cnic_ops = &cnic_bnx2x_ops;
 cp->start_hw = cnic_start_bnx2x_hw;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 0658b43e148c..ebbfe25acaa6 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -353,8 +353,8 @@ struct cnic_ulp_ops {
 atomic_t ref_count;
 };

-extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);

-extern int cnic_unregister_driver(int ulp_type);
+int cnic_unregister_driver(int ulp_type);

 #endif
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 12d961c4ebca..819d87c281bf 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)

 #define DRV_MODULE_NAME "tg3"
 #define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 133
+#define TG3_MIN_NUM 134
 #define DRV_MODULE_VERSION \
 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "Jul 29, 2013"
+#define DRV_MODULE_RELDATE "Sep 16, 2013"

 #define RESET_KIND_SHUTDOWN 0
 #define RESET_KIND_INIT 1
@@ -337,6 +337,11 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
+{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
+{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
+{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
+{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
+{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -1326,6 +1331,12 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
 return err;
 }

+static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
+{
+return tg3_writephy(tp, MII_TG3_MISC_SHDW,
+reg | val | MII_TG3_MISC_SHDW_WREN);
+}
+
 static int tg3_bmcr_reset(struct tg3 *tp)
 {
 u32 phy_control;
@@ -1364,7 +1375,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)

 spin_lock_bh(&tp->lock);

-if (tg3_readphy(tp, reg, &val))
+if (__tg3_readphy(tp, mii_id, reg, &val))
 val = -EIO;

 spin_unlock_bh(&tp->lock);
@@ -1379,7 +1390,7 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)

 spin_lock_bh(&tp->lock);

-if (tg3_writephy(tp, reg, val))
+if (__tg3_writephy(tp, mii_id, reg, val))
 ret = -EIO;

 spin_unlock_bh(&tp->lock);
@@ -1397,7 +1408,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
 u32 val;
 struct phy_device *phydev;

-phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 case PHY_ID_BCM50610:
 case PHY_ID_BCM50610M:
@@ -1502,6 +1513,13 @@ static int tg3_mdio_init(struct tg3 *tp)
 TG3_CPMU_PHY_STRAP_IS_SERDES;
 if (is_serdes)
 tp->phy_addr += 7;
+} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
+int addr;
+
+addr = ssb_gige_get_phyaddr(tp->pdev);
+if (addr < 0)
+return addr;
+tp->phy_addr = addr;
 } else
 tp->phy_addr = TG3_PHY_MII_ADDR;
1507 1525
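The repeated phy_map[TG3_PHY_MII_ADDR] to phy_map[tp->phy_addr] substitutions in the rest of this file follow from the hunk above: when the MAC is an SSB core sitting behind a roboswitch, the PHY address comes from ssb_gige_get_phyaddr() instead of the fixed MII address, so the lookups have to use the per-device value. A compact, stand-alone sketch of that selection, with an illustrative constant and plain booleans standing in for the tg3 flags:

#include <stdio.h>
#include <stdbool.h>

#define FIXED_MII_ADDR 1                /* illustrative stand-in value */

/* switch_addr plays the role of ssb_gige_get_phyaddr(); negative = error */
static int pick_phy_addr(bool is_ssb_core, bool has_roboswitch, int switch_addr)
{
        if (is_ssb_core && has_roboswitch)
                return switch_addr;     /* may be a negative errno */
        return FIXED_MII_ADDR;
}

int main(void)
{
        printf("%d\n", pick_phy_addr(true, true, 30));   /* 30 */
        printf("%d\n", pick_phy_addr(false, false, -1)); /* 1  */
        return 0;
}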
@@ -1522,7 +1540,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1522 tp->mdio_bus->read = &tg3_mdio_read; 1540 tp->mdio_bus->read = &tg3_mdio_read;
1523 tp->mdio_bus->write = &tg3_mdio_write; 1541 tp->mdio_bus->write = &tg3_mdio_write;
1524 tp->mdio_bus->reset = &tg3_mdio_reset; 1542 tp->mdio_bus->reset = &tg3_mdio_reset;
1525 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); 1543 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1526 tp->mdio_bus->irq = &tp->mdio_irq[0]; 1544 tp->mdio_bus->irq = &tp->mdio_irq[0];
1527 1545
1528 for (i = 0; i < PHY_MAX_ADDR; i++) 1546 for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1543,7 +1561,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1543 return i; 1561 return i;
1544 } 1562 }
1545 1563
1546 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 1564 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1547 1565
1548 if (!phydev || !phydev->drv) { 1566 if (!phydev || !phydev->drv) {
1549 dev_warn(&tp->pdev->dev, "No PHY devices\n"); 1567 dev_warn(&tp->pdev->dev, "No PHY devices\n");
@@ -1953,7 +1971,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1953 u32 old_tx_mode = tp->tx_mode; 1971 u32 old_tx_mode = tp->tx_mode;
1954 1972
1955 if (tg3_flag(tp, USE_PHYLIB)) 1973 if (tg3_flag(tp, USE_PHYLIB))
1956 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; 1974 autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1957 else 1975 else
1958 autoneg = tp->link_config.autoneg; 1976 autoneg = tp->link_config.autoneg;
1959 1977
@@ -1989,7 +2007,7 @@ static void tg3_adjust_link(struct net_device *dev)
1989 u8 oldflowctrl, linkmesg = 0; 2007 u8 oldflowctrl, linkmesg = 0;
1990 u32 mac_mode, lcl_adv, rmt_adv; 2008 u32 mac_mode, lcl_adv, rmt_adv;
1991 struct tg3 *tp = netdev_priv(dev); 2009 struct tg3 *tp = netdev_priv(dev);
1992 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 2010 struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1993 2011
1994 spin_lock_bh(&tp->lock); 2012 spin_lock_bh(&tp->lock);
1995 2013
@@ -2078,7 +2096,7 @@ static int tg3_phy_init(struct tg3 *tp)
2078 /* Bring the PHY back to a known state. */ 2096 /* Bring the PHY back to a known state. */
2079 tg3_bmcr_reset(tp); 2097 tg3_bmcr_reset(tp);
2080 2098
2081 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 2099 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2082 2100
2083 /* Attach the MAC to the PHY. */ 2101 /* Attach the MAC to the PHY. */
2084 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), 2102 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
@@ -2105,7 +2123,7 @@ static int tg3_phy_init(struct tg3 *tp)
2105 SUPPORTED_Asym_Pause); 2123 SUPPORTED_Asym_Pause);
2106 break; 2124 break;
2107 default: 2125 default:
2108 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 2126 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2109 return -EINVAL; 2127 return -EINVAL;
2110 } 2128 }
2111 2129
@@ -2123,7 +2141,7 @@ static void tg3_phy_start(struct tg3 *tp)
2123 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2141 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2124 return; 2142 return;
2125 2143
2126 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 2144 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2127 2145
2128 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 2146 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2129 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 2147 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
@@ -2143,13 +2161,13 @@ static void tg3_phy_stop(struct tg3 *tp)
2143 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2161 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2144 return; 2162 return;
2145 2163
2146 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 2164 phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2147} 2165}
2148 2166
2149static void tg3_phy_fini(struct tg3 *tp) 2167static void tg3_phy_fini(struct tg3 *tp)
2150{ 2168{
2151 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 2169 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2152 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 2170 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2153 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; 2171 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2154 } 2172 }
2155} 2173}
@@ -2218,25 +2236,21 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2218 return; 2236 return;
2219 } 2237 }
2220 2238
2221 reg = MII_TG3_MISC_SHDW_WREN | 2239 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2222 MII_TG3_MISC_SHDW_SCR5_SEL |
2223 MII_TG3_MISC_SHDW_SCR5_LPED |
2224 MII_TG3_MISC_SHDW_SCR5_DLPTLM | 2240 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2225 MII_TG3_MISC_SHDW_SCR5_SDTL | 2241 MII_TG3_MISC_SHDW_SCR5_SDTL |
2226 MII_TG3_MISC_SHDW_SCR5_C125OE; 2242 MII_TG3_MISC_SHDW_SCR5_C125OE;
2227 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) 2243 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2228 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; 2244 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2229 2245
2230 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); 2246 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2231 2247
2232 2248
2233 reg = MII_TG3_MISC_SHDW_WREN | 2249 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2234 MII_TG3_MISC_SHDW_APD_SEL |
2235 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2236 if (enable) 2250 if (enable)
2237 reg |= MII_TG3_MISC_SHDW_APD_ENABLE; 2251 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2238 2252
2239 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); 2253 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2240} 2254}
2241 2255
2242static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) 2256static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
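The two MISC_SHDW hunks above are a small consolidation: the shadow-register selector and the MII_TG3_MISC_SHDW_WREN write-enable bit are now ORed together in the new tg3_phy_shdw_write() helper instead of being repeated at every call site. A self-contained toy version of the same idea, with made-up bit values standing in for the real MII_TG3_MISC_SHDW_* masks:

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit values only; the real masks live in tg3.h. */
#define SHDW_WREN      0x8000u
#define SHDW_APD_SEL   0x2800u
#define APD_ENABLE     0x0020u

static uint32_t last_phy_write;

static int writephy(uint32_t val)       /* stand-in for tg3_writephy() */
{
        last_phy_write = val;
        return 0;
}

/* Selector, payload and write-enable combined in exactly one place. */
static int phy_shdw_write(uint32_t sel, uint32_t val)
{
        return writephy(sel | val | SHDW_WREN);
}

int main(void)
{
        phy_shdw_write(SHDW_APD_SEL, APD_ENABLE);
        printf("0x%04x\n", (unsigned)last_phy_write);   /* 0xa820 */
        return 0;
}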
@@ -4027,7 +4041,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
4027 struct phy_device *phydev; 4041 struct phy_device *phydev;
4028 u32 phyid, advertising; 4042 u32 phyid, advertising;
4029 4043
4030 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 4044 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4031 4045
4032 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4046 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4033 4047
@@ -9196,10 +9210,7 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9196 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 9210 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9197 } 9211 }
9198 9212
9199 if (err) 9213 return err;
9200 return err;
9201
9202 return 0;
9203} 9214}
9204 9215
9205static int tg3_set_mac_addr(struct net_device *dev, void *p) 9216static int tg3_set_mac_addr(struct net_device *dev, void *p)
@@ -11035,7 +11046,18 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
11035 name = tp->dev->name; 11046 name = tp->dev->name;
11036 else { 11047 else {
11037 name = &tnapi->irq_lbl[0]; 11048 name = &tnapi->irq_lbl[0];
11038 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); 11049 if (tnapi->tx_buffers && tnapi->rx_rcb)
11050 snprintf(name, IFNAMSIZ,
11051 "%s-txrx-%d", tp->dev->name, irq_num);
11052 else if (tnapi->tx_buffers)
11053 snprintf(name, IFNAMSIZ,
11054 "%s-tx-%d", tp->dev->name, irq_num);
11055 else if (tnapi->rx_rcb)
11056 snprintf(name, IFNAMSIZ,
11057 "%s-rx-%d", tp->dev->name, irq_num);
11058 else
11059 snprintf(name, IFNAMSIZ,
11060 "%s-%d", tp->dev->name, irq_num);
11039 name[IFNAMSIZ-1] = 0; 11061 name[IFNAMSIZ-1] = 0;
11040 } 11062 }
11041 11063
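The tg3_request_irq() hunk above makes MSI-X vector names self-describing in /proc/interrupts: a vector that services both a TX and an RX ring gets a -txrx- suffix, TX-only vectors get -tx-, RX-only vectors get -rx-, and anything else keeps the old bare -%d form. A stand-alone sketch of that naming rule:

#include <stdio.h>
#include <stdbool.h>

#define IFNAMSIZ 16

static void irq_name(char *name, const char *dev, int irq_num,
                     bool has_tx, bool has_rx)
{
        if (has_tx && has_rx)
                snprintf(name, IFNAMSIZ, "%s-txrx-%d", dev, irq_num);
        else if (has_tx)
                snprintf(name, IFNAMSIZ, "%s-tx-%d", dev, irq_num);
        else if (has_rx)
                snprintf(name, IFNAMSIZ, "%s-rx-%d", dev, irq_num);
        else
                snprintf(name, IFNAMSIZ, "%s-%d", dev, irq_num);
        name[IFNAMSIZ - 1] = 0;
}

int main(void)
{
        char buf[IFNAMSIZ];

        irq_name(buf, "eth0", 1, true, false);
        puts(buf);                              /* eth0-tx-1 */
        return 0;
}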
@@ -11907,7 +11929,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11907 struct phy_device *phydev; 11929 struct phy_device *phydev;
11908 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 11930 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11909 return -EAGAIN; 11931 return -EAGAIN;
11910 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 11932 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
11911 return phy_ethtool_gset(phydev, cmd); 11933 return phy_ethtool_gset(phydev, cmd);
11912 } 11934 }
11913 11935
@@ -11974,7 +11996,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11974 struct phy_device *phydev; 11996 struct phy_device *phydev;
11975 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 11997 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11976 return -EAGAIN; 11998 return -EAGAIN;
11977 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 11999 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
11978 return phy_ethtool_sset(phydev, cmd); 12000 return phy_ethtool_sset(phydev, cmd);
11979 } 12001 }
11980 12002
@@ -12093,12 +12115,10 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12093 12115
12094 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12116 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12095 12117
12096 spin_lock_bh(&tp->lock);
12097 if (device_may_wakeup(dp)) 12118 if (device_may_wakeup(dp))
12098 tg3_flag_set(tp, WOL_ENABLE); 12119 tg3_flag_set(tp, WOL_ENABLE);
12099 else 12120 else
12100 tg3_flag_clear(tp, WOL_ENABLE); 12121 tg3_flag_clear(tp, WOL_ENABLE);
12101 spin_unlock_bh(&tp->lock);
12102 12122
12103 return 0; 12123 return 0;
12104} 12124}
@@ -12131,7 +12151,7 @@ static int tg3_nway_reset(struct net_device *dev)
12131 if (tg3_flag(tp, USE_PHYLIB)) { 12151 if (tg3_flag(tp, USE_PHYLIB)) {
12132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12152 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12133 return -EAGAIN; 12153 return -EAGAIN;
12134 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 12154 r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12135 } else { 12155 } else {
12136 u32 bmcr; 12156 u32 bmcr;
12137 12157
@@ -12247,7 +12267,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
12247 u32 newadv; 12267 u32 newadv;
12248 struct phy_device *phydev; 12268 struct phy_device *phydev;
12249 12269
12250 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 12270 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12251 12271
12252 if (!(phydev->supported & SUPPORTED_Pause) || 12272 if (!(phydev->supported & SUPPORTED_Pause) ||
12253 (!(phydev->supported & SUPPORTED_Asym_Pause) && 12273 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
@@ -13194,8 +13214,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13194 return -ENOMEM; 13214 return -ENOMEM;
13195 13215
13196 tx_data = skb_put(skb, tx_len); 13216 tx_data = skb_put(skb, tx_len);
13197 memcpy(tx_data, tp->dev->dev_addr, 6); 13217 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13198 memset(tx_data + 6, 0x0, 8); 13218 memset(tx_data + ETH_ALEN, 0x0, 8);
13199 13219
13200 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13220 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13201 13221
@@ -13683,7 +13703,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13683 struct phy_device *phydev; 13703 struct phy_device *phydev;
13684 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 13704 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13685 return -EAGAIN; 13705 return -EAGAIN;
13686 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 13706 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13687 return phy_mii_ioctl(phydev, ifr, cmd); 13707 return phy_mii_ioctl(phydev, ifr, cmd);
13688 } 13708 }
13689 13709
@@ -14921,6 +14941,12 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14921 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) 14941 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14922 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 14942 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14923 LED_CTRL_MODE_PHY_2); 14943 LED_CTRL_MODE_PHY_2);
14944
14945 if (tg3_flag(tp, 5717_PLUS) ||
14946 tg3_asic_rev(tp) == ASIC_REV_5762)
14947 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
14948 LED_CTRL_BLINK_RATE_MASK;
14949
14924 break; 14950 break;
14925 14951
14926 case SHASTA_EXT_LED_MAC: 14952 case SHASTA_EXT_LED_MAC:
@@ -15759,9 +15785,12 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15759 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 15785 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15760 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 15786 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15761 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 15787 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15788 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
15789 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
15762 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 15790 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15763 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 15791 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15764 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) 15792 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
15793 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
15765 reg = TG3PCI_GEN2_PRODID_ASICREV; 15794 reg = TG3PCI_GEN2_PRODID_ASICREV;
15766 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || 15795 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15767 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || 15796 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
@@ -16632,8 +16661,8 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp)
16632 int len; 16661 int len;
16633 16662
16634 addr = of_get_property(dp, "local-mac-address", &len); 16663 addr = of_get_property(dp, "local-mac-address", &len);
16635 if (addr && len == 6) { 16664 if (addr && len == ETH_ALEN) {
16636 memcpy(dev->dev_addr, addr, 6); 16665 memcpy(dev->dev_addr, addr, ETH_ALEN);
16637 return 0; 16666 return 0;
16638 } 16667 }
16639 return -ENODEV; 16668 return -ENODEV;
@@ -16643,7 +16672,7 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16643{ 16672{
16644 struct net_device *dev = tp->dev; 16673 struct net_device *dev = tp->dev;
16645 16674
16646 memcpy(dev->dev_addr, idprom->id_ethaddr, 6); 16675 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16647 return 0; 16676 return 0;
16648} 16677}
16649#endif 16678#endif
@@ -17052,10 +17081,6 @@ static int tg3_test_dma(struct tg3 *tp)
17052 17081
17053 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17082 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17054 17083
17055#if 0
17056 /* Unneeded, already done by tg3_get_invariants. */
17057 tg3_switch_clocks(tp);
17058#endif
17059 17084
17060 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17085 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17061 tg3_asic_rev(tp) != ASIC_REV_5701) 17086 tg3_asic_rev(tp) != ASIC_REV_5701)
@@ -17083,20 +17108,6 @@ static int tg3_test_dma(struct tg3 *tp)
17083 break; 17108 break;
17084 } 17109 }
17085 17110
17086#if 0
17087 /* validate data reached card RAM correctly. */
17088 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17089 u32 val;
17090 tg3_read_mem(tp, 0x2100 + (i*4), &val);
17091 if (le32_to_cpu(val) != p[i]) {
17092 dev_err(&tp->pdev->dev,
17093 "%s: Buffer corrupted on device! "
17094 "(%d != %d)\n", __func__, val, i);
17095 /* ret = -ENODEV here? */
17096 }
17097 p[i] = 0;
17098 }
17099#endif
17100 /* Now read it back. */ 17111 /* Now read it back. */
17101 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); 17112 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17102 if (ret) { 17113 if (ret) {
@@ -17362,8 +17373,10 @@ static int tg3_init_one(struct pci_dev *pdev,
17362 tg3_flag_set(tp, FLUSH_POSTED_WRITES); 17373 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17363 if (ssb_gige_one_dma_at_once(pdev)) 17374 if (ssb_gige_one_dma_at_once(pdev))
17364 tg3_flag_set(tp, ONE_DMA_AT_ONCE); 17375 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17365 if (ssb_gige_have_roboswitch(pdev)) 17376 if (ssb_gige_have_roboswitch(pdev)) {
17377 tg3_flag_set(tp, USE_PHYLIB);
17366 tg3_flag_set(tp, ROBOSWITCH); 17378 tg3_flag_set(tp, ROBOSWITCH);
17379 }
17367 if (ssb_gige_is_rgmii(pdev)) 17380 if (ssb_gige_is_rgmii(pdev))
17368 tg3_flag_set(tp, RGMII_MODE); 17381 tg3_flag_set(tp, RGMII_MODE);
17369 } 17382 }
@@ -17409,9 +17422,12 @@ static int tg3_init_one(struct pci_dev *pdev,
17409 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 17422 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17410 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 17423 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17411 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 17424 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17425 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17426 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17412 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 17427 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17413 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 17428 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17414 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) { 17429 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17430 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17415 tg3_flag_set(tp, ENABLE_APE); 17431 tg3_flag_set(tp, ENABLE_APE);
17416 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 17432 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17417 if (!tp->aperegs) { 17433 if (!tp->aperegs) {
@@ -17628,7 +17644,7 @@ static int tg3_init_one(struct pci_dev *pdev,
17628 17644
17629 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 17645 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17630 struct phy_device *phydev; 17646 struct phy_device *phydev;
17631 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 17647 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17632 netdev_info(dev, 17648 netdev_info(dev,
17633 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", 17649 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17634 phydev->drv->name, dev_name(&phydev->dev)); 17650 phydev->drv->name, dev_name(&phydev->dev));
@@ -17685,7 +17701,6 @@ err_out_free_res:
17685err_out_disable_pdev: 17701err_out_disable_pdev:
17686 if (pci_is_enabled(pdev)) 17702 if (pci_is_enabled(pdev))
17687 pci_disable_device(pdev); 17703 pci_disable_device(pdev);
17688 pci_set_drvdata(pdev, NULL);
17689 return err; 17704 return err;
17690} 17705}
17691 17706
@@ -17717,7 +17732,6 @@ static void tg3_remove_one(struct pci_dev *pdev)
17717 free_netdev(dev); 17732 free_netdev(dev);
17718 pci_release_regions(pdev); 17733 pci_release_regions(pdev);
17719 pci_disable_device(pdev); 17734 pci_disable_device(pdev);
17720 pci_set_drvdata(pdev, NULL);
17721 } 17735 }
17722} 17736}
17723 17737
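The pci_set_drvdata(pdev, NULL) deletions in tg3_init_one()'s error path and in tg3_remove_one() above, like the matching deletions in the Chelsio, Brocade, Cisco, DEC/tulip and D-Link drivers further down, rely on the driver core now clearing a device's drvdata when probe fails or the driver is unbound, so the manual reset is redundant. An abbreviated kernel-context sketch of the resulting remove() shape (illustrative, not standalone-compilable):

/* Typical PCI .remove() after this cleanup: no trailing
 * pci_set_drvdata(pdev, NULL), the core resets drvdata on unbind. */
static void example_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                unregister_netdev(dev);
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                /* previously: pci_set_drvdata(pdev, NULL); */
        }
}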
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 70257808aa37..5c3835aa1e1b 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -68,6 +68,9 @@
68#define TG3PCI_DEVICE_TIGON3_5762 0x1687 68#define TG3PCI_DEVICE_TIGON3_5762 0x1687
69#define TG3PCI_DEVICE_TIGON3_5725 0x1643 69#define TG3PCI_DEVICE_TIGON3_5725 0x1643
70#define TG3PCI_DEVICE_TIGON3_5727 0x16f3 70#define TG3PCI_DEVICE_TIGON3_5727 0x16f3
71#define TG3PCI_DEVICE_TIGON3_57764 0x1642
72#define TG3PCI_DEVICE_TIGON3_57767 0x1683
73#define TG3PCI_DEVICE_TIGON3_57787 0x1641
71/* 0x04 --> 0x2c unused */ 74/* 0x04 --> 0x2c unused */
72#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM 75#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
73#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 76#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index b78e69e0e52a..f276433d37ce 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3212,7 +3212,6 @@ bnad_init(struct bnad *bnad,
3212 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); 3212 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3213 if (!bnad->bar0) { 3213 if (!bnad->bar0) {
3214 dev_err(&pdev->dev, "ioremap for bar0 failed\n"); 3214 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3215 pci_set_drvdata(pdev, NULL);
3216 return -ENOMEM; 3215 return -ENOMEM;
3217 } 3216 }
3218 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0, 3217 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index aefee77523f2..f7e033f8a00e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -372,38 +372,37 @@ extern u32 bnad_rxqs_per_cq;
372/* 372/*
373 * EXTERN PROTOTYPES 373 * EXTERN PROTOTYPES
374 */ 374 */
375extern u32 *cna_get_firmware_buf(struct pci_dev *pdev); 375u32 *cna_get_firmware_buf(struct pci_dev *pdev);
376/* Netdev entry point prototypes */ 376/* Netdev entry point prototypes */
377extern void bnad_set_rx_mode(struct net_device *netdev); 377void bnad_set_rx_mode(struct net_device *netdev);
378extern struct net_device_stats *bnad_get_netdev_stats( 378struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
379 struct net_device *netdev); 379int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
380extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr); 380int bnad_enable_default_bcast(struct bnad *bnad);
381extern int bnad_enable_default_bcast(struct bnad *bnad); 381void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
382extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id); 382void bnad_set_ethtool_ops(struct net_device *netdev);
383extern void bnad_set_ethtool_ops(struct net_device *netdev); 383void bnad_cb_completion(void *arg, enum bfa_status status);
384extern void bnad_cb_completion(void *arg, enum bfa_status status);
385 384
386/* Configuration & setup */ 385/* Configuration & setup */
387extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad); 386void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
388extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad); 387void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
389 388
390extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id); 389int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
391extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id); 390int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
392extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id); 391void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
393extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id); 392void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
394 393
395/* Timer start/stop protos */ 394/* Timer start/stop protos */
396extern void bnad_dim_timer_start(struct bnad *bnad); 395void bnad_dim_timer_start(struct bnad *bnad);
397 396
398/* Statistics */ 397/* Statistics */
399extern void bnad_netdev_qstats_fill(struct bnad *bnad, 398void bnad_netdev_qstats_fill(struct bnad *bnad,
400 struct rtnl_link_stats64 *stats); 399 struct rtnl_link_stats64 *stats);
401extern void bnad_netdev_hwstats_fill(struct bnad *bnad, 400void bnad_netdev_hwstats_fill(struct bnad *bnad,
402 struct rtnl_link_stats64 *stats); 401 struct rtnl_link_stats64 *stats);
403 402
404/* Debugfs */ 403/* Debugfs */
405void bnad_debugfs_init(struct bnad *bnad); 404void bnad_debugfs_init(struct bnad *bnad);
406void bnad_debugfs_uninit(struct bnad *bnad); 405void bnad_debugfs_uninit(struct bnad *bnad);
407 406
408/* MACROS */ 407/* MACROS */
409/* To set & get the stats counters */ 408/* To set & get the stats counters */
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 5ccbed1784d2..8abb46b39032 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -324,30 +324,30 @@ static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
324 return board_info(adap)->clock_core / 1000000; 324 return board_info(adap)->clock_core / 1000000;
325} 325}
326 326
327extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp); 327int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
328extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); 328int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
329extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); 329int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
330extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value); 330int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
331 331
332extern void t1_interrupts_enable(adapter_t *adapter); 332void t1_interrupts_enable(adapter_t *adapter);
333extern void t1_interrupts_disable(adapter_t *adapter); 333void t1_interrupts_disable(adapter_t *adapter);
334extern void t1_interrupts_clear(adapter_t *adapter); 334void t1_interrupts_clear(adapter_t *adapter);
335extern int t1_elmer0_ext_intr_handler(adapter_t *adapter); 335int t1_elmer0_ext_intr_handler(adapter_t *adapter);
336extern void t1_elmer0_ext_intr(adapter_t *adapter); 336void t1_elmer0_ext_intr(adapter_t *adapter);
337extern int t1_slow_intr_handler(adapter_t *adapter); 337int t1_slow_intr_handler(adapter_t *adapter);
338 338
339extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); 339int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
340extern const struct board_info *t1_get_board_info(unsigned int board_id); 340const struct board_info *t1_get_board_info(unsigned int board_id);
341extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid, 341const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
342 unsigned short ssid); 342 unsigned short ssid);
343extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data); 343int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
344extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, 344int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
345 struct adapter_params *p); 345 struct adapter_params *p);
346extern int t1_init_hw_modules(adapter_t *adapter); 346int t1_init_hw_modules(adapter_t *adapter);
347extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi); 347int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
348extern void t1_free_sw_modules(adapter_t *adapter); 348void t1_free_sw_modules(adapter_t *adapter);
349extern void t1_fatal_err(adapter_t *adapter); 349void t1_fatal_err(adapter_t *adapter);
350extern void t1_link_changed(adapter_t *adapter, int port_id); 350void t1_link_changed(adapter_t *adapter, int port_id);
351extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat, 351void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
352 int speed, int duplex, int pause); 352 int speed, int duplex, int pause);
353#endif /* _CXGB_COMMON_H_ */ 353#endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d7048db9863d..1d021059f097 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -1168,7 +1168,6 @@ out_free_dev:
1168 pci_release_regions(pdev); 1168 pci_release_regions(pdev);
1169out_disable_pdev: 1169out_disable_pdev:
1170 pci_disable_device(pdev); 1170 pci_disable_device(pdev);
1171 pci_set_drvdata(pdev, NULL);
1172 return err; 1171 return err;
1173} 1172}
1174 1173
@@ -1347,7 +1346,6 @@ static void remove_one(struct pci_dev *pdev)
1347 1346
1348 pci_release_regions(pdev); 1347 pci_release_regions(pdev);
1349 pci_disable_device(pdev); 1348 pci_disable_device(pdev);
1350 pci_set_drvdata(pdev, NULL);
1351 t1_sw_reset(pdev); 1349 t1_sw_reset(pdev);
1352} 1350}
1353 1351
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index 40c7b93ababc..eb33a31b08a0 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -499,7 +499,7 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
499 499
500static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6]) 500static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
501{ 501{
502 memcpy(mac_addr, cmac->instance->mac_addr, 6); 502 memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN);
503 return 0; 503 return 0;
504} 504}
505 505
@@ -526,7 +526,7 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
526 */ 526 */
527 527
528 /* Store local copy */ 528 /* Store local copy */
529 memcpy(cmac->instance->mac_addr, ma, 6); 529 memcpy(cmac->instance->mac_addr, ma, ETH_ALEN);
530 530
531 lo = ((u32) ma[1] << 8) | (u32) ma[0]; 531 lo = ((u32) ma[1] << 8) | (u32) ma[0];
532 mid = ((u32) ma[3] << 8) | (u32) ma[2]; 532 mid = ((u32) ma[3] << 8) | (u32) ma[2];
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index b650951791dd..45d77334d7d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3374,7 +3374,6 @@ out_release_regions:
3374 pci_release_regions(pdev); 3374 pci_release_regions(pdev);
3375out_disable_device: 3375out_disable_device:
3376 pci_disable_device(pdev); 3376 pci_disable_device(pdev);
3377 pci_set_drvdata(pdev, NULL);
3378out: 3377out:
3379 return err; 3378 return err;
3380} 3379}
@@ -3415,7 +3414,6 @@ static void remove_one(struct pci_dev *pdev)
3415 kfree(adapter); 3414 kfree(adapter);
3416 pci_release_regions(pdev); 3415 pci_release_regions(pdev);
3417 pci_disable_device(pdev); 3416 pci_disable_device(pdev);
3418 pci_set_drvdata(pdev, NULL);
3419 } 3417 }
3420} 3418}
3421 3419
diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h
index 6990f6c65221..81029b872bdd 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h
@@ -685,10 +685,6 @@
685#define V_BUSY(x) ((x) << S_BUSY) 685#define V_BUSY(x) ((x) << S_BUSY)
686#define F_BUSY V_BUSY(1U) 686#define F_BUSY V_BUSY(1U)
687 687
688#define S_BUSY 31
689#define V_BUSY(x) ((x) << S_BUSY)
690#define F_BUSY V_BUSY(1U)
691
692#define A_MC7_EXT_MODE1 0x108 688#define A_MC7_EXT_MODE1 0x108
693 689
694#define A_MC7_EXT_MODE2 0x10c 690#define A_MC7_EXT_MODE2 0x10c
@@ -749,14 +745,6 @@
749 745
750#define A_MC7_CAL 0x128 746#define A_MC7_CAL 0x128
751 747
752#define S_BUSY 31
753#define V_BUSY(x) ((x) << S_BUSY)
754#define F_BUSY V_BUSY(1U)
755
756#define S_BUSY 31
757#define V_BUSY(x) ((x) << S_BUSY)
758#define F_BUSY V_BUSY(1U)
759
760#define S_CAL_FAULT 30 748#define S_CAL_FAULT 30
761#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT) 749#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
762#define F_CAL_FAULT V_CAL_FAULT(1U) 750#define F_CAL_FAULT V_CAL_FAULT(1U)
@@ -815,9 +803,6 @@
815#define V_OP(x) ((x) << S_OP) 803#define V_OP(x) ((x) << S_OP)
816#define F_OP V_OP(1U) 804#define F_OP V_OP(1U)
817 805
818#define F_OP V_OP(1U)
819#define A_SF_OP 0x6dc
820
821#define A_MC7_BIST_ADDR_BEG 0x168 806#define A_MC7_BIST_ADDR_BEG 0x168
822 807
823#define A_MC7_BIST_ADDR_END 0x16c 808#define A_MC7_BIST_ADDR_END 0x16c
@@ -830,8 +815,6 @@
830#define V_CONT(x) ((x) << S_CONT) 815#define V_CONT(x) ((x) << S_CONT)
831#define F_CONT V_CONT(1U) 816#define F_CONT V_CONT(1U)
832 817
833#define F_CONT V_CONT(1U)
834
835#define A_MC7_INT_ENABLE 0x178 818#define A_MC7_INT_ENABLE 0x178
836 819
837#define S_AE 17 820#define S_AE 17
@@ -1017,8 +1000,6 @@
1017#define V_NICMODE(x) ((x) << S_NICMODE) 1000#define V_NICMODE(x) ((x) << S_NICMODE)
1018#define F_NICMODE V_NICMODE(1U) 1001#define F_NICMODE V_NICMODE(1U)
1019 1002
1020#define F_NICMODE V_NICMODE(1U)
1021
1022#define S_IPV6ENABLE 15 1003#define S_IPV6ENABLE 15
1023#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE) 1004#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
1024#define F_IPV6ENABLE V_IPV6ENABLE(1U) 1005#define F_IPV6ENABLE V_IPV6ENABLE(1U)
@@ -1562,27 +1543,15 @@
1562#define A_ULPRX_STAG_ULIMIT 0x530 1543#define A_ULPRX_STAG_ULIMIT 0x530
1563 1544
1564#define A_ULPRX_RQ_LLIMIT 0x534 1545#define A_ULPRX_RQ_LLIMIT 0x534
1565#define A_ULPRX_RQ_LLIMIT 0x534
1566 1546
1567#define A_ULPRX_RQ_ULIMIT 0x538 1547#define A_ULPRX_RQ_ULIMIT 0x538
1568#define A_ULPRX_RQ_ULIMIT 0x538
1569 1548
1570#define A_ULPRX_PBL_LLIMIT 0x53c 1549#define A_ULPRX_PBL_LLIMIT 0x53c
1571 1550
1572#define A_ULPRX_PBL_ULIMIT 0x540 1551#define A_ULPRX_PBL_ULIMIT 0x540
1573#define A_ULPRX_PBL_ULIMIT 0x540
1574 1552
1575#define A_ULPRX_TDDP_TAGMASK 0x524 1553#define A_ULPRX_TDDP_TAGMASK 0x524
1576 1554
1577#define A_ULPRX_RQ_LLIMIT 0x534
1578#define A_ULPRX_RQ_LLIMIT 0x534
1579
1580#define A_ULPRX_RQ_ULIMIT 0x538
1581#define A_ULPRX_RQ_ULIMIT 0x538
1582
1583#define A_ULPRX_PBL_ULIMIT 0x540
1584#define A_ULPRX_PBL_ULIMIT 0x540
1585
1586#define A_ULPTX_CONFIG 0x580 1555#define A_ULPTX_CONFIG 0x580
1587 1556
1588#define S_CFG_CQE_SOP_MASK 1 1557#define S_CFG_CQE_SOP_MASK 1
@@ -2053,8 +2022,6 @@
2053#define V_TMMODE(x) ((x) << S_TMMODE) 2022#define V_TMMODE(x) ((x) << S_TMMODE)
2054#define F_TMMODE V_TMMODE(1U) 2023#define F_TMMODE V_TMMODE(1U)
2055 2024
2056#define F_TMMODE V_TMMODE(1U)
2057
2058#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c 2025#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
2059 2026
2060#define A_MC5_DB_FILTER_TABLE 0x710 2027#define A_MC5_DB_FILTER_TABLE 0x710
@@ -2454,8 +2421,6 @@
2454#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE) 2421#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
2455#define F_TXACTENABLE V_TXACTENABLE(1U) 2422#define F_TXACTENABLE V_TXACTENABLE(1U)
2456 2423
2457#define A_XGM_SERDES_CTRL0 0x8e0
2458
2459#define S_RESET3 23 2424#define S_RESET3 23
2460#define V_RESET3(x) ((x) << S_RESET3) 2425#define V_RESET3(x) ((x) << S_RESET3)
2461#define F_RESET3 V_RESET3(1U) 2426#define F_RESET3 V_RESET3(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index dfd1e36f5753..ecd2fb3ef695 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -48,7 +48,6 @@
48#include <linux/vmalloc.h> 48#include <linux/vmalloc.h>
49#include <asm/io.h> 49#include <asm/io.h>
50#include "cxgb4_uld.h" 50#include "cxgb4_uld.h"
51#include "t4_hw.h"
52 51
53#define FW_VERSION_MAJOR 1 52#define FW_VERSION_MAJOR 1
54#define FW_VERSION_MINOR 4 53#define FW_VERSION_MINOR 4
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c73cabdbd4c0..8b929eeecd2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3983,6 +3983,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
3983 struct net_device *event_dev; 3983 struct net_device *event_dev;
3984 int ret = NOTIFY_DONE; 3984 int ret = NOTIFY_DONE;
3985 struct bonding *bond = netdev_priv(ifa->idev->dev); 3985 struct bonding *bond = netdev_priv(ifa->idev->dev);
3986 struct list_head *iter;
3986 struct slave *slave; 3987 struct slave *slave;
3987 struct pci_dev *first_pdev = NULL; 3988 struct pci_dev *first_pdev = NULL;
3988 3989
@@ -3995,7 +3996,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
3995 * in all of them only once. 3996 * in all of them only once.
3996 */ 3997 */
3997 read_lock(&bond->lock); 3998 read_lock(&bond->lock);
3998 bond_for_each_slave(bond, slave) { 3999 bond_for_each_slave(bond, slave, iter) {
3999 if (!first_pdev) { 4000 if (!first_pdev) {
4000 ret = clip_add(slave->dev, ifa, event); 4001 ret = clip_add(slave->dev, ifa, event);
4001 /* If clip_add is success then only initialize 4002 /* If clip_add is success then only initialize
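The cxgb4 notifier hunk above adapts to a bonding API change from the same series (see the bond_* files in the diffstat): bond_for_each_slave() now takes a caller-supplied struct list_head *iter cursor in addition to the slave pointer, so every call site declares the iterator itself. The toy macro below only reproduces the shape of that calling convention; list_head and container_of are reimplemented minimally here and this is not the bonding code.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct slave { int id; struct list_head list; };

/* Iterate slaves on 'head' using an external cursor, like the new API. */
#define for_each_slave(head, pos, iter)                                  \
        for ((iter) = (head)->next;                                      \
             (iter) != (head) &&                                         \
             ((pos) = container_of((iter), struct slave, list));         \
             (iter) = (iter)->next)

int main(void)
{
        struct list_head head = { &head, &head };
        struct slave a = { 1, { NULL, NULL } };
        struct slave b = { 2, { NULL, NULL } };
        struct slave *slave;
        struct list_head *iter;

        head.next = &a.list;   a.list.prev = &head;
        a.list.next = &b.list; b.list.prev = &a.list;
        b.list.next = &head;   head.prev = &b.list;

        for_each_slave(&head, slave, iter)
                printf("slave %d\n", slave->id);
        return 0;
}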
@@ -6074,7 +6075,6 @@ sriov:
6074 pci_disable_device(pdev); 6075 pci_disable_device(pdev);
6075 out_release_regions: 6076 out_release_regions:
6076 pci_release_regions(pdev); 6077 pci_release_regions(pdev);
6077 pci_set_drvdata(pdev, NULL);
6078 return err; 6078 return err;
6079} 6079}
6080 6080
@@ -6122,7 +6122,6 @@ static void remove_one(struct pci_dev *pdev)
6122 pci_disable_pcie_error_reporting(pdev); 6122 pci_disable_pcie_error_reporting(pdev);
6123 pci_disable_device(pdev); 6123 pci_disable_device(pdev);
6124 pci_release_regions(pdev); 6124 pci_release_regions(pdev);
6125 pci_set_drvdata(pdev, NULL);
6126 } else 6125 } else
6127 pci_release_regions(pdev); 6126 pci_release_regions(pdev);
6128} 6127}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 40c22e7de15c..5f90ec5f7519 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2782,11 +2782,9 @@ err_unmap_bar:
2782 2782
2783err_free_adapter: 2783err_free_adapter:
2784 kfree(adapter); 2784 kfree(adapter);
2785 pci_set_drvdata(pdev, NULL);
2786 2785
2787err_release_regions: 2786err_release_regions:
2788 pci_release_regions(pdev); 2787 pci_release_regions(pdev);
2789 pci_set_drvdata(pdev, NULL);
2790 pci_clear_master(pdev); 2788 pci_clear_master(pdev);
2791 2789
2792err_disable_device: 2790err_disable_device:
@@ -2851,7 +2849,6 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
2851 } 2849 }
2852 iounmap(adapter->regs); 2850 iounmap(adapter->regs);
2853 kfree(adapter); 2851 kfree(adapter);
2854 pci_set_drvdata(pdev, NULL);
2855 } 2852 }
2856 2853
2857 /* 2854 /*
@@ -2908,7 +2905,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
2908#define CH_DEVICE(devid, idx) \ 2905#define CH_DEVICE(devid, idx) \
2909 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } 2906 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
2910 2907
2911static struct pci_device_id cxgb4vf_pci_tbl[] = { 2908static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
2912 CH_DEVICE(0xb000, 0), /* PE10K FPGA */ 2909 CH_DEVICE(0xb000, 0), /* PE10K FPGA */
2913 CH_DEVICE(0x4800, 0), /* T440-dbg */ 2910 CH_DEVICE(0x4800, 0), /* T440-dbg */
2914 CH_DEVICE(0x4801, 0), /* T420-cr */ 2911 CH_DEVICE(0x4801, 0), /* T420-cr */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index df296af20bd5..8475c4cda9e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1396,8 +1396,9 @@ static inline void copy_frags(struct sk_buff *skb,
1396 * Builds an sk_buff from the given packet gather list. Returns the 1396 * Builds an sk_buff from the given packet gather list. Returns the
1397 * sk_buff or %NULL if sk_buff allocation failed. 1397 * sk_buff or %NULL if sk_buff allocation failed.
1398 */ 1398 */
1399struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, 1399static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1400 unsigned int skb_len, unsigned int pull_len) 1400 unsigned int skb_len,
1401 unsigned int pull_len)
1401{ 1402{
1402 struct sk_buff *skb; 1403 struct sk_buff *skb;
1403 1404
@@ -1443,7 +1444,7 @@ out:
1443 * Releases the pages of a packet gather list. We do not own the last 1444 * Releases the pages of a packet gather list. We do not own the last
1444 * page on the list and do not free it. 1445 * page on the list and do not free it.
1445 */ 1446 */
1446void t4vf_pktgl_free(const struct pkt_gl *gl) 1447static void t4vf_pktgl_free(const struct pkt_gl *gl)
1447{ 1448{
1448 int frag; 1449 int frag;
1449 1450
@@ -1640,7 +1641,7 @@ static inline void rspq_next(struct sge_rspq *rspq)
1640 * on this queue. If the system is under memory shortage use a fairly 1641 * on this queue. If the system is under memory shortage use a fairly
1641 * long delay to help recovery. 1642 * long delay to help recovery.
1642 */ 1643 */
1643int process_responses(struct sge_rspq *rspq, int budget) 1644static int process_responses(struct sge_rspq *rspq, int budget)
1644{ 1645{
1645 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); 1646 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1646 int budget_left = budget; 1647 int budget_left = budget;
@@ -1893,7 +1894,7 @@ static unsigned int process_intrq(struct adapter *adapter)
1893 * The MSI interrupt handler handles data events from SGE response queues as 1894 * The MSI interrupt handler handles data events from SGE response queues as
1894 * well as error and other async events as they all use the same MSI vector. 1895 * well as error and other async events as they all use the same MSI vector.
1895 */ 1896 */
1896irqreturn_t t4vf_intr_msi(int irq, void *cookie) 1897static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
1897{ 1898{
1898 struct adapter *adapter = cookie; 1899 struct adapter *adapter = cookie;
1899 1900
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 7b756cf9474a..ff78dfaec508 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2309,7 +2309,6 @@ err_out_release_regions:
2309err_out_disable_device: 2309err_out_disable_device:
2310 pci_disable_device(pdev); 2310 pci_disable_device(pdev);
2311err_out_free_netdev: 2311err_out_free_netdev:
2312 pci_set_drvdata(pdev, NULL);
2313 free_netdev(netdev); 2312 free_netdev(netdev);
2314 2313
2315 return err; 2314 return err;
@@ -2338,7 +2337,6 @@ static void enic_remove(struct pci_dev *pdev)
2338 enic_iounmap(enic); 2337 enic_iounmap(enic);
2339 pci_release_regions(pdev); 2338 pci_release_regions(pdev);
2340 pci_disable_device(pdev); 2339 pci_disable_device(pdev);
2341 pci_set_drvdata(pdev, NULL);
2342 free_netdev(netdev); 2340 free_netdev(netdev);
2343 } 2341 }
2344} 2342}
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a7a941b1a655..7080ad6c4014 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1623,7 +1623,7 @@ dm9000_probe(struct platform_device *pdev)
1623 1623
1624 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) { 1624 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1625 mac_src = "platform data"; 1625 mac_src = "platform data";
1626 memcpy(ndev->dev_addr, pdata->dev_addr, 6); 1626 memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
1627 } 1627 }
1628 1628
1629 if (!is_valid_ether_addr(ndev->dev_addr)) { 1629 if (!is_valid_ether_addr(ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index eaab73cf27ca..38148b0e3a95 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -2110,7 +2110,6 @@ static void de_remove_one(struct pci_dev *pdev)
2110 iounmap(de->regs); 2110 iounmap(de->regs);
2111 pci_release_regions(pdev); 2111 pci_release_regions(pdev);
2112 pci_disable_device(pdev); 2112 pci_disable_device(pdev);
2113 pci_set_drvdata(pdev, NULL);
2114 free_netdev(dev); 2113 free_netdev(dev);
2115} 2114}
2116 2115
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 263b92c00cbf..c05b66dfcc30 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2328,7 +2328,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev)
2328 pci_disable_device (pdev); 2328 pci_disable_device (pdev);
2329} 2329}
2330 2330
2331static struct pci_device_id de4x5_pci_tbl[] = { 2331static DEFINE_PCI_DEVICE_TABLE(de4x5_pci_tbl) = {
2332 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, 2332 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
2333 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 2333 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
2334 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, 2334 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 83139307861c..5ad9e3e3c0b8 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -523,7 +523,6 @@ err_out_res:
523err_out_disable: 523err_out_disable:
524 pci_disable_device(pdev); 524 pci_disable_device(pdev);
525err_out_free: 525err_out_free:
526 pci_set_drvdata(pdev, NULL);
527 free_netdev(dev); 526 free_netdev(dev);
528 527
529 return err; 528 return err;
@@ -548,8 +547,6 @@ static void dmfe_remove_one(struct pci_dev *pdev)
548 db->buf_pool_ptr, db->buf_pool_dma_ptr); 547 db->buf_pool_ptr, db->buf_pool_dma_ptr);
549 pci_release_regions(pdev); 548 pci_release_regions(pdev);
550 free_netdev(dev); /* free board information */ 549 free_netdev(dev); /* free board information */
551
552 pci_set_drvdata(pdev, NULL);
553 } 550 }
554 551
555 DMFE_DBUG(0, "dmfe_remove_one() exit", 0); 552 DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 4e8cfa2ac803..add05f14b38b 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,7 +1939,6 @@ static void tulip_remove_one(struct pci_dev *pdev)
1939 pci_iounmap(pdev, tp->base_addr); 1939 pci_iounmap(pdev, tp->base_addr);
1940 free_netdev (dev); 1940 free_netdev (dev);
1941 pci_release_regions (pdev); 1941 pci_release_regions (pdev);
1942 pci_set_drvdata (pdev, NULL);
1943 1942
1944 /* pci_power_off (pdev, -1); */ 1943 /* pci_power_off (pdev, -1); */
1945} 1944}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 93845afe1cea..a5397b130724 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -429,7 +429,6 @@ err_out_release:
429err_out_disable: 429err_out_disable:
430 pci_disable_device(pdev); 430 pci_disable_device(pdev);
431err_out_free: 431err_out_free:
432 pci_set_drvdata(pdev, NULL);
433 free_netdev(dev); 432 free_netdev(dev);
434 433
435 return err; 434 return err;
@@ -450,7 +449,6 @@ static void uli526x_remove_one(struct pci_dev *pdev)
450 db->buf_pool_ptr, db->buf_pool_dma_ptr); 449 db->buf_pool_ptr, db->buf_pool_dma_ptr);
451 pci_release_regions(pdev); 450 pci_release_regions(pdev);
452 pci_disable_device(pdev); 451 pci_disable_device(pdev);
453 pci_set_drvdata(pdev, NULL);
454 free_netdev(dev); 452 free_netdev(dev);
455} 453}
456 454
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index c7b04ecf5b49..62fe512bb216 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -468,7 +468,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
468 return 0; 468 return 0;
469 469
470err_out_cleardev: 470err_out_cleardev:
471 pci_set_drvdata(pdev, NULL);
472 pci_iounmap(pdev, ioaddr); 471 pci_iounmap(pdev, ioaddr);
473err_out_free_res: 472err_out_free_res:
474 pci_release_regions(pdev); 473 pci_release_regions(pdev);
@@ -1542,8 +1541,6 @@ static void w840_remove1(struct pci_dev *pdev)
1542 pci_iounmap(pdev, np->base_addr); 1541 pci_iounmap(pdev, np->base_addr);
1543 free_netdev(dev); 1542 free_netdev(dev);
1544 } 1543 }
1545
1546 pci_set_drvdata(pdev, NULL);
1547} 1544}
1548 1545
1549#ifdef CONFIG_PM 1546#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 9b84cb04fe5f..ab7ebac6fbea 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -289,7 +289,6 @@ out:
289err_unmap: 289err_unmap:
290 pci_iounmap(pdev, private->ioaddr); 290 pci_iounmap(pdev, private->ioaddr);
291reg_fail: 291reg_fail:
292 pci_set_drvdata(pdev, NULL);
293 dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle); 292 dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
294tx_buf_fail: 293tx_buf_fail:
295 dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle); 294 dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
@@ -317,7 +316,6 @@ static void xircom_remove(struct pci_dev *pdev)
317 316
318 unregister_netdev(dev); 317 unregister_netdev(dev);
319 pci_iounmap(pdev, card->ioaddr); 318 pci_iounmap(pdev, card->ioaddr);
320 pci_set_drvdata(pdev, NULL);
321 dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle); 319 dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
322 dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle); 320 dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
323 free_netdev(dev); 321 free_netdev(dev);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index afa8e3af2c4d..4fb756d219f7 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1746,7 +1746,6 @@ rio_remove1 (struct pci_dev *pdev)
1746 pci_release_regions (pdev); 1746 pci_release_regions (pdev);
1747 pci_disable_device (pdev); 1747 pci_disable_device (pdev);
1748 } 1748 }
1749 pci_set_drvdata (pdev, NULL);
1750} 1749}
1751 1750
1752static struct pci_driver rio_driver = { 1751static struct pci_driver rio_driver = {
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index bf3bf6f22c99..113cd799a131 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -703,7 +703,6 @@ err_out_unmap_tx:
703 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, 703 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
704 np->tx_ring, np->tx_ring_dma); 704 np->tx_ring, np->tx_ring_dma);
705err_out_cleardev: 705err_out_cleardev:
706 pci_set_drvdata(pdev, NULL);
707 pci_iounmap(pdev, ioaddr); 706 pci_iounmap(pdev, ioaddr);
708err_out_res: 707err_out_res:
709 pci_release_regions(pdev); 708 pci_release_regions(pdev);
@@ -1941,7 +1940,6 @@ static void sundance_remove1(struct pci_dev *pdev)
1941 pci_iounmap(pdev, np->base); 1940 pci_iounmap(pdev, np->base);
1942 pci_release_regions(pdev); 1941 pci_release_regions(pdev);
1943 free_netdev(dev); 1942 free_netdev(dev);
1944 pci_set_drvdata(pdev, NULL);
1945 } 1943 }
1946} 1944}
1947 1945
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index db020230bd0b..1bce77fdbd99 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
34#include "be_hw.h" 34#include "be_hw.h"
35#include "be_roce.h" 35#include "be_roce.h"
36 36
37#define DRV_VER "4.9.134.0u" 37#define DRV_VER "4.9.224.0u"
38#define DRV_NAME "be2net" 38#define DRV_NAME "be2net"
39#define BE_NAME "Emulex BladeEngine2" 39#define BE_NAME "Emulex BladeEngine2"
40#define BE3_NAME "Emulex BladeEngine3" 40#define BE3_NAME "Emulex BladeEngine3"
@@ -89,7 +89,7 @@ static inline char *nic_name(struct pci_dev *pdev)
89 89
90#define BE_NUM_VLANS_SUPPORTED 64 90#define BE_NUM_VLANS_SUPPORTED 64
91#define BE_UMC_NUM_VLANS_SUPPORTED 15 91#define BE_UMC_NUM_VLANS_SUPPORTED 15
92#define BE_MAX_EQD 96u 92#define BE_MAX_EQD 128u
93#define BE_MAX_TX_FRAG_COUNT 30 93#define BE_MAX_TX_FRAG_COUNT 30
94 94
95#define EVNT_Q_LEN 1024 95#define EVNT_Q_LEN 1024
@@ -201,6 +201,17 @@ struct be_eq_obj {
201 struct be_adapter *adapter; 201 struct be_adapter *adapter;
202} ____cacheline_aligned_in_smp; 202} ____cacheline_aligned_in_smp;
203 203
204struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
205 bool enable;
206 u32 min_eqd; /* in usecs */
207 u32 max_eqd; /* in usecs */
208 u32 prev_eqd; /* in usecs */
209 u32 et_eqd; /* configured val when aic is off */
210 ulong jiffies;
211 u64 rx_pkts_prev; /* Used to calculate RX pps */
212 u64 tx_reqs_prev; /* Used to calculate TX pps */
213};
214
204struct be_mcc_obj { 215struct be_mcc_obj {
205 struct be_queue_info q; 216 struct be_queue_info q;
206 struct be_queue_info cq; 217 struct be_queue_info cq;
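The new be_aic_obj above centralizes the adaptive interrupt coalescing state per event queue (the rx_pkts_prev, rx_jiffies and rx_pps fields removed from be_rx_stats further down move conceptually into it), and the same file raises BE_MAX_EQD from 96 to 128. The sketch below only shows the kind of computation such state supports: derive a packet rate from the stored counters and clamp an EQ delay to [min_eqd, max_eqd]. The pps-to-delay scaling is purely illustrative; the driver's real policy lives in be_main.c and is not shown in this diff.

#include <stdio.h>
#include <stdint.h>

struct aic_state {
        uint32_t min_eqd, max_eqd, prev_eqd;    /* usecs */
        uint64_t rx_pkts_prev, tx_reqs_prev;
        uint64_t last_ms;                       /* stand-in for jiffies */
};

static uint32_t pick_eqd(struct aic_state *aic, uint64_t now_ms,
                         uint64_t rx_pkts, uint64_t tx_reqs)
{
        uint64_t delta_ms = now_ms - aic->last_ms;
        uint64_t pps, eqd;

        if (!delta_ms)
                return aic->prev_eqd;

        pps = ((rx_pkts - aic->rx_pkts_prev) +
               (tx_reqs - aic->tx_reqs_prev)) * 1000 / delta_ms;

        eqd = pps / 15000;                      /* illustrative scaling only */
        if (eqd < aic->min_eqd)
                eqd = aic->min_eqd;
        if (eqd > aic->max_eqd)
                eqd = aic->max_eqd;

        aic->rx_pkts_prev = rx_pkts;
        aic->tx_reqs_prev = tx_reqs;
        aic->last_ms = now_ms;
        aic->prev_eqd = (uint32_t)eqd;
        return aic->prev_eqd;
}

int main(void)
{
        struct aic_state aic = { 0, 128, 0, 0, 0, 0 };

        printf("eqd=%u us\n", (unsigned)pick_eqd(&aic, 1000, 300000, 100000));
        return 0;
}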
@@ -215,6 +226,7 @@ struct be_tx_stats {
215 u64 tx_compl; 226 u64 tx_compl;
216 ulong tx_jiffies; 227 ulong tx_jiffies;
217 u32 tx_stops; 228 u32 tx_stops;
229 u32 tx_drv_drops; /* pkts dropped by driver */
218 struct u64_stats_sync sync; 230 struct u64_stats_sync sync;
219 struct u64_stats_sync sync_compl; 231 struct u64_stats_sync sync_compl;
220}; 232};
@@ -239,15 +251,12 @@ struct be_rx_page_info {
239struct be_rx_stats { 251struct be_rx_stats {
240 u64 rx_bytes; 252 u64 rx_bytes;
241 u64 rx_pkts; 253 u64 rx_pkts;
242 u64 rx_pkts_prev;
243 ulong rx_jiffies;
244 u32 rx_drops_no_skbs; /* skb allocation errors */ 254 u32 rx_drops_no_skbs; /* skb allocation errors */
245 u32 rx_drops_no_frags; /* HW has no fetched frags */ 255 u32 rx_drops_no_frags; /* HW has no fetched frags */
246 u32 rx_post_fail; /* page post alloc failures */ 256 u32 rx_post_fail; /* page post alloc failures */
247 u32 rx_compl; 257 u32 rx_compl;
248 u32 rx_mcast_pkts; 258 u32 rx_mcast_pkts;
249 u32 rx_compl_err; /* completions with err set */ 259 u32 rx_compl_err; /* completions with err set */
250 u32 rx_pps; /* pkts per second */
251 struct u64_stats_sync sync; 260 struct u64_stats_sync sync;
252}; 261};
253 262
@@ -316,6 +325,11 @@ struct be_drv_stats {
316 u32 rx_input_fifo_overflow_drop; 325 u32 rx_input_fifo_overflow_drop;
317 u32 pmem_fifo_overflow_drop; 326 u32 pmem_fifo_overflow_drop;
318 u32 jabber_events; 327 u32 jabber_events;
328 u32 rx_roce_bytes_lsd;
329 u32 rx_roce_bytes_msd;
330 u32 rx_roce_frames;
331 u32 roce_drops_payload_len;
332 u32 roce_drops_crc;
319}; 333};
320 334
321struct be_vf_cfg { 335struct be_vf_cfg {
@@ -405,6 +419,7 @@ struct be_adapter {
405 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 419 u32 big_page_size; /* Compounded page size shared by rx wrbs */
406 420
407 struct be_drv_stats drv_stats; 421 struct be_drv_stats drv_stats;
422 struct be_aic_obj aic_obj[MAX_EVT_QS];
408 u16 vlans_added; 423 u16 vlans_added;
409 u8 vlan_tag[VLAN_N_VID]; 424 u8 vlan_tag[VLAN_N_VID];
410 u8 vlan_prio_bmap; /* Available Priority BitMap */ 425 u8 vlan_prio_bmap; /* Available Priority BitMap */
@@ -472,8 +487,8 @@ struct be_adapter {
472 487
473#define be_physfn(adapter) (!adapter->virtfn) 488#define be_physfn(adapter) (!adapter->virtfn)
474#define sriov_enabled(adapter) (adapter->num_vfs > 0) 489#define sriov_enabled(adapter) (adapter->num_vfs > 0)
475#define sriov_want(adapter) (be_max_vfs(adapter) && num_vfs && \ 490#define sriov_want(adapter) (be_physfn(adapter) && \
476 be_physfn(adapter)) 491 (num_vfs || pci_num_vf(adapter->pdev)))
477#define for_all_vfs(adapter, vf_cfg, i) \ 492#define for_all_vfs(adapter, vf_cfg, i) \
478 for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \ 493 for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
479 i++, vf_cfg++) 494 i++, vf_cfg++)
@@ -696,27 +711,27 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
696 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD; 711 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
697} 712}
698 713
699extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 714void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
700 u16 num_popped); 715 u16 num_popped);
701extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); 716void be_link_status_update(struct be_adapter *adapter, u8 link_status);
702extern void be_parse_stats(struct be_adapter *adapter); 717void be_parse_stats(struct be_adapter *adapter);
703extern int be_load_fw(struct be_adapter *adapter, u8 *func); 718int be_load_fw(struct be_adapter *adapter, u8 *func);
704extern bool be_is_wol_supported(struct be_adapter *adapter); 719bool be_is_wol_supported(struct be_adapter *adapter);
705extern bool be_pause_supported(struct be_adapter *adapter); 720bool be_pause_supported(struct be_adapter *adapter);
706extern u32 be_get_fw_log_level(struct be_adapter *adapter); 721u32 be_get_fw_log_level(struct be_adapter *adapter);
707int be_update_queues(struct be_adapter *adapter); 722int be_update_queues(struct be_adapter *adapter);
708int be_poll(struct napi_struct *napi, int budget); 723int be_poll(struct napi_struct *napi, int budget);
709 724
710/* 725/*
711 * internal function to initialize-cleanup roce device. 726 * internal function to initialize-cleanup roce device.
712 */ 727 */
713extern void be_roce_dev_add(struct be_adapter *); 728void be_roce_dev_add(struct be_adapter *);
714extern void be_roce_dev_remove(struct be_adapter *); 729void be_roce_dev_remove(struct be_adapter *);
715 730
716/* 731/*
717 * internal function to open-close roce device during ifup-ifdown. 732 * internal function to open-close roce device during ifup-ifdown.
718 */ 733 */
719extern void be_roce_dev_open(struct be_adapter *); 734void be_roce_dev_open(struct be_adapter *);
720extern void be_roce_dev_close(struct be_adapter *); 735void be_roce_dev_close(struct be_adapter *);
721 736
722#endif /* BE_H */ 737#endif /* BE_H */
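
For illustration, the reworked sriov_want() test in the hunk above can be read as the stand-alone helper below. This is a minimal sketch, not driver code: the helper name is made up, while be_physfn() is the macro shown above and pci_num_vf() is the regular PCI core accessor for the number of VFs already enabled on a PF.

static bool be_wants_sriov_sketch(struct be_adapter *adapter,
				  unsigned int num_vfs_module_param)
{
	/* Only a PF can create VFs; SR-IOV is wanted when the num_vfs
	 * module parameter asked for VFs, or when VFs are already
	 * instantiated (e.g. left over from a previous driver load),
	 * which pci_num_vf() reports from the PCI core.
	 */
	return be_physfn(adapter) &&
	       (num_vfs_module_param || pci_num_vf(adapter->pdev));
}
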
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index c08fd32bb8e5..2d554366b342 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1436,8 +1436,12 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1436 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd); 1436 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1437 1437
1438 /* only BE2 lacks support for version 1 (or higher) of this cmd */ 1438 /* only BE2 lacks support for version 1 (or higher) of this cmd */
1439 if (!BE2_chip(adapter)) 1439 if (BE2_chip(adapter))
1440 hdr->version = 0;
1441 else if (BE3_chip(adapter) || lancer_chip(adapter))
1440 hdr->version = 1; 1442 hdr->version = 1;
1443 else
1444 hdr->version = 2;
1441 1445
1442 be_mcc_notify(adapter); 1446 be_mcc_notify(adapter);
1443 adapter->stats_cmd_sent = true; 1447 adapter->stats_cmd_sent = true;
@@ -1719,11 +1723,12 @@ err:
1719/* set the EQ delay interval of an EQ to specified value 1723/* set the EQ delay interval of an EQ to specified value
1720 * Uses async mcc 1724 * Uses async mcc
1721 */ 1725 */
1722int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) 1726int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1727 int num)
1723{ 1728{
1724 struct be_mcc_wrb *wrb; 1729 struct be_mcc_wrb *wrb;
1725 struct be_cmd_req_modify_eq_delay *req; 1730 struct be_cmd_req_modify_eq_delay *req;
1726 int status = 0; 1731 int status = 0, i;
1727 1732
1728 spin_lock_bh(&adapter->mcc_lock); 1733 spin_lock_bh(&adapter->mcc_lock);
1729 1734
@@ -1737,13 +1742,15 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1737 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1742 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1738 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL); 1743 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1739 1744
1740 req->num_eq = cpu_to_le32(1); 1745 req->num_eq = cpu_to_le32(num);
1741 req->delay[0].eq_id = cpu_to_le32(eq_id); 1746 for (i = 0; i < num; i++) {
1742 req->delay[0].phase = 0; 1747 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1743 req->delay[0].delay_multiplier = cpu_to_le32(eqd); 1748 req->set_eqd[i].phase = 0;
1749 req->set_eqd[i].delay_multiplier =
1750 cpu_to_le32(set_eqd[i].delay_multiplier);
1751 }
1744 1752
1745 be_mcc_notify(adapter); 1753 be_mcc_notify(adapter);
1746
1747err: 1754err:
1748 spin_unlock_bh(&adapter->mcc_lock); 1755 spin_unlock_bh(&adapter->mcc_lock);
1749 return status; 1756 return status;
@@ -3520,7 +3527,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3520 struct be_cmd_enable_disable_vf *req; 3527 struct be_cmd_enable_disable_vf *req;
3521 int status; 3528 int status;
3522 3529
3523 if (!lancer_chip(adapter)) 3530 if (BEx_chip(adapter))
3524 return 0; 3531 return 0;
3525 3532
3526 spin_lock_bh(&adapter->mcc_lock); 3533 spin_lock_bh(&adapter->mcc_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 108ca8abf0af..88708372d5e5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1057,14 +1057,16 @@ struct be_cmd_resp_get_flow_control {
1057} __packed; 1057} __packed;
1058 1058
1059/******************** Modify EQ Delay *******************/ 1059/******************** Modify EQ Delay *******************/
1060struct be_set_eqd {
1061 u32 eq_id;
1062 u32 phase;
1063 u32 delay_multiplier;
1064};
1065
1060struct be_cmd_req_modify_eq_delay { 1066struct be_cmd_req_modify_eq_delay {
1061 struct be_cmd_req_hdr hdr; 1067 struct be_cmd_req_hdr hdr;
1062 u32 num_eq; 1068 u32 num_eq;
1063 struct { 1069 struct be_set_eqd set_eqd[MAX_EVT_QS];
1064 u32 eq_id;
1065 u32 phase;
1066 u32 delay_multiplier;
1067 } delay[8];
1068} __packed; 1070} __packed;
1069 1071
1070struct be_cmd_resp_modify_eq_delay { 1072struct be_cmd_resp_modify_eq_delay {
@@ -1660,6 +1662,67 @@ struct be_erx_stats_v1 {
1660 u32 rsvd[4]; 1662 u32 rsvd[4];
1661}; 1663};
1662 1664
1665struct be_port_rxf_stats_v2 {
1666 u32 rsvd0[10];
1667 u32 roce_bytes_received_lsd;
1668 u32 roce_bytes_received_msd;
1669 u32 rsvd1[5];
1670 u32 roce_frames_received;
1671 u32 rx_crc_errors;
1672 u32 rx_alignment_symbol_errors;
1673 u32 rx_pause_frames;
1674 u32 rx_priority_pause_frames;
1675 u32 rx_control_frames;
1676 u32 rx_in_range_errors;
1677 u32 rx_out_range_errors;
1678 u32 rx_frame_too_long;
1679 u32 rx_address_filtered;
1680 u32 rx_dropped_too_small;
1681 u32 rx_dropped_too_short;
1682 u32 rx_dropped_header_too_small;
1683 u32 rx_dropped_tcp_length;
1684 u32 rx_dropped_runt;
1685 u32 rsvd2[10];
1686 u32 rx_ip_checksum_errs;
1687 u32 rx_tcp_checksum_errs;
1688 u32 rx_udp_checksum_errs;
1689 u32 rsvd3[7];
1690 u32 rx_switched_unicast_packets;
1691 u32 rx_switched_multicast_packets;
1692 u32 rx_switched_broadcast_packets;
1693 u32 rsvd4[3];
1694 u32 tx_pauseframes;
1695 u32 tx_priority_pauseframes;
1696 u32 tx_controlframes;
1697 u32 rsvd5[10];
1698 u32 rxpp_fifo_overflow_drop;
1699 u32 rx_input_fifo_overflow_drop;
1700 u32 pmem_fifo_overflow_drop;
1701 u32 jabber_events;
1702 u32 rsvd6[3];
1703 u32 rx_drops_payload_size;
1704 u32 rx_drops_clipped_header;
1705 u32 rx_drops_crc;
1706 u32 roce_drops_payload_len;
1707 u32 roce_drops_crc;
1708 u32 rsvd7[19];
1709};
1710
1711struct be_rxf_stats_v2 {
1712 struct be_port_rxf_stats_v2 port[4];
1713 u32 rsvd0[2];
1714 u32 rx_drops_no_pbuf;
1715 u32 rx_drops_no_txpb;
1716 u32 rx_drops_no_erx_descr;
1717 u32 rx_drops_no_tpre_descr;
1718 u32 rsvd1[6];
1719 u32 rx_drops_too_many_frags;
1720 u32 rx_drops_invalid_ring;
1721 u32 forwarded_packets;
1722 u32 rx_drops_mtu;
1723 u32 rsvd2[35];
1724};
1725
1663struct be_hw_stats_v1 { 1726struct be_hw_stats_v1 {
1664 struct be_rxf_stats_v1 rxf; 1727 struct be_rxf_stats_v1 rxf;
1665 u32 rsvd0[BE_TXP_SW_SZ]; 1728 u32 rsvd0[BE_TXP_SW_SZ];
@@ -1678,6 +1741,29 @@ struct be_cmd_resp_get_stats_v1 {
1678 struct be_hw_stats_v1 hw_stats; 1741 struct be_hw_stats_v1 hw_stats;
1679}; 1742};
1680 1743
1744struct be_erx_stats_v2 {
1745 u32 rx_drops_no_fragments[136]; /* dwords 0 to 135 */
1746 u32 rsvd[3];
1747};
1748
1749struct be_hw_stats_v2 {
1750 struct be_rxf_stats_v2 rxf;
1751 u32 rsvd0[BE_TXP_SW_SZ];
1752 struct be_erx_stats_v2 erx;
1753 struct be_pmem_stats pmem;
1754 u32 rsvd1[18];
1755};
1756
1757struct be_cmd_req_get_stats_v2 {
1758 struct be_cmd_req_hdr hdr;
1759 u8 rsvd[sizeof(struct be_hw_stats_v2)];
1760};
1761
1762struct be_cmd_resp_get_stats_v2 {
1763 struct be_cmd_resp_hdr hdr;
1764 struct be_hw_stats_v2 hw_stats;
1765};
1766
1681/************** get fat capabilities *******************/ 1767/************** get fat capabilities *******************/
1682#define MAX_MODULES 27 1768#define MAX_MODULES 27
1683#define MAX_MODES 4 1769#define MAX_MODES 4
@@ -1865,137 +1951,120 @@ struct be_cmd_resp_get_iface_list {
1865 struct be_if_desc if_desc; 1951 struct be_if_desc if_desc;
1866}; 1952};
1867 1953
1868extern int be_pci_fnum_get(struct be_adapter *adapter); 1954int be_pci_fnum_get(struct be_adapter *adapter);
1869extern int be_fw_wait_ready(struct be_adapter *adapter); 1955int be_fw_wait_ready(struct be_adapter *adapter);
1870extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1956int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
1871 bool permanent, u32 if_handle, u32 pmac_id); 1957 bool permanent, u32 if_handle, u32 pmac_id);
1872extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 1958int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id,
1873 u32 if_id, u32 *pmac_id, u32 domain); 1959 u32 *pmac_id, u32 domain);
1874extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, 1960int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
1875 int pmac_id, u32 domain); 1961 u32 domain);
1876extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, 1962int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1877 u32 en_flags, u32 *if_handle, u32 domain); 1963 u32 *if_handle, u32 domain);
1878extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, 1964int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain);
1879 u32 domain); 1965int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
1880extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo); 1966int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1881extern int be_cmd_cq_create(struct be_adapter *adapter, 1967 struct be_queue_info *eq, bool no_delay,
1882 struct be_queue_info *cq, struct be_queue_info *eq, 1968 int num_cqe_dma_coalesce);
1883 bool no_delay, int num_cqe_dma_coalesce); 1969int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq,
1884extern int be_cmd_mccq_create(struct be_adapter *adapter, 1970 struct be_queue_info *cq);
1885 struct be_queue_info *mccq, 1971int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo);
1886 struct be_queue_info *cq); 1972int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq,
1887extern int be_cmd_txq_create(struct be_adapter *adapter, 1973 u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
1888 struct be_tx_obj *txo); 1974int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1889extern int be_cmd_rxq_create(struct be_adapter *adapter, 1975 int type);
1890 struct be_queue_info *rxq, u16 cq_id, 1976int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
1891 u16 frag_size, u32 if_id, u32 rss, u8 *rss_id); 1977int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1892extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 1978 u8 *link_status, u32 dom);
1893 int type); 1979int be_cmd_reset(struct be_adapter *adapter);
1894extern int be_cmd_rxq_destroy(struct be_adapter *adapter, 1980int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
1895 struct be_queue_info *q); 1981int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1896extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, 1982 struct be_dma_mem *nonemb_cmd);
1897 u8 *link_status, u32 dom); 1983int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1898extern int be_cmd_reset(struct be_adapter *adapter); 1984 char *fw_on_flash);
1899extern int be_cmd_get_stats(struct be_adapter *adapter, 1985int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
1900 struct be_dma_mem *nonemb_cmd); 1986int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1901extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter, 1987 u32 num, bool untagged, bool promiscuous);
1902 struct be_dma_mem *nonemb_cmd); 1988int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
1903extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, 1989int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
1904 char *fw_on_flash); 1990int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
1905 1991int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1906extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
1907extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
1908 u16 *vtag_array, u32 num, bool untagged,
1909 bool promiscuous);
1910extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
1911extern int be_cmd_set_flow_control(struct be_adapter *adapter,
1912 u32 tx_fc, u32 rx_fc);
1913extern int be_cmd_get_flow_control(struct be_adapter *adapter,
1914 u32 *tx_fc, u32 *rx_fc);
1915extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1916 u32 *function_mode, u32 *function_caps, u16 *asic_rev); 1992 u32 *function_mode, u32 *function_caps, u16 *asic_rev);
1917extern int be_cmd_reset_function(struct be_adapter *adapter); 1993int be_cmd_reset_function(struct be_adapter *adapter);
1918extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, 1994int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
1919 u32 rss_hash_opts, u16 table_size); 1995 u32 rss_hash_opts, u16 table_size);
1920extern int be_process_mcc(struct be_adapter *adapter); 1996int be_process_mcc(struct be_adapter *adapter);
1921extern int be_cmd_set_beacon_state(struct be_adapter *adapter, 1997int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
1922 u8 port_num, u8 beacon, u8 status, u8 state); 1998 u8 status, u8 state);
1923extern int be_cmd_get_beacon_state(struct be_adapter *adapter, 1999int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
1924 u8 port_num, u32 *state); 2000 u32 *state);
1925extern int be_cmd_write_flashrom(struct be_adapter *adapter, 2001int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1926 struct be_dma_mem *cmd, u32 flash_oper, 2002 u32 flash_oper, u32 flash_opcode, u32 buf_size);
1927 u32 flash_opcode, u32 buf_size); 2003int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1928extern int lancer_cmd_write_object(struct be_adapter *adapter, 2004 u32 data_size, u32 data_offset,
1929 struct be_dma_mem *cmd, 2005 const char *obj_name, u32 *data_written,
1930 u32 data_size, u32 data_offset, 2006 u8 *change_status, u8 *addn_status);
1931 const char *obj_name,
1932 u32 *data_written, u8 *change_status,
1933 u8 *addn_status);
1934int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2007int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1935 u32 data_size, u32 data_offset, const char *obj_name, 2008 u32 data_size, u32 data_offset, const char *obj_name,
1936 u32 *data_read, u32 *eof, u8 *addn_status); 2009 u32 *data_read, u32 *eof, u8 *addn_status);
1937int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 2010int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1938 int offset); 2011 int offset);
1939extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 2012int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1940 struct be_dma_mem *nonemb_cmd); 2013 struct be_dma_mem *nonemb_cmd);
1941extern int be_cmd_fw_init(struct be_adapter *adapter); 2014int be_cmd_fw_init(struct be_adapter *adapter);
1942extern int be_cmd_fw_clean(struct be_adapter *adapter); 2015int be_cmd_fw_clean(struct be_adapter *adapter);
1943extern void be_async_mcc_enable(struct be_adapter *adapter); 2016void be_async_mcc_enable(struct be_adapter *adapter);
1944extern void be_async_mcc_disable(struct be_adapter *adapter); 2017void be_async_mcc_disable(struct be_adapter *adapter);
1945extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 2018int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1946 u32 loopback_type, u32 pkt_size, 2019 u32 loopback_type, u32 pkt_size, u32 num_pkts,
1947 u32 num_pkts, u64 pattern); 2020 u64 pattern);
1948extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 2021int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt,
1949 u32 byte_cnt, struct be_dma_mem *cmd); 2022 struct be_dma_mem *cmd);
1950extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, 2023int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1951 struct be_dma_mem *nonemb_cmd); 2024 struct be_dma_mem *nonemb_cmd);
1952extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 2025int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1953 u8 loopback_type, u8 enable); 2026 u8 loopback_type, u8 enable);
1954extern int be_cmd_get_phy_info(struct be_adapter *adapter); 2027int be_cmd_get_phy_info(struct be_adapter *adapter);
1955extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 2028int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
1956extern void be_detect_error(struct be_adapter *adapter); 2029void be_detect_error(struct be_adapter *adapter);
1957extern int be_cmd_get_die_temperature(struct be_adapter *adapter); 2030int be_cmd_get_die_temperature(struct be_adapter *adapter);
1958extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter); 2031int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1959extern int be_cmd_req_native_mode(struct be_adapter *adapter); 2032int be_cmd_req_native_mode(struct be_adapter *adapter);
1960extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); 2033int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
1961extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); 2034void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
1962extern int be_cmd_get_fn_privileges(struct be_adapter *adapter, 2035int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
1963 u32 *privilege, u32 domain); 2036 u32 domain);
1964extern int be_cmd_set_fn_privileges(struct be_adapter *adapter, 2037int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
1965 u32 privileges, u32 vf_num); 2038 u32 vf_num);
1966extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 2039int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
1967 bool *pmac_id_active, u32 *pmac_id, 2040 bool *pmac_id_active, u32 *pmac_id, u8 domain);
1968 u8 domain); 2041int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac);
1969extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, 2042int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
1970 u8 *mac); 2043int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
1971extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac); 2044 u32 domain);
1972extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 2045int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
1973 u8 mac_count, u32 domain); 2046int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
1974extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, 2047 u16 intf_id, u16 hsw_mode);
1975 u32 dom); 2048int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
1976extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 2049 u16 intf_id, u8 *mode);
1977 u32 domain, u16 intf_id, u16 hsw_mode); 2050int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
1978extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 2051int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
1979 u32 domain, u16 intf_id, u8 *mode); 2052 struct be_dma_mem *cmd);
1980extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); 2053int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
1981extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, 2054 struct be_dma_mem *cmd,
1982 struct be_dma_mem *cmd); 2055 struct be_fat_conf_params *cfgs);
1983extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, 2056int lancer_wait_ready(struct be_adapter *adapter);
1984 struct be_dma_mem *cmd, 2057int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
1985 struct be_fat_conf_params *cfgs); 2058int lancer_initiate_dump(struct be_adapter *adapter);
1986extern int lancer_wait_ready(struct be_adapter *adapter); 2059bool dump_present(struct be_adapter *adapter);
1987extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask); 2060int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
1988extern int lancer_initiate_dump(struct be_adapter *adapter); 2061int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
1989extern bool dump_present(struct be_adapter *adapter);
1990extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
1991extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
1992int be_cmd_get_func_config(struct be_adapter *adapter, 2062int be_cmd_get_func_config(struct be_adapter *adapter,
1993 struct be_resources *res); 2063 struct be_resources *res);
1994int be_cmd_get_profile_config(struct be_adapter *adapter, 2064int be_cmd_get_profile_config(struct be_adapter *adapter,
1995 struct be_resources *res, u8 domain); 2065 struct be_resources *res, u8 domain);
1996extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, 2066int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
1997 u8 domain); 2067int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
1998extern int be_cmd_get_if_id(struct be_adapter *adapter, 2068 int vf_num);
1999 struct be_vf_cfg *vf_cfg, int vf_num); 2069int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
2000extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); 2070int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
2001extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
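
As a usage sketch for the batched form of be_cmd_modify_eqd() declared above: a caller collects one struct be_set_eqd entry per event queue whose delay changed and issues a single MCC command for the lot. The helper below is illustrative only and is modelled on be_eqd_update() in be_main.c later in this patch; the 65/100 scaling converts microseconds into the delay multiplier the firmware expects.

static void example_apply_eqd(struct be_adapter *adapter,
			      const u32 *eqd_usecs, int num_eqs)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int i, num = 0;

	for (i = 0; i < num_eqs; i++) {
		struct be_aic_obj *aic = &adapter->aic_obj[i];

		if (eqd_usecs[i] == aic->prev_eqd)
			continue;	/* nothing to change for this EQ */

		set_eqd[num].eq_id = adapter->eq_obj[i].q.id;
		/* firmware takes a multiplier, not microseconds */
		set_eqd[num].delay_multiplier = (eqd_usecs[i] * 65) / 100;
		aic->prev_eqd = eqd_usecs[i];
		num++;
	}

	if (num)	/* one command covers all changed EQs */
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
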
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b440a1fac77b..08330034d9ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -116,7 +116,12 @@ static const struct be_ethtool_stat et_stats[] = {
116 {DRVSTAT_INFO(rx_drops_mtu)}, 116 {DRVSTAT_INFO(rx_drops_mtu)},
117 /* Number of packets dropped due to random early drop function */ 117 /* Number of packets dropped due to random early drop function */
118 {DRVSTAT_INFO(eth_red_drops)}, 118 {DRVSTAT_INFO(eth_red_drops)},
119 {DRVSTAT_INFO(be_on_die_temperature)} 119 {DRVSTAT_INFO(be_on_die_temperature)},
120 {DRVSTAT_INFO(rx_roce_bytes_lsd)},
121 {DRVSTAT_INFO(rx_roce_bytes_msd)},
122 {DRVSTAT_INFO(rx_roce_frames)},
123 {DRVSTAT_INFO(roce_drops_payload_len)},
124 {DRVSTAT_INFO(roce_drops_crc)}
120}; 125};
121#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) 126#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
122 127
@@ -155,7 +160,9 @@ static const struct be_ethtool_stat et_tx_stats[] = {
155 /* Number of times the TX queue was stopped due to lack 160 /* Number of times the TX queue was stopped due to lack
156 * of spaces in the TXQ. 161 * of spaces in the TXQ.
157 */ 162 */
158 {DRVSTAT_TX_INFO(tx_stops)} 163 {DRVSTAT_TX_INFO(tx_stops)},
164 /* Pkts dropped in the driver's transmit path */
165 {DRVSTAT_TX_INFO(tx_drv_drops)}
159}; 166};
160#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats)) 167#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
161 168
@@ -290,19 +297,19 @@ static int be_get_coalesce(struct net_device *netdev,
290 struct ethtool_coalesce *et) 297 struct ethtool_coalesce *et)
291{ 298{
292 struct be_adapter *adapter = netdev_priv(netdev); 299 struct be_adapter *adapter = netdev_priv(netdev);
293 struct be_eq_obj *eqo = &adapter->eq_obj[0]; 300 struct be_aic_obj *aic = &adapter->aic_obj[0];
294 301
295 302
296 et->rx_coalesce_usecs = eqo->cur_eqd; 303 et->rx_coalesce_usecs = aic->prev_eqd;
297 et->rx_coalesce_usecs_high = eqo->max_eqd; 304 et->rx_coalesce_usecs_high = aic->max_eqd;
298 et->rx_coalesce_usecs_low = eqo->min_eqd; 305 et->rx_coalesce_usecs_low = aic->min_eqd;
299 306
300 et->tx_coalesce_usecs = eqo->cur_eqd; 307 et->tx_coalesce_usecs = aic->prev_eqd;
301 et->tx_coalesce_usecs_high = eqo->max_eqd; 308 et->tx_coalesce_usecs_high = aic->max_eqd;
302 et->tx_coalesce_usecs_low = eqo->min_eqd; 309 et->tx_coalesce_usecs_low = aic->min_eqd;
303 310
304 et->use_adaptive_rx_coalesce = eqo->enable_aic; 311 et->use_adaptive_rx_coalesce = aic->enable;
305 et->use_adaptive_tx_coalesce = eqo->enable_aic; 312 et->use_adaptive_tx_coalesce = aic->enable;
306 313
307 return 0; 314 return 0;
308} 315}
@@ -314,14 +321,17 @@ static int be_set_coalesce(struct net_device *netdev,
314 struct ethtool_coalesce *et) 321 struct ethtool_coalesce *et)
315{ 322{
316 struct be_adapter *adapter = netdev_priv(netdev); 323 struct be_adapter *adapter = netdev_priv(netdev);
324 struct be_aic_obj *aic = &adapter->aic_obj[0];
317 struct be_eq_obj *eqo; 325 struct be_eq_obj *eqo;
318 int i; 326 int i;
319 327
320 for_all_evt_queues(adapter, eqo, i) { 328 for_all_evt_queues(adapter, eqo, i) {
321 eqo->enable_aic = et->use_adaptive_rx_coalesce; 329 aic->enable = et->use_adaptive_rx_coalesce;
322 eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD); 330 aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
323 eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd); 331 aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
324 eqo->eqd = et->rx_coalesce_usecs; 332 aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
333 aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
334 aic++;
325 } 335 }
326 336
327 return 0; 337 return 0;
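
In short, be_set_coalesce() above copies the user-visible ethtool knobs into each be_aic_obj, clamping the static delay into the [min_eqd, max_eqd] window. A minimal per-EQ sketch of that mapping follows; the helper is illustrative, and clamp_t() is simply the min()+max() pair from the hunk written in one step.

static void example_fill_aic(struct be_aic_obj *aic,
			     const struct ethtool_coalesce *et)
{
	aic->enable  = et->use_adaptive_rx_coalesce;
	aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
	aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
	/* static delay, used only while adaptive coalescing is off */
	aic->et_eqd  = clamp_t(u32, et->rx_coalesce_usecs,
			       aic->min_eqd, aic->max_eqd);
}
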
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 2c38cc402119..393e3dc05a36 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -306,10 +306,14 @@ static void *hw_stats_from_cmd(struct be_adapter *adapter)
306 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va; 306 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
307 307
308 return &cmd->hw_stats; 308 return &cmd->hw_stats;
309 } else { 309 } else if (BE3_chip(adapter)) {
310 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va; 310 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
311 311
312 return &cmd->hw_stats; 312 return &cmd->hw_stats;
313 } else {
314 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
315
316 return &cmd->hw_stats;
313 } 317 }
314} 318}
315 319
@@ -320,10 +324,14 @@ static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
320 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); 324 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
321 325
322 return &hw_stats->erx; 326 return &hw_stats->erx;
323 } else { 327 } else if (BE3_chip(adapter)) {
324 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); 328 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
325 329
326 return &hw_stats->erx; 330 return &hw_stats->erx;
331 } else {
332 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
333
334 return &hw_stats->erx;
327 } 335 }
328} 336}
329 337
@@ -422,6 +430,60 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
422 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; 430 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
423} 431}
424 432
433static void populate_be_v2_stats(struct be_adapter *adapter)
434{
435 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
436 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
437 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
438 struct be_port_rxf_stats_v2 *port_stats =
439 &rxf_stats->port[adapter->port_num];
440 struct be_drv_stats *drvs = &adapter->drv_stats;
441
442 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
443 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
444 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
445 drvs->rx_pause_frames = port_stats->rx_pause_frames;
446 drvs->rx_crc_errors = port_stats->rx_crc_errors;
447 drvs->rx_control_frames = port_stats->rx_control_frames;
448 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
449 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
450 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
451 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
452 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
453 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
454 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
455 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
456 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
457 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
458 drvs->rx_dropped_header_too_small =
459 port_stats->rx_dropped_header_too_small;
460 drvs->rx_input_fifo_overflow_drop =
461 port_stats->rx_input_fifo_overflow_drop;
462 drvs->rx_address_filtered = port_stats->rx_address_filtered;
463 drvs->rx_alignment_symbol_errors =
464 port_stats->rx_alignment_symbol_errors;
465 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
466 drvs->tx_pauseframes = port_stats->tx_pauseframes;
467 drvs->tx_controlframes = port_stats->tx_controlframes;
468 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
469 drvs->jabber_events = port_stats->jabber_events;
470 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
471 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
472 drvs->forwarded_packets = rxf_stats->forwarded_packets;
473 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
474 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
475 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
476 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
477 if (be_roce_supported(adapter)) {
478 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
479 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
480 drvs->rx_roce_frames = port_stats->roce_frames_received;
481 drvs->roce_drops_crc = port_stats->roce_drops_crc;
482 drvs->roce_drops_payload_len =
483 port_stats->roce_drops_payload_len;
484 }
485}
486
425static void populate_lancer_stats(struct be_adapter *adapter) 487static void populate_lancer_stats(struct be_adapter *adapter)
426{ 488{
427 489
@@ -489,7 +551,7 @@ static void populate_erx_stats(struct be_adapter *adapter,
489 551
490void be_parse_stats(struct be_adapter *adapter) 552void be_parse_stats(struct be_adapter *adapter)
491{ 553{
492 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter); 554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
493 struct be_rx_obj *rxo; 555 struct be_rx_obj *rxo;
494 int i; 556 int i;
495 u32 erx_stat; 557 u32 erx_stat;
@@ -499,11 +561,13 @@ void be_parse_stats(struct be_adapter *adapter)
499 } else { 561 } else {
500 if (BE2_chip(adapter)) 562 if (BE2_chip(adapter))
501 populate_be_v0_stats(adapter); 563 populate_be_v0_stats(adapter);
502 else 564 else if (BE3_chip(adapter))
503 /* for BE3 and Skyhawk */ 565 /* for BE3 */
504 populate_be_v1_stats(adapter); 566 populate_be_v1_stats(adapter);
567 else
568 populate_be_v2_stats(adapter);
505 569
506 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */ 570 /* erx_v2 is longer than v0 and v1, so it is safe to use it for v0/v1 access */
507 for_all_rx_queues(adapter, rxo, i) { 571 for_all_rx_queues(adapter, rxo, i) {
508 erx_stat = erx->rx_drops_no_fragments[rxo->q.id]; 572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
509 populate_erx_stats(adapter, rxo, erx_stat); 573 populate_erx_stats(adapter, rxo, erx_stat);
@@ -935,8 +999,10 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
935 u32 start = txq->head; 999 u32 start = txq->head;
936 1000
937 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); 1001 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
938 if (!skb) 1002 if (!skb) {
1003 tx_stats(txo)->tx_drv_drops++;
939 return NETDEV_TX_OK; 1004 return NETDEV_TX_OK;
1005 }
940 1006
941 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); 1007 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
942 1008
@@ -965,6 +1031,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
965 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped); 1031 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
966 } else { 1032 } else {
967 txq->head = start; 1033 txq->head = start;
1034 tx_stats(txo)->tx_drv_drops++;
968 dev_kfree_skb_any(skb); 1035 dev_kfree_skb_any(skb);
969 } 1036 }
970 return NETDEV_TX_OK; 1037 return NETDEV_TX_OK;
@@ -1275,53 +1342,79 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1275 return status; 1342 return status;
1276} 1343}
1277 1344
1278static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo) 1345static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1346 ulong now)
1279{ 1347{
1280 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]); 1348 aic->rx_pkts_prev = rx_pkts;
1281 ulong now = jiffies; 1349 aic->tx_reqs_prev = tx_pkts;
1282 ulong delta = now - stats->rx_jiffies; 1350 aic->jiffies = now;
1283 u64 pkts; 1351}
1284 unsigned int start, eqd;
1285 1352
1286 if (!eqo->enable_aic) { 1353static void be_eqd_update(struct be_adapter *adapter)
1287 eqd = eqo->eqd; 1354{
1288 goto modify_eqd; 1355 struct be_set_eqd set_eqd[MAX_EVT_QS];
1289 } 1356 int eqd, i, num = 0, start;
1357 struct be_aic_obj *aic;
1358 struct be_eq_obj *eqo;
1359 struct be_rx_obj *rxo;
1360 struct be_tx_obj *txo;
1361 u64 rx_pkts, tx_pkts;
1362 ulong now;
1363 u32 pps, delta;
1290 1364
1291 if (eqo->idx >= adapter->num_rx_qs) 1365 for_all_evt_queues(adapter, eqo, i) {
1292 return; 1366 aic = &adapter->aic_obj[eqo->idx];
1367 if (!aic->enable) {
1368 if (aic->jiffies)
1369 aic->jiffies = 0;
1370 eqd = aic->et_eqd;
1371 goto modify_eqd;
1372 }
1293 1373
1294 stats = rx_stats(&adapter->rx_obj[eqo->idx]); 1374 rxo = &adapter->rx_obj[eqo->idx];
1375 do {
1376 start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1377 rx_pkts = rxo->stats.rx_pkts;
1378 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1295 1379
1296 /* Wrapped around */ 1380 txo = &adapter->tx_obj[eqo->idx];
1297 if (time_before(now, stats->rx_jiffies)) { 1381 do {
1298 stats->rx_jiffies = now; 1382 start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1299 return; 1383 tx_pkts = txo->stats.tx_reqs;
1300 } 1384 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1301 1385
1302 /* Update once a second */
1303 if (delta < HZ)
1304 return;
1305 1386
1306 do { 1387 /* Skip, if wrapped around or first calculation */
1307 start = u64_stats_fetch_begin_bh(&stats->sync); 1388 now = jiffies;
1308 pkts = stats->rx_pkts; 1389 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1309 } while (u64_stats_fetch_retry_bh(&stats->sync, start)); 1390 rx_pkts < aic->rx_pkts_prev ||
1310 1391 tx_pkts < aic->tx_reqs_prev) {
1311 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ); 1392 be_aic_update(aic, rx_pkts, tx_pkts, now);
1312 stats->rx_pkts_prev = pkts; 1393 continue;
1313 stats->rx_jiffies = now; 1394 }
1314 eqd = (stats->rx_pps / 110000) << 3;
1315 eqd = min(eqd, eqo->max_eqd);
1316 eqd = max(eqd, eqo->min_eqd);
1317 if (eqd < 10)
1318 eqd = 0;
1319 1395
1396 delta = jiffies_to_msecs(now - aic->jiffies);
1397 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1398 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1399 eqd = (pps / 15000) << 2;
1400
1401 if (eqd < 8)
1402 eqd = 0;
1403 eqd = min_t(u32, eqd, aic->max_eqd);
1404 eqd = max_t(u32, eqd, aic->min_eqd);
1405
1406 be_aic_update(aic, rx_pkts, tx_pkts, now);
1320modify_eqd: 1407modify_eqd:
1321 if (eqd != eqo->cur_eqd) { 1408 if (eqd != aic->prev_eqd) {
1322 be_cmd_modify_eqd(adapter, eqo->q.id, eqd); 1409 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1323 eqo->cur_eqd = eqd; 1410 set_eqd[num].eq_id = eqo->q.id;
1411 aic->prev_eqd = eqd;
1412 num++;
1413 }
1324 } 1414 }
1415
1416 if (num)
1417 be_cmd_modify_eqd(adapter, set_eqd, num);
1325} 1418}
1326 1419
1327static void be_rx_stats_update(struct be_rx_obj *rxo, 1420static void be_rx_stats_update(struct be_rx_obj *rxo,
@@ -1938,6 +2031,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
1938{ 2031{
1939 struct be_queue_info *eq; 2032 struct be_queue_info *eq;
1940 struct be_eq_obj *eqo; 2033 struct be_eq_obj *eqo;
2034 struct be_aic_obj *aic;
1941 int i, rc; 2035 int i, rc;
1942 2036
1943 adapter->num_evt_qs = min_t(u16, num_irqs(adapter), 2037 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
@@ -1946,11 +2040,12 @@ static int be_evt_queues_create(struct be_adapter *adapter)
1946 for_all_evt_queues(adapter, eqo, i) { 2040 for_all_evt_queues(adapter, eqo, i) {
1947 netif_napi_add(adapter->netdev, &eqo->napi, be_poll, 2041 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
1948 BE_NAPI_WEIGHT); 2042 BE_NAPI_WEIGHT);
2043 aic = &adapter->aic_obj[i];
1949 eqo->adapter = adapter; 2044 eqo->adapter = adapter;
1950 eqo->tx_budget = BE_TX_BUDGET; 2045 eqo->tx_budget = BE_TX_BUDGET;
1951 eqo->idx = i; 2046 eqo->idx = i;
1952 eqo->max_eqd = BE_MAX_EQD; 2047 aic->max_eqd = BE_MAX_EQD;
1953 eqo->enable_aic = true; 2048 aic->enable = true;
1954 2049
1955 eq = &eqo->q; 2050 eq = &eqo->q;
1956 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, 2051 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
@@ -2937,7 +3032,8 @@ static int be_vf_setup(struct be_adapter *adapter)
2937 goto err; 3032 goto err;
2938 vf_cfg->def_vid = def_vlan; 3033 vf_cfg->def_vid = def_vlan;
2939 3034
2940 be_cmd_enable_vf(adapter, vf + 1); 3035 if (!old_vfs)
3036 be_cmd_enable_vf(adapter, vf + 1);
2941 } 3037 }
2942 3038
2943 if (!old_vfs) { 3039 if (!old_vfs) {
@@ -2962,12 +3058,12 @@ static void BEx_get_resources(struct be_adapter *adapter,
2962 struct pci_dev *pdev = adapter->pdev; 3058 struct pci_dev *pdev = adapter->pdev;
2963 bool use_sriov = false; 3059 bool use_sriov = false;
2964 3060
2965 if (BE3_chip(adapter) && be_physfn(adapter)) { 3061 if (BE3_chip(adapter) && sriov_want(adapter)) {
2966 int max_vfs; 3062 int max_vfs;
2967 3063
2968 max_vfs = pci_sriov_get_totalvfs(pdev); 3064 max_vfs = pci_sriov_get_totalvfs(pdev);
2969 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; 3065 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
2970 use_sriov = res->max_vfs && num_vfs; 3066 use_sriov = res->max_vfs;
2971 } 3067 }
2972 3068
2973 if (be_physfn(adapter)) 3069 if (be_physfn(adapter))
@@ -2983,8 +3079,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
2983 res->max_vlans = BE_NUM_VLANS_SUPPORTED; 3079 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2984 res->max_mcast_mac = BE_MAX_MC; 3080 res->max_mcast_mac = BE_MAX_MC;
2985 3081
3082 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
2986 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) || 3083 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
2987 !be_physfn(adapter)) 3084 !be_physfn(adapter) || (adapter->port_num > 1))
2988 res->max_tx_qs = 1; 3085 res->max_tx_qs = 1;
2989 else 3086 else
2990 res->max_tx_qs = BE3_MAX_TX_QS; 3087 res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3026,14 +3123,6 @@ static int be_get_resources(struct be_adapter *adapter)
3026 adapter->res = res; 3123 adapter->res = res;
3027 } 3124 }
3028 3125
3029 /* For BE3 only check if FW suggests a different max-txqs value */
3030 if (BE3_chip(adapter)) {
3031 status = be_cmd_get_profile_config(adapter, &res, 0);
3032 if (!status && res.max_tx_qs)
3033 adapter->res.max_tx_qs =
3034 min(adapter->res.max_tx_qs, res.max_tx_qs);
3035 }
3036
3037 /* For Lancer, SH etc read per-function resource limits from FW. 3126 /* For Lancer, SH etc read per-function resource limits from FW.
3038 * GET_FUNC_CONFIG returns per function guaranteed limits. 3127 * GET_FUNC_CONFIG returns per function guaranteed limits.
3039 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits 3128 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
@@ -3258,7 +3347,7 @@ static int be_setup(struct be_adapter *adapter)
3258 be_cmd_set_flow_control(adapter, adapter->tx_fc, 3347 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3259 adapter->rx_fc); 3348 adapter->rx_fc);
3260 3349
3261 if (be_physfn(adapter) && num_vfs) { 3350 if (sriov_want(adapter)) {
3262 if (be_max_vfs(adapter)) 3351 if (be_max_vfs(adapter))
3263 be_vf_setup(adapter); 3352 be_vf_setup(adapter);
3264 else 3353 else
@@ -4077,9 +4166,11 @@ static int be_stats_init(struct be_adapter *adapter)
4077 cmd->size = sizeof(struct lancer_cmd_req_pport_stats); 4166 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4078 else if (BE2_chip(adapter)) 4167 else if (BE2_chip(adapter))
4079 cmd->size = sizeof(struct be_cmd_req_get_stats_v0); 4168 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4080 else 4169 else if (BE3_chip(adapter))
4081 /* BE3 and Skyhawk */
4082 cmd->size = sizeof(struct be_cmd_req_get_stats_v1); 4170 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4171 else
4172 /* ALL non-BE ASICs */
4173 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4083 4174
4084 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, 4175 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4085 GFP_KERNEL); 4176 GFP_KERNEL);
@@ -4113,7 +4204,6 @@ static void be_remove(struct pci_dev *pdev)
4113 4204
4114 pci_disable_pcie_error_reporting(pdev); 4205 pci_disable_pcie_error_reporting(pdev);
4115 4206
4116 pci_set_drvdata(pdev, NULL);
4117 pci_release_regions(pdev); 4207 pci_release_regions(pdev);
4118 pci_disable_device(pdev); 4208 pci_disable_device(pdev);
4119 4209
@@ -4262,7 +4352,6 @@ static void be_worker(struct work_struct *work)
4262 struct be_adapter *adapter = 4352 struct be_adapter *adapter =
4263 container_of(work, struct be_adapter, work.work); 4353 container_of(work, struct be_adapter, work.work);
4264 struct be_rx_obj *rxo; 4354 struct be_rx_obj *rxo;
4265 struct be_eq_obj *eqo;
4266 int i; 4355 int i;
4267 4356
4268 /* when interrupts are not yet enabled, just reap any pending 4357 /* when interrupts are not yet enabled, just reap any pending
@@ -4293,8 +4382,7 @@ static void be_worker(struct work_struct *work)
4293 } 4382 }
4294 } 4383 }
4295 4384
4296 for_all_evt_queues(adapter, eqo, i) 4385 be_eqd_update(adapter);
4297 be_eqd_update(adapter, eqo);
4298 4386
4299reschedule: 4387reschedule:
4300 adapter->work_counter++; 4388 adapter->work_counter++;
@@ -4370,9 +4458,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4370 } 4458 }
4371 } 4459 }
4372 4460
4373 status = pci_enable_pcie_error_reporting(pdev); 4461 if (be_physfn(adapter)) {
4374 if (status) 4462 status = pci_enable_pcie_error_reporting(pdev);
4375 dev_info(&pdev->dev, "Could not use PCIe error reporting\n"); 4463 if (!status)
4464 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4465 }
4376 4466
4377 status = be_ctrl_init(adapter); 4467 status = be_ctrl_init(adapter);
4378 if (status) 4468 if (status)
@@ -4443,7 +4533,6 @@ ctrl_clean:
4443 be_ctrl_cleanup(adapter); 4533 be_ctrl_cleanup(adapter);
4444free_netdev: 4534free_netdev:
4445 free_netdev(netdev); 4535 free_netdev(netdev);
4446 pci_set_drvdata(pdev, NULL);
4447rel_reg: 4536rel_reg:
4448 pci_release_regions(pdev); 4537 pci_release_regions(pdev);
4449disable_dev: 4538disable_dev:
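
To make the adaptive EQ-delay path above concrete, here is one worked pass of the be_eqd_update() arithmetic with made-up traffic figures (the numbers are illustrative only):

    rx_pkts - rx_pkts_prev = 200000, tx_reqs - tx_reqs_prev = 100000
    delta = 1000 ms since the last update

    pps = (200000 * 1000) / 1000 + (100000 * 1000) / 1000 = 300000 pkt/s
    eqd = (pps / 15000) << 2 = 20 << 2 = 80 usecs
          (80 is >= 8, so it is not forced to 0, and it already sits inside
          [min_eqd, max_eqd]; the ceiling BE_MAX_EQD is now 128 usecs)
    delay_multiplier sent via MODIFY_EQ_DELAY = (80 * 65) / 100 = 52
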
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c706b7a9397e..4b22a9579f85 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -699,7 +699,6 @@ static void fealnx_remove_one(struct pci_dev *pdev)
699 pci_iounmap(pdev, np->mem); 699 pci_iounmap(pdev, np->mem);
700 free_netdev(dev); 700 free_netdev(dev);
701 pci_release_regions(pdev); 701 pci_release_regions(pdev);
702 pci_set_drvdata(pdev, NULL);
703 } else 702 } else
704 printk(KERN_ERR "fealnx: remove for unknown device\n"); 703 printk(KERN_ERR "fealnx: remove for unknown device\n");
705} 704}
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 6b60582ce8cf..56f2f608a9f4 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1083,7 +1083,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
1083 1083
1084 mac_addr = of_get_mac_address(ofdev->dev.of_node); 1084 mac_addr = of_get_mac_address(ofdev->dev.of_node);
1085 if (mac_addr) 1085 if (mac_addr)
1086 memcpy(ndev->dev_addr, mac_addr, 6); 1086 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
1087 1087
1088 ret = fep->ops->allocate_bd(ndev); 1088 ret = fep->ops->allocate_bd(ndev);
1089 if (ret) 1089 if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9fbe4dda7a0e..d6d810cb97c7 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2918,7 +2918,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2918 struct gfar_priv_rx_q *rx_queue = NULL; 2918 struct gfar_priv_rx_q *rx_queue = NULL;
2919 int work_done = 0, work_done_per_q = 0; 2919 int work_done = 0, work_done_per_q = 0;
2920 int i, budget_per_q = 0; 2920 int i, budget_per_q = 0;
2921 int has_tx_work; 2921 int has_tx_work = 0;
2922 unsigned long rstat_rxf; 2922 unsigned long rstat_rxf;
2923 int num_act_queues; 2923 int num_act_queues;
2924 2924
@@ -2933,62 +2933,51 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2933 if (num_act_queues) 2933 if (num_act_queues)
2934 budget_per_q = budget/num_act_queues; 2934 budget_per_q = budget/num_act_queues;
2935 2935
2936 while (1) { 2936 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2937 has_tx_work = 0; 2937 tx_queue = priv->tx_queue[i];
2938 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { 2938 /* run Tx cleanup to completion */
2939 tx_queue = priv->tx_queue[i]; 2939 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2940 /* run Tx cleanup to completion */ 2940 gfar_clean_tx_ring(tx_queue);
2941 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { 2941 has_tx_work = 1;
2942 gfar_clean_tx_ring(tx_queue);
2943 has_tx_work = 1;
2944 }
2945 } 2942 }
2943 }
2946 2944
2947 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 2945 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2948 /* skip queue if not active */ 2946 /* skip queue if not active */
2949 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) 2947 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2950 continue; 2948 continue;
2951
2952 rx_queue = priv->rx_queue[i];
2953 work_done_per_q =
2954 gfar_clean_rx_ring(rx_queue, budget_per_q);
2955 work_done += work_done_per_q;
2956
2957 /* finished processing this queue */
2958 if (work_done_per_q < budget_per_q) {
2959 /* clear active queue hw indication */
2960 gfar_write(&regs->rstat,
2961 RSTAT_CLEAR_RXF0 >> i);
2962 rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i);
2963 num_act_queues--;
2964
2965 if (!num_act_queues)
2966 break;
2967 /* recompute budget per Rx queue */
2968 budget_per_q =
2969 (budget - work_done) / num_act_queues;
2970 }
2971 }
2972 2949
2973 if (work_done >= budget) 2950 rx_queue = priv->rx_queue[i];
2974 break; 2951 work_done_per_q =
2952 gfar_clean_rx_ring(rx_queue, budget_per_q);
2953 work_done += work_done_per_q;
2954
2955 /* finished processing this queue */
2956 if (work_done_per_q < budget_per_q) {
2957 /* clear active queue hw indication */
2958 gfar_write(&regs->rstat,
2959 RSTAT_CLEAR_RXF0 >> i);
2960 num_act_queues--;
2961
2962 if (!num_act_queues)
2963 break;
2964 }
2965 }
2975 2966
2976 if (!num_act_queues && !has_tx_work) { 2967 if (!num_act_queues && !has_tx_work) {
2977 2968
2978 napi_complete(napi); 2969 napi_complete(napi);
2979 2970
2980 /* Clear the halt bit in RSTAT */ 2971 /* Clear the halt bit in RSTAT */
2981 gfar_write(&regs->rstat, gfargrp->rstat); 2972 gfar_write(&regs->rstat, gfargrp->rstat);
2982 2973
2983 gfar_write(&regs->imask, IMASK_DEFAULT); 2974 gfar_write(&regs->imask, IMASK_DEFAULT);
2984 2975
2985 /* If we are coalescing interrupts, update the timer 2976 /* If we are coalescing interrupts, update the timer
2986 * Otherwise, clear it 2977 * Otherwise, clear it
2987 */ 2978 */
2988 gfar_configure_coalescing(priv, gfargrp->rx_bit_map, 2979 gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2989 gfargrp->tx_bit_map); 2980 gfargrp->tx_bit_map);
2990 break;
2991 }
2992 } 2981 }
2993 2982
2994 return work_done; 2983 return work_done;
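
The reworked gfar_poll() above does a single pass: it first reaps all TX rings, then shares the NAPI budget evenly between the RX queues the hardware flagged in rstat. A hedged sketch of that budget split (the helper is illustrative; hweight_long() is the generic bit population count):

static int example_rx_budget_per_queue(unsigned long rstat_rxf, int budget)
{
	int num_act_queues = hweight_long(rstat_rxf);

	/* no active RX queue means no RX budget to hand out */
	return num_act_queues ? budget / num_act_queues : 0;
}
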
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 04112b98ff5d..114c58f9d8d2 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1177,21 +1177,21 @@ static inline void gfar_read_filer(struct gfar_private *priv,
1177 *fpr = gfar_read(&regs->rqfpr); 1177 *fpr = gfar_read(&regs->rqfpr);
1178} 1178}
1179 1179
1180extern void lock_rx_qs(struct gfar_private *priv); 1180void lock_rx_qs(struct gfar_private *priv);
1181extern void lock_tx_qs(struct gfar_private *priv); 1181void lock_tx_qs(struct gfar_private *priv);
1182extern void unlock_rx_qs(struct gfar_private *priv); 1182void unlock_rx_qs(struct gfar_private *priv);
1183extern void unlock_tx_qs(struct gfar_private *priv); 1183void unlock_tx_qs(struct gfar_private *priv);
1184extern irqreturn_t gfar_receive(int irq, void *dev_id); 1184irqreturn_t gfar_receive(int irq, void *dev_id);
1185extern int startup_gfar(struct net_device *dev); 1185int startup_gfar(struct net_device *dev);
1186extern void stop_gfar(struct net_device *dev); 1186void stop_gfar(struct net_device *dev);
1187extern void gfar_halt(struct net_device *dev); 1187void gfar_halt(struct net_device *dev);
1188extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, 1188void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
1189 int enable, u32 regnum, u32 read); 1189 u32 regnum, u32 read);
1190extern void gfar_configure_coalescing_all(struct gfar_private *priv); 1190void gfar_configure_coalescing_all(struct gfar_private *priv);
1191void gfar_init_sysfs(struct net_device *dev); 1191void gfar_init_sysfs(struct net_device *dev);
1192int gfar_set_features(struct net_device *dev, netdev_features_t features); 1192int gfar_set_features(struct net_device *dev, netdev_features_t features);
1193extern void gfar_check_rx_parser_mode(struct gfar_private *priv); 1193void gfar_check_rx_parser_mode(struct gfar_private *priv);
1194extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features); 1194void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
1195 1195
1196extern const struct ethtool_ops gfar_ethtool_ops; 1196extern const struct ethtool_ops gfar_ethtool_ops;
1197 1197
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 5930c39672db..d58a3dfc95c2 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3899,7 +3899,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3899 3899
3900 mac_addr = of_get_mac_address(np); 3900 mac_addr = of_get_mac_address(np);
3901 if (mac_addr) 3901 if (mac_addr)
3902 memcpy(dev->dev_addr, mac_addr, 6); 3902 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
3903 3903
3904 ugeth->ug_info = ug_info; 3904 ugeth->ug_info = ug_info;
3905 ugeth->dev = device; 3905 ugeth->dev = device;
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index 6231bc02b964..1085257385d2 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -5,7 +5,7 @@
5config NET_VENDOR_FUJITSU 5config NET_VENDOR_FUJITSU
6 bool "Fujitsu devices" 6 bool "Fujitsu devices"
7 default y 7 default y
8 depends on ISA || PCMCIA 8 depends on PCMCIA
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 11 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 91227d03274e..37860096f744 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1098,7 +1098,7 @@ static int hp100_open(struct net_device *dev)
1098 if (request_irq(dev->irq, hp100_interrupt, 1098 if (request_irq(dev->irq, hp100_interrupt,
1099 lp->bus == HP100_BUS_PCI || lp->bus == 1099 lp->bus == HP100_BUS_PCI || lp->bus ==
1100 HP100_BUS_EISA ? IRQF_SHARED : 0, 1100 HP100_BUS_EISA ? IRQF_SHARED : 0,
1101 "hp100", dev)) { 1101 dev->name, dev)) {
1102 printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq); 1102 printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
1103 return -EAGAIN; 1103 return -EAGAIN;
1104 } 1104 }
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index e38816145395..a15877affc9b 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -711,7 +711,7 @@ static int init_i596_mem(struct net_device *dev)
711 i596_add_cmd(dev, &lp->cf_cmd.cmd); 711 i596_add_cmd(dev, &lp->cf_cmd.cmd);
712 712
713 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); 713 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
714 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6); 714 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
715 lp->sa_cmd.cmd.command = CmdSASetup; 715 lp->sa_cmd.cmd.command = CmdSASetup;
716 i596_add_cmd(dev, &lp->sa_cmd.cmd); 716 i596_add_cmd(dev, &lp->sa_cmd.cmd);
717 717
@@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit)
1155 err = -ENODEV; 1155 err = -ENODEV;
1156 goto out; 1156 goto out;
1157 } 1157 }
1158 memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */ 1158 memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */
1159 dev->base_addr = MVME_I596_BASE; 1159 dev->base_addr = MVME_I596_BASE;
1160 dev->irq = (unsigned) MVME16x_IRQ_I596; 1160 dev->irq = (unsigned) MVME16x_IRQ_I596;
1161 goto found; 1161 goto found;
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index d653bac4cfc4..861fa15e1e81 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -607,7 +607,7 @@ static int init_i596_mem(struct net_device *dev)
607 i596_add_cmd(dev, &dma->cf_cmd.cmd); 607 i596_add_cmd(dev, &dma->cf_cmd.cmd);
608 608
609 DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); 609 DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
610 memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6); 610 memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
611 dma->sa_cmd.cmd.command = SWAP16(CmdSASetup); 611 dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
612 DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd)); 612 DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
613 i596_add_cmd(dev, &dma->sa_cmd.cmd); 613 i596_add_cmd(dev, &dma->sa_cmd.cmd);
@@ -1396,13 +1396,13 @@ static void set_multicast_list(struct net_device *dev)
1396 netdev_for_each_mc_addr(ha, dev) { 1396 netdev_for_each_mc_addr(ha, dev) {
1397 if (!cnt--) 1397 if (!cnt--)
1398 break; 1398 break;
1399 memcpy(cp, ha->addr, 6); 1399 memcpy(cp, ha->addr, ETH_ALEN);
1400 if (i596_debug > 1) 1400 if (i596_debug > 1)
1401 DEB(DEB_MULTI, 1401 DEB(DEB_MULTI,
1402 printk(KERN_DEBUG 1402 printk(KERN_DEBUG
1403 "%s: Adding address %pM\n", 1403 "%s: Adding address %pM\n",
1404 dev->name, cp)); 1404 dev->name, cp));
1405 cp += 6; 1405 cp += ETH_ALEN;
1406 } 1406 }
1407 DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); 1407 DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
1408 i596_add_cmd(dev, &cmd->cmd); 1408 i596_add_cmd(dev, &cmd->cmd);
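Same ETH_ALEN substitution, here while packing the device's multicast list into a command block. A sketch of walking the list with netdev_for_each_mc_addr(); the buffer and limit are hypothetical and assumed to be sized by the caller:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical: copy up to 'max' multicast addresses, ETH_ALEN bytes each. */
static int example_pack_mc_list(struct net_device *dev, u8 *buf, int max)
{
        struct netdev_hw_addr *ha;
        int n = 0;

        netdev_for_each_mc_addr(ha, dev) {
                if (n >= max)
                        break;
                memcpy(buf + n * ETH_ALEN, ha->addr, ETH_ALEN);
                n++;
        }
        return n;
}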
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 6b5c7222342c..ef21a2e10180 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2676,7 +2676,7 @@ static int emac_init_config(struct emac_instance *dev)
2676 np->full_name); 2676 np->full_name);
2677 return -ENXIO; 2677 return -ENXIO;
2678 } 2678 }
2679 memcpy(dev->ndev->dev_addr, p, 6); 2679 memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
2680 2680
2681 /* IAHT and GAHT filter parameterization */ 2681 /* IAHT and GAHT filter parameterization */
2682 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { 2682 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
index 59a92d5870b5..9c45efe4c8fe 100644
--- a/drivers/net/ethernet/ibm/emac/debug.h
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -29,13 +29,13 @@
29struct emac_instance; 29struct emac_instance;
30struct mal_instance; 30struct mal_instance;
31 31
32extern void emac_dbg_register(struct emac_instance *dev); 32void emac_dbg_register(struct emac_instance *dev);
33extern void emac_dbg_unregister(struct emac_instance *dev); 33void emac_dbg_unregister(struct emac_instance *dev);
34extern void mal_dbg_register(struct mal_instance *mal); 34void mal_dbg_register(struct mal_instance *mal);
35extern void mal_dbg_unregister(struct mal_instance *mal); 35void mal_dbg_unregister(struct mal_instance *mal);
36extern int emac_init_debug(void) __init; 36int emac_init_debug(void) __init;
37extern void emac_fini_debug(void) __exit; 37void emac_fini_debug(void) __exit;
38extern void emac_dbg_dump_all(void); 38void emac_dbg_dump_all(void);
39 39
40# define DBG_LEVEL 1 40# define DBG_LEVEL 1
41 41
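This header (and the rgmii/tah/zmii and e1000/e1000e headers below) drops the redundant `extern` keyword from function prototypes; function declarations already have external linkage, so the keyword adds nothing. Illustration with made-up names:

/* before: legal but noisy */
extern int foo_init(void);
extern void foo_exit(void);

/* after this series: identical meaning, preferred kernel style */
int foo_init(void);
void foo_exit(void);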
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h
index 668bceeff4a2..d4f1374d1900 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.h
+++ b/drivers/net/ethernet/ibm/emac/rgmii.h
@@ -56,15 +56,15 @@ struct rgmii_instance {
56 56
57#ifdef CONFIG_IBM_EMAC_RGMII 57#ifdef CONFIG_IBM_EMAC_RGMII
58 58
59extern int rgmii_init(void); 59int rgmii_init(void);
60extern void rgmii_exit(void); 60void rgmii_exit(void);
61extern int rgmii_attach(struct platform_device *ofdev, int input, int mode); 61int rgmii_attach(struct platform_device *ofdev, int input, int mode);
62extern void rgmii_detach(struct platform_device *ofdev, int input); 62void rgmii_detach(struct platform_device *ofdev, int input);
63extern void rgmii_get_mdio(struct platform_device *ofdev, int input); 63void rgmii_get_mdio(struct platform_device *ofdev, int input);
64extern void rgmii_put_mdio(struct platform_device *ofdev, int input); 64void rgmii_put_mdio(struct platform_device *ofdev, int input);
65extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed); 65void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
66extern int rgmii_get_regs_len(struct platform_device *ofdev); 66int rgmii_get_regs_len(struct platform_device *ofdev);
67extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf); 67void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
68 68
69#else 69#else
70 70
diff --git a/drivers/net/ethernet/ibm/emac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h
index 350b7096a041..4d5f336f07b3 100644
--- a/drivers/net/ethernet/ibm/emac/tah.h
+++ b/drivers/net/ethernet/ibm/emac/tah.h
@@ -72,13 +72,13 @@ struct tah_instance {
72 72
73#ifdef CONFIG_IBM_EMAC_TAH 73#ifdef CONFIG_IBM_EMAC_TAH
74 74
75extern int tah_init(void); 75int tah_init(void);
76extern void tah_exit(void); 76void tah_exit(void);
77extern int tah_attach(struct platform_device *ofdev, int channel); 77int tah_attach(struct platform_device *ofdev, int channel);
78extern void tah_detach(struct platform_device *ofdev, int channel); 78void tah_detach(struct platform_device *ofdev, int channel);
79extern void tah_reset(struct platform_device *ofdev); 79void tah_reset(struct platform_device *ofdev);
80extern int tah_get_regs_len(struct platform_device *ofdev); 80int tah_get_regs_len(struct platform_device *ofdev);
81extern void *tah_dump_regs(struct platform_device *ofdev, void *buf); 81void *tah_dump_regs(struct platform_device *ofdev, void *buf);
82 82
83#else 83#else
84 84
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index 455bfb085493..0959c55b1459 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -53,15 +53,15 @@ struct zmii_instance {
53 53
54#ifdef CONFIG_IBM_EMAC_ZMII 54#ifdef CONFIG_IBM_EMAC_ZMII
55 55
56extern int zmii_init(void); 56int zmii_init(void);
57extern void zmii_exit(void); 57void zmii_exit(void);
58extern int zmii_attach(struct platform_device *ofdev, int input, int *mode); 58int zmii_attach(struct platform_device *ofdev, int input, int *mode);
59extern void zmii_detach(struct platform_device *ofdev, int input); 59void zmii_detach(struct platform_device *ofdev, int input);
60extern void zmii_get_mdio(struct platform_device *ofdev, int input); 60void zmii_get_mdio(struct platform_device *ofdev, int input);
61extern void zmii_put_mdio(struct platform_device *ofdev, int input); 61void zmii_put_mdio(struct platform_device *ofdev, int input);
62extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed); 62void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
63extern int zmii_get_regs_len(struct platform_device *ocpdev); 63int zmii_get_regs_len(struct platform_device *ocpdev);
64extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf); 64void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
65 65
66#else 66#else
67# define zmii_init() 0 67# define zmii_init() 0
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 5d41aee69d16..952d795230a4 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1185,7 +1185,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1185 netdev_for_each_mc_addr(ha, netdev) { 1185 netdev_for_each_mc_addr(ha, netdev) {
1186 /* add the multicast address to the filter table */ 1186 /* add the multicast address to the filter table */
1187 unsigned long mcast_addr = 0; 1187 unsigned long mcast_addr = 0;
1188 memcpy(((char *)&mcast_addr)+2, ha->addr, 6); 1188 memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
1189 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1189 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1190 IbmVethMcastAddFilter, 1190 IbmVethMcastAddFilter,
1191 mcast_addr); 1191 mcast_addr);
@@ -1370,7 +1370,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1370 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); 1370 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1371 1371
1372 adapter->mac_addr = 0; 1372 adapter->mac_addr = 0;
1373 memcpy(&adapter->mac_addr, mac_addr_p, 6); 1373 memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
1374 1374
1375 netdev->irq = dev->irq; 1375 netdev->irq = dev->irq;
1376 netdev->netdev_ops = &ibmveth_netdev_ops; 1376 netdev->netdev_ops = &ibmveth_netdev_ops;
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index bdf5023724e7..25045ae07171 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2183,7 +2183,6 @@ static void ipg_remove(struct pci_dev *pdev)
2183 2183
2184 free_netdev(dev); 2184 free_netdev(dev);
2185 pci_disable_device(pdev); 2185 pci_disable_device(pdev);
2186 pci_set_drvdata(pdev, NULL);
2187} 2186}
2188 2187
2189static const struct net_device_ops ipg_netdev_ops = { 2188static const struct net_device_ops ipg_netdev_ops = {
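This hunk (and the e100 ones below) removes pci_set_drvdata(pdev, NULL) from the teardown paths; the driver core now clears drvdata when a device is unbound, so the explicit reset is redundant. A sketch of the resulting remove() shape, with hypothetical names and an assumed netdev-based driver:

#include <linux/pci.h>
#include <linux/netdevice.h>

/* Hypothetical .remove: no pci_set_drvdata(pdev, NULL) needed. */
static void example_remove(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        unregister_netdev(dev);
        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}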
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada6e210279f..cbaba4442d4b 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2985,7 +2985,6 @@ err_out_free_res:
2985err_out_disable_pdev: 2985err_out_disable_pdev:
2986 pci_disable_device(pdev); 2986 pci_disable_device(pdev);
2987err_out_free_dev: 2987err_out_free_dev:
2988 pci_set_drvdata(pdev, NULL);
2989 free_netdev(netdev); 2988 free_netdev(netdev);
2990 return err; 2989 return err;
2991} 2990}
@@ -3003,7 +3002,6 @@ static void e100_remove(struct pci_dev *pdev)
3003 free_netdev(netdev); 3002 free_netdev(netdev);
3004 pci_release_regions(pdev); 3003 pci_release_regions(pdev);
3005 pci_disable_device(pdev); 3004 pci_disable_device(pdev);
3006 pci_set_drvdata(pdev, NULL);
3007 } 3005 }
3008} 3006}
3009 3007
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 26d9cd59ec75..58c147271a36 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -325,7 +325,7 @@ enum e1000_state_t {
325#undef pr_fmt 325#undef pr_fmt
326#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 326#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
327 327
328extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); 328struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
329#define e_dbg(format, arg...) \ 329#define e_dbg(format, arg...) \
330 netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) 330 netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
331#define e_err(msglvl, format, arg...) \ 331#define e_err(msglvl, format, arg...) \
@@ -346,20 +346,20 @@ extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
346extern char e1000_driver_name[]; 346extern char e1000_driver_name[];
347extern const char e1000_driver_version[]; 347extern const char e1000_driver_version[];
348 348
349extern int e1000_up(struct e1000_adapter *adapter); 349int e1000_up(struct e1000_adapter *adapter);
350extern void e1000_down(struct e1000_adapter *adapter); 350void e1000_down(struct e1000_adapter *adapter);
351extern void e1000_reinit_locked(struct e1000_adapter *adapter); 351void e1000_reinit_locked(struct e1000_adapter *adapter);
352extern void e1000_reset(struct e1000_adapter *adapter); 352void e1000_reset(struct e1000_adapter *adapter);
353extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); 353int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
354extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); 354int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
355extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); 355int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
356extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); 356void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
357extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); 357void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
358extern void e1000_update_stats(struct e1000_adapter *adapter); 358void e1000_update_stats(struct e1000_adapter *adapter);
359extern bool e1000_has_link(struct e1000_adapter *adapter); 359bool e1000_has_link(struct e1000_adapter *adapter);
360extern void e1000_power_up_phy(struct e1000_adapter *); 360void e1000_power_up_phy(struct e1000_adapter *);
361extern void e1000_set_ethtool_ops(struct net_device *netdev); 361void e1000_set_ethtool_ops(struct net_device *netdev);
362extern void e1000_check_options(struct e1000_adapter *adapter); 362void e1000_check_options(struct e1000_adapter *adapter);
363extern char *e1000_get_hw_dev_name(struct e1000_hw *hw); 363char *e1000_get_hw_dev_name(struct e1000_hw *hw);
364 364
365#endif /* _E1000_H_ */ 365#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ad0edd11015d..0150f7fc893d 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -472,26 +472,25 @@ enum latency_range {
472extern char e1000e_driver_name[]; 472extern char e1000e_driver_name[];
473extern const char e1000e_driver_version[]; 473extern const char e1000e_driver_version[];
474 474
475extern void e1000e_check_options(struct e1000_adapter *adapter); 475void e1000e_check_options(struct e1000_adapter *adapter);
476extern void e1000e_set_ethtool_ops(struct net_device *netdev); 476void e1000e_set_ethtool_ops(struct net_device *netdev);
477 477
478extern int e1000e_up(struct e1000_adapter *adapter); 478int e1000e_up(struct e1000_adapter *adapter);
479extern void e1000e_down(struct e1000_adapter *adapter); 479void e1000e_down(struct e1000_adapter *adapter);
480extern void e1000e_reinit_locked(struct e1000_adapter *adapter); 480void e1000e_reinit_locked(struct e1000_adapter *adapter);
481extern void e1000e_reset(struct e1000_adapter *adapter); 481void e1000e_reset(struct e1000_adapter *adapter);
482extern void e1000e_power_up_phy(struct e1000_adapter *adapter); 482void e1000e_power_up_phy(struct e1000_adapter *adapter);
483extern int e1000e_setup_rx_resources(struct e1000_ring *ring); 483int e1000e_setup_rx_resources(struct e1000_ring *ring);
484extern int e1000e_setup_tx_resources(struct e1000_ring *ring); 484int e1000e_setup_tx_resources(struct e1000_ring *ring);
485extern void e1000e_free_rx_resources(struct e1000_ring *ring); 485void e1000e_free_rx_resources(struct e1000_ring *ring);
486extern void e1000e_free_tx_resources(struct e1000_ring *ring); 486void e1000e_free_tx_resources(struct e1000_ring *ring);
487extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, 487struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
488 struct rtnl_link_stats64 488 struct rtnl_link_stats64 *stats);
489 *stats); 489void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
490extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 490void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
491extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 491void e1000e_get_hw_control(struct e1000_adapter *adapter);
492extern void e1000e_get_hw_control(struct e1000_adapter *adapter); 492void e1000e_release_hw_control(struct e1000_adapter *adapter);
493extern void e1000e_release_hw_control(struct e1000_adapter *adapter); 493void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
494extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
495 494
496extern unsigned int copybreak; 495extern unsigned int copybreak;
497 496
@@ -508,8 +507,8 @@ extern const struct e1000_info e1000_pch2_info;
508extern const struct e1000_info e1000_pch_lpt_info; 507extern const struct e1000_info e1000_pch_lpt_info;
509extern const struct e1000_info e1000_es2_info; 508extern const struct e1000_info e1000_es2_info;
510 509
511extern void e1000e_ptp_init(struct e1000_adapter *adapter); 510void e1000e_ptp_init(struct e1000_adapter *adapter);
512extern void e1000e_ptp_remove(struct e1000_adapter *adapter); 511void e1000e_ptp_remove(struct e1000_adapter *adapter);
513 512
514static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) 513static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
515{ 514{
@@ -536,7 +535,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
536 return hw->phy.ops.write_reg_locked(hw, offset, data); 535 return hw->phy.ops.write_reg_locked(hw, offset, data);
537} 536}
538 537
539extern void e1000e_reload_nvm_generic(struct e1000_hw *hw); 538void e1000e_reload_nvm_generic(struct e1000_hw *hw);
540 539
541static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) 540static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
542{ 541{
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index b5252eb8a6c7..49572dcdba87 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -347,9 +347,9 @@ struct i40e_vsi {
347 u32 rx_buf_failed; 347 u32 rx_buf_failed;
348 u32 rx_page_failed; 348 u32 rx_page_failed;
349 349
350 /* These are arrays of rings, allocated at run-time */ 350 /* These are containers of ring pointers, allocated at run-time */
351 struct i40e_ring *rx_rings; 351 struct i40e_ring **rx_rings;
352 struct i40e_ring *tx_rings; 352 struct i40e_ring **tx_rings;
353 353
354 u16 work_limit; 354 u16 work_limit;
355 /* high bit set means dynamic, use accessor routines to read/write. 355 /* high bit set means dynamic, use accessor routines to read/write.
@@ -366,7 +366,7 @@ struct i40e_vsi {
366 u8 dtype; 366 u8 dtype;
367 367
368 /* List of q_vectors allocated to this VSI */ 368 /* List of q_vectors allocated to this VSI */
369 struct i40e_q_vector *q_vectors; 369 struct i40e_q_vector **q_vectors;
370 int num_q_vectors; 370 int num_q_vectors;
371 int base_vector; 371 int base_vector;
372 372
@@ -422,8 +422,9 @@ struct i40e_q_vector {
422 422
423 u8 num_ringpairs; /* total number of ring pairs in vector */ 423 u8 num_ringpairs; /* total number of ring pairs in vector */
424 424
425 char name[IFNAMSIZ + 9];
426 cpumask_t affinity_mask; 425 cpumask_t affinity_mask;
426 struct rcu_head rcu; /* to avoid race with update stats on free */
427 char name[IFNAMSIZ + 9];
427} ____cacheline_internodealigned_in_smp; 428} ____cacheline_internodealigned_in_smp;
428 429
429/* lan device */ 430/* lan device */
@@ -544,6 +545,7 @@ static inline void i40e_dbg_init(void) {}
544static inline void i40e_dbg_exit(void) {} 545static inline void i40e_dbg_exit(void) {}
545#endif /* CONFIG_DEBUG_FS*/ 546#endif /* CONFIG_DEBUG_FS*/
546void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); 547void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
548void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
547int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 549int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
548void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); 550void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
549int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); 551int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
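The VSI now stores arrays of ring and q_vector pointers rather than flat arrays of structs, and the q_vector gains an rcu_head so stats readers can keep using a pointer they sampled while a reset frees and reallocates the backing memory. The later hunks rely on each Tx/Rx pair living in one allocation, so the Rx ring is reachable as &tx_ring[1]. A simplified allocation sketch with hypothetical type and field names:

#include <linux/slab.h>

struct ex_ring {
        int count;
        /* ... descriptors, stats, etc. ... */
};

struct ex_vsi {
        int num_queue_pairs;
        struct ex_ring **tx_rings;      /* arrays of pointers, swappable */
        struct ex_ring **rx_rings;
};

/* Hypothetical: one pointer block for both arrays, one memory block
 * per queue pair so the Rx ring directly follows its Tx ring. */
static int ex_alloc_rings(struct ex_vsi *vsi)
{
        int i;

        vsi->tx_rings = kcalloc(vsi->num_queue_pairs,
                                2 * sizeof(*vsi->tx_rings), GFP_KERNEL);
        if (!vsi->tx_rings)
                return -ENOMEM;
        vsi->rx_rings = &vsi->tx_rings[vsi->num_queue_pairs];

        for (i = 0; i < vsi->num_queue_pairs; i++) {
                struct ex_ring *tx = kzalloc(2 * sizeof(*tx), GFP_KERNEL);

                if (!tx)
                        return -ENOMEM; /* caller unwinds in this sketch */
                vsi->tx_rings[i] = tx;
                vsi->rx_rings[i] = &tx[1];      /* paired Rx ring */
        }
        return 0;
}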
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8dbd91f64b74..ef4cb1cf31f2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -151,9 +151,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
151 size_t count, loff_t *ppos) 151 size_t count, loff_t *ppos)
152{ 152{
153 struct i40e_pf *pf = filp->private_data; 153 struct i40e_pf *pf = filp->private_data;
154 char dump_request_buf[16];
155 bool seid_found = false; 154 bool seid_found = false;
156 int bytes_not_copied;
157 long seid = -1; 155 long seid = -1;
158 int buflen = 0; 156 int buflen = 0;
159 int i, ret; 157 int i, ret;
@@ -163,21 +161,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
163 /* don't allow partial writes */ 161 /* don't allow partial writes */
164 if (*ppos != 0) 162 if (*ppos != 0)
165 return 0; 163 return 0;
166 if (count >= sizeof(dump_request_buf))
167 return -ENOSPC;
168
169 bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
170 if (bytes_not_copied < 0)
171 return bytes_not_copied;
172 if (bytes_not_copied > 0)
173 count -= bytes_not_copied;
174 dump_request_buf[count] = '\0';
175 164
176 /* decode the SEID given to be dumped */ 165 /* decode the SEID given to be dumped */
177 ret = kstrtol(dump_request_buf, 0, &seid); 166 ret = kstrtol_from_user(buffer, count, 0, &seid);
178 if (ret < 0) { 167
179 dev_info(&pf->pdev->dev, "bad seid value '%s'\n", 168 if (ret) {
180 dump_request_buf); 169 dev_info(&pf->pdev->dev, "bad seid value\n");
181 } else if (seid == 0) { 170 } else if (seid == 0) {
182 seid_found = true; 171 seid_found = true;
183 172
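The manual copy_from_user() into a fixed stack buffer followed by kstrtol() is collapsed into kstrtol_from_user(), which bounds the copy and parses in one call. A hedged sketch of a debugfs write handler built the same way; the handler name is hypothetical:

#include <linux/fs.h>
#include <linux/kernel.h>       /* kstrtol_from_user() */

/* Hypothetical debugfs write: parse one integer from userspace. */
static ssize_t ex_dbg_write(struct file *filp, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        long val;
        int ret;

        if (*ppos != 0)         /* don't allow partial writes */
                return 0;

        ret = kstrtol_from_user(buffer, count, 0, &val);
        if (ret)
                return ret;

        /* ... act on 'val' ... */
        return count;
}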
@@ -245,26 +234,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
245 memcpy(p, vsi, len); 234 memcpy(p, vsi, len);
246 p += len; 235 p += len;
247 236
248 len = (sizeof(struct i40e_q_vector) 237 if (vsi->num_q_vectors) {
249 * vsi->num_q_vectors); 238 len = (sizeof(struct i40e_q_vector)
250 memcpy(p, vsi->q_vectors, len); 239 * vsi->num_q_vectors);
251 p += len; 240 memcpy(p, vsi->q_vectors, len);
252 241 p += len;
253 len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs); 242 }
254 memcpy(p, vsi->tx_rings, len);
255 p += len;
256 memcpy(p, vsi->rx_rings, len);
257 p += len;
258 243
259 for (i = 0; i < vsi->num_queue_pairs; i++) { 244 if (vsi->num_queue_pairs) {
260 len = sizeof(struct i40e_tx_buffer); 245 len = (sizeof(struct i40e_ring) *
261 memcpy(p, vsi->tx_rings[i].tx_bi, len); 246 vsi->num_queue_pairs);
247 memcpy(p, vsi->tx_rings, len);
248 p += len;
249 memcpy(p, vsi->rx_rings, len);
262 p += len; 250 p += len;
263 } 251 }
264 for (i = 0; i < vsi->num_queue_pairs; i++) { 252
253 if (vsi->tx_rings[0]) {
254 len = sizeof(struct i40e_tx_buffer);
255 for (i = 0; i < vsi->num_queue_pairs; i++) {
256 memcpy(p, vsi->tx_rings[i]->tx_bi, len);
257 p += len;
258 }
265 len = sizeof(struct i40e_rx_buffer); 259 len = sizeof(struct i40e_rx_buffer);
266 memcpy(p, vsi->rx_rings[i].rx_bi, len); 260 for (i = 0; i < vsi->num_queue_pairs; i++) {
267 p += len; 261 memcpy(p, vsi->rx_rings[i]->rx_bi, len);
262 p += len;
263 }
268 } 264 }
269 265
270 /* macvlan filter list */ 266 /* macvlan filter list */
@@ -484,100 +480,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
484 " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n", 480 " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
485 vsi->tx_restart, vsi->tx_busy, 481 vsi->tx_restart, vsi->tx_busy,
486 vsi->rx_buf_failed, vsi->rx_page_failed); 482 vsi->rx_buf_failed, vsi->rx_page_failed);
487 if (vsi->rx_rings) { 483 rcu_read_lock();
488 for (i = 0; i < vsi->num_queue_pairs; i++) { 484 for (i = 0; i < vsi->num_queue_pairs; i++) {
489 dev_info(&pf->pdev->dev, 485 struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
490 " rx_rings[%i]: desc = %p\n", 486 if (!rx_ring)
491 i, vsi->rx_rings[i].desc); 487 continue;
492 dev_info(&pf->pdev->dev, 488
493 " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", 489 dev_info(&pf->pdev->dev,
494 i, vsi->rx_rings[i].dev, 490 " rx_rings[%i]: desc = %p\n",
495 vsi->rx_rings[i].netdev, 491 i, rx_ring->desc);
496 vsi->rx_rings[i].rx_bi); 492 dev_info(&pf->pdev->dev,
497 dev_info(&pf->pdev->dev, 493 " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
498 " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", 494 i, rx_ring->dev,
499 i, vsi->rx_rings[i].state, 495 rx_ring->netdev,
500 vsi->rx_rings[i].queue_index, 496 rx_ring->rx_bi);
501 vsi->rx_rings[i].reg_idx); 497 dev_info(&pf->pdev->dev,
502 dev_info(&pf->pdev->dev, 498 " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
503 " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", 499 i, rx_ring->state,
504 i, vsi->rx_rings[i].rx_hdr_len, 500 rx_ring->queue_index,
505 vsi->rx_rings[i].rx_buf_len, 501 rx_ring->reg_idx);
506 vsi->rx_rings[i].dtype); 502 dev_info(&pf->pdev->dev,
507 dev_info(&pf->pdev->dev, 503 " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
508 " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", 504 i, rx_ring->rx_hdr_len,
509 i, vsi->rx_rings[i].hsplit, 505 rx_ring->rx_buf_len,
510 vsi->rx_rings[i].next_to_use, 506 rx_ring->dtype);
511 vsi->rx_rings[i].next_to_clean, 507 dev_info(&pf->pdev->dev,
512 vsi->rx_rings[i].ring_active); 508 " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
513 dev_info(&pf->pdev->dev, 509 i, rx_ring->hsplit,
514 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", 510 rx_ring->next_to_use,
515 i, vsi->rx_rings[i].rx_stats.packets, 511 rx_ring->next_to_clean,
516 vsi->rx_rings[i].rx_stats.bytes, 512 rx_ring->ring_active);
517 vsi->rx_rings[i].rx_stats.non_eop_descs); 513 dev_info(&pf->pdev->dev,
518 dev_info(&pf->pdev->dev, 514 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
519 " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n", 515 i, rx_ring->stats.packets,
520 i, 516 rx_ring->stats.bytes,
521 vsi->rx_rings[i].rx_stats.alloc_rx_page_failed, 517 rx_ring->rx_stats.non_eop_descs);
522 vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed); 518 dev_info(&pf->pdev->dev,
523 dev_info(&pf->pdev->dev, 519 " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
524 " rx_rings[%i]: size = %i, dma = 0x%08lx\n", 520 i,
525 i, vsi->rx_rings[i].size, 521 rx_ring->rx_stats.alloc_rx_page_failed,
526 (long unsigned int)vsi->rx_rings[i].dma); 522 rx_ring->rx_stats.alloc_rx_buff_failed);
527 dev_info(&pf->pdev->dev, 523 dev_info(&pf->pdev->dev,
528 " rx_rings[%i]: vsi = %p, q_vector = %p\n", 524 " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
529 i, vsi->rx_rings[i].vsi, 525 i, rx_ring->size,
530 vsi->rx_rings[i].q_vector); 526 (long unsigned int)rx_ring->dma);
531 } 527 dev_info(&pf->pdev->dev,
528 " rx_rings[%i]: vsi = %p, q_vector = %p\n",
529 i, rx_ring->vsi,
530 rx_ring->q_vector);
532 } 531 }
533 if (vsi->tx_rings) { 532 for (i = 0; i < vsi->num_queue_pairs; i++) {
534 for (i = 0; i < vsi->num_queue_pairs; i++) { 533 struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
535 dev_info(&pf->pdev->dev, 534 if (!tx_ring)
536 " tx_rings[%i]: desc = %p\n", 535 continue;
537 i, vsi->tx_rings[i].desc); 536 dev_info(&pf->pdev->dev,
538 dev_info(&pf->pdev->dev, 537 " tx_rings[%i]: desc = %p\n",
539 " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", 538 i, tx_ring->desc);
540 i, vsi->tx_rings[i].dev, 539 dev_info(&pf->pdev->dev,
541 vsi->tx_rings[i].netdev, 540 " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
542 vsi->tx_rings[i].tx_bi); 541 i, tx_ring->dev,
543 dev_info(&pf->pdev->dev, 542 tx_ring->netdev,
544 " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", 543 tx_ring->tx_bi);
545 i, vsi->tx_rings[i].state, 544 dev_info(&pf->pdev->dev,
546 vsi->tx_rings[i].queue_index, 545 " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
547 vsi->tx_rings[i].reg_idx); 546 i, tx_ring->state,
548 dev_info(&pf->pdev->dev, 547 tx_ring->queue_index,
549 " tx_rings[%i]: dtype = %d\n", 548 tx_ring->reg_idx);
550 i, vsi->tx_rings[i].dtype); 549 dev_info(&pf->pdev->dev,
551 dev_info(&pf->pdev->dev, 550 " tx_rings[%i]: dtype = %d\n",
552 " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", 551 i, tx_ring->dtype);
553 i, vsi->tx_rings[i].hsplit, 552 dev_info(&pf->pdev->dev,
554 vsi->tx_rings[i].next_to_use, 553 " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
555 vsi->tx_rings[i].next_to_clean, 554 i, tx_ring->hsplit,
556 vsi->tx_rings[i].ring_active); 555 tx_ring->next_to_use,
557 dev_info(&pf->pdev->dev, 556 tx_ring->next_to_clean,
558 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", 557 tx_ring->ring_active);
559 i, vsi->tx_rings[i].tx_stats.packets, 558 dev_info(&pf->pdev->dev,
560 vsi->tx_rings[i].tx_stats.bytes, 559 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
561 vsi->tx_rings[i].tx_stats.restart_queue); 560 i, tx_ring->stats.packets,
562 dev_info(&pf->pdev->dev, 561 tx_ring->stats.bytes,
563 " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n", 562 tx_ring->tx_stats.restart_queue);
564 i, 563 dev_info(&pf->pdev->dev,
565 vsi->tx_rings[i].tx_stats.tx_busy, 564 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
566 vsi->tx_rings[i].tx_stats.completed, 565 i,
567 vsi->tx_rings[i].tx_stats.tx_done_old); 566 tx_ring->tx_stats.tx_busy,
568 dev_info(&pf->pdev->dev, 567 tx_ring->tx_stats.tx_done_old);
569 " tx_rings[%i]: size = %i, dma = 0x%08lx\n", 568 dev_info(&pf->pdev->dev,
570 i, vsi->tx_rings[i].size, 569 " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
571 (long unsigned int)vsi->tx_rings[i].dma); 570 i, tx_ring->size,
572 dev_info(&pf->pdev->dev, 571 (long unsigned int)tx_ring->dma);
573 " tx_rings[%i]: vsi = %p, q_vector = %p\n", 572 dev_info(&pf->pdev->dev,
574 i, vsi->tx_rings[i].vsi, 573 " tx_rings[%i]: vsi = %p, q_vector = %p\n",
575 vsi->tx_rings[i].q_vector); 574 i, tx_ring->vsi,
576 dev_info(&pf->pdev->dev, 575 tx_ring->q_vector);
577 " tx_rings[%i]: DCB tc = %d\n", 576 dev_info(&pf->pdev->dev,
578 i, vsi->tx_rings[i].dcb_tc); 577 " tx_rings[%i]: DCB tc = %d\n",
579 } 578 i, tx_ring->dcb_tc);
580 } 579 }
580 rcu_read_unlock();
581 dev_info(&pf->pdev->dev, 581 dev_info(&pf->pdev->dev,
582 " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n", 582 " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
583 vsi->work_limit, vsi->rx_itr_setting, 583 vsi->work_limit, vsi->rx_itr_setting,
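The dump routine now samples each ring pointer once with ACCESS_ONCE() under rcu_read_lock() and skips NULL slots, instead of indexing into a flat ring array that a concurrent reset could be tearing down. A minimal sketch of that access pattern, reusing the hypothetical ex_vsi/ex_ring types from the allocation sketch above (ACCESS_ONCE matches this kernel version; later kernels spell it READ_ONCE or use rcu_dereference):

#include <linux/compiler.h>
#include <linux/rcupdate.h>

/* Hypothetical: walk the swappable ring pointer array for a read-only dump. */
static void ex_dump_rings(struct ex_vsi *vsi)
{
        int i;

        rcu_read_lock();
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                struct ex_ring *ring = ACCESS_ONCE(vsi->rx_rings[i]);

                if (!ring)      /* not set up yet or being freed */
                        continue;
                /* ... report on 'ring' fields here ... */
        }
        rcu_read_unlock();
}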
@@ -587,15 +587,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
587 dev_info(&pf->pdev->dev, 587 dev_info(&pf->pdev->dev,
588 " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", 588 " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
589 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); 589 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
590 if (vsi->q_vectors) {
591 for (i = 0; i < vsi->num_q_vectors; i++) {
592 dev_info(&pf->pdev->dev,
593 " q_vectors[%i]: base index = %ld\n",
594 i, ((long int)*vsi->q_vectors[i].rx.ring-
595 (long int)*vsi->q_vectors[0].rx.ring)/
596 sizeof(struct i40e_ring));
597 }
598 }
599 dev_info(&pf->pdev->dev, 590 dev_info(&pf->pdev->dev,
600 " num_q_vectors = %i, base_vector = %i\n", 591 " num_q_vectors = %i, base_vector = %i\n",
601 vsi->num_q_vectors, vsi->base_vector); 592 vsi->num_q_vectors, vsi->base_vector);
@@ -792,9 +783,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
792 return; 783 return;
793 } 784 }
794 if (is_rx_ring) 785 if (is_rx_ring)
795 ring = vsi->rx_rings[ring_id]; 786 ring = *vsi->rx_rings[ring_id];
796 else 787 else
797 ring = vsi->tx_rings[ring_id]; 788 ring = *vsi->tx_rings[ring_id];
798 if (cnt == 2) { 789 if (cnt == 2) {
799 dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", 790 dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
800 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); 791 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
@@ -1028,11 +1019,11 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1028 size_t count, loff_t *ppos) 1019 size_t count, loff_t *ppos)
1029{ 1020{
1030 struct i40e_pf *pf = filp->private_data; 1021 struct i40e_pf *pf = filp->private_data;
1022 char *cmd_buf, *cmd_buf_tmp;
1031 int bytes_not_copied; 1023 int bytes_not_copied;
1032 struct i40e_vsi *vsi; 1024 struct i40e_vsi *vsi;
1033 u8 *print_buf_start; 1025 u8 *print_buf_start;
1034 u8 *print_buf; 1026 u8 *print_buf;
1035 char *cmd_buf;
1036 int vsi_seid; 1027 int vsi_seid;
1037 int veb_seid; 1028 int veb_seid;
1038 int cnt; 1029 int cnt;
@@ -1051,6 +1042,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1051 count -= bytes_not_copied; 1042 count -= bytes_not_copied;
1052 cmd_buf[count] = '\0'; 1043 cmd_buf[count] = '\0';
1053 1044
1045 cmd_buf_tmp = strchr(cmd_buf, '\n');
1046 if (cmd_buf_tmp) {
1047 *cmd_buf_tmp = '\0';
1048 count = cmd_buf_tmp - cmd_buf + 1;
1049 }
1050
1054 print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL); 1051 print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
1055 if (!print_buf_start) 1052 if (!print_buf_start)
1056 goto command_write_done; 1053 goto command_write_done;
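The command parser now cuts the buffer at the first newline, so the trailing '\n' that echo appends never ends up inside the last token of a command. Sketch of the trim, assuming a NUL-terminated command buffer:

#include <linux/string.h>

/* Hypothetical: chop a command buffer at the first newline and adjust
 * the byte count the same way the hunk does. */
static void ex_trim_newline(char *cmd_buf, size_t *count)
{
        char *nl = strchr(cmd_buf, '\n');

        if (nl) {
                *nl = '\0';
                *count = nl - cmd_buf + 1;
        }
}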
@@ -1157,9 +1154,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1157 i40e_veb_release(pf->veb[i]); 1154 i40e_veb_release(pf->veb[i]);
1158 1155
1159 } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) { 1156 } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
1160 u8 ma[6];
1161 int vlan = 0;
1162 struct i40e_mac_filter *f; 1157 struct i40e_mac_filter *f;
1158 int vlan = 0;
1159 u8 ma[6];
1163 int ret; 1160 int ret;
1164 1161
1165 cnt = sscanf(&cmd_buf[11], 1162 cnt = sscanf(&cmd_buf[11],
@@ -1195,8 +1192,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1195 ma, vlan, vsi_seid, f, ret); 1192 ma, vlan, vsi_seid, f, ret);
1196 1193
1197 } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) { 1194 } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
1198 u8 ma[6];
1199 int vlan = 0; 1195 int vlan = 0;
1196 u8 ma[6];
1200 int ret; 1197 int ret;
1201 1198
1202 cnt = sscanf(&cmd_buf[11], 1199 cnt = sscanf(&cmd_buf[11],
@@ -1232,9 +1229,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1232 ma, vlan, vsi_seid, ret); 1229 ma, vlan, vsi_seid, ret);
1233 1230
1234 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) { 1231 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
1235 int v;
1236 u16 vid;
1237 i40e_status ret; 1232 i40e_status ret;
1233 u16 vid;
1234 int v;
1238 1235
1239 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v); 1236 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
1240 if (cnt != 2) { 1237 if (cnt != 2) {
@@ -1545,10 +1542,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1545 } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) || 1542 } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
1546 (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { 1543 (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
1547 struct i40e_fdir_data fd_data; 1544 struct i40e_fdir_data fd_data;
1548 int ret;
1549 u16 packet_len, i, j = 0; 1545 u16 packet_len, i, j = 0;
1550 char *asc_packet; 1546 char *asc_packet;
1551 bool add = false; 1547 bool add = false;
1548 int ret;
1552 1549
1553 asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 1550 asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
1554 GFP_KERNEL); 1551 GFP_KERNEL);
@@ -1636,9 +1633,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1636 } 1633 }
1637 } else if (strncmp(&cmd_buf[5], 1634 } else if (strncmp(&cmd_buf[5],
1638 "get local", 9) == 0) { 1635 "get local", 9) == 0) {
1636 u16 llen, rlen;
1639 int ret, i; 1637 int ret, i;
1640 u8 *buff; 1638 u8 *buff;
1641 u16 llen, rlen;
1642 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); 1639 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1643 if (!buff) 1640 if (!buff)
1644 goto command_write_done; 1641 goto command_write_done;
@@ -1669,9 +1666,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1669 kfree(buff); 1666 kfree(buff);
1670 buff = NULL; 1667 buff = NULL;
1671 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) { 1668 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
1669 u16 llen, rlen;
1672 int ret, i; 1670 int ret, i;
1673 u8 *buff; 1671 u8 *buff;
1674 u16 llen, rlen;
1675 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); 1672 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1676 if (!buff) 1673 if (!buff)
1677 goto command_write_done; 1674 goto command_write_done;
@@ -1747,11 +1744,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1747 goto command_write_done; 1744 goto command_write_done;
1748 } 1745 }
1749 1746
1750 /* Read at least 512 words */ 1747 /* set the max length */
1751 if (buffer_len == 0) 1748 buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
1752 buffer_len = 512;
1753 1749
1754 bytes = 2 * buffer_len; 1750 bytes = 2 * buffer_len;
1751
1752 /* read at least 1k bytes, no more than 4kB */
1753 bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
1755 buff = kzalloc(bytes, GFP_KERNEL); 1754 buff = kzalloc(bytes, GFP_KERNEL);
1756 if (!buff) 1755 if (!buff)
1757 goto command_write_done; 1756 goto command_write_done;
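The NVM-read length handling replaces the implicit 512-word default with explicit bounds: min_t() caps the requested word count at the AdminQ buffer size and clamp() keeps the byte count in a sane window. A small illustration of the idiom; the limit macro is hypothetical:

#include <linux/kernel.h>       /* min_t(), clamp() */
#include <linux/types.h>

#define EX_MAX_AQ_BUF_SIZE 4096 /* hypothetical AdminQ buffer limit */

/* Hypothetical: convert a word count from userspace into a bounded
 * byte count in [1024, EX_MAX_AQ_BUF_SIZE]. */
static u16 ex_bound_nvm_bytes(u16 buffer_len)
{
        u16 bytes;

        buffer_len = min_t(u16, buffer_len, EX_MAX_AQ_BUF_SIZE / 2);
        bytes = 2 * buffer_len;
        return clamp(bytes, (u16)1024, (u16)EX_MAX_AQ_BUF_SIZE);
}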
@@ -1903,6 +1902,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
1903 struct i40e_pf *pf = filp->private_data; 1902 struct i40e_pf *pf = filp->private_data;
1904 int bytes_not_copied; 1903 int bytes_not_copied;
1905 struct i40e_vsi *vsi; 1904 struct i40e_vsi *vsi;
1905 char *buf_tmp;
1906 int vsi_seid; 1906 int vsi_seid;
1907 int i, cnt; 1907 int i, cnt;
1908 1908
@@ -1921,6 +1921,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
1921 count -= bytes_not_copied; 1921 count -= bytes_not_copied;
1922 i40e_dbg_netdev_ops_buf[count] = '\0'; 1922 i40e_dbg_netdev_ops_buf[count] = '\0';
1923 1923
1924 buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
1925 if (buf_tmp) {
1926 *buf_tmp = '\0';
1927 count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
1928 }
1929
1924 if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { 1930 if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
1925 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); 1931 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
1926 if (cnt != 1) { 1932 if (cnt != 1) {
@@ -1996,7 +2002,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
1996 goto netdev_ops_write_done; 2002 goto netdev_ops_write_done;
1997 } 2003 }
1998 for (i = 0; i < vsi->num_q_vectors; i++) 2004 for (i = 0; i < vsi->num_q_vectors; i++)
1999 napi_schedule(&vsi->q_vectors[i].napi); 2005 napi_schedule(&vsi->q_vectors[i]->napi);
2000 dev_info(&pf->pdev->dev, "napi called\n"); 2006 dev_info(&pf->pdev->dev, "napi called\n");
2001 } else { 2007 } else {
2002 dev_info(&pf->pdev->dev, "unknown command '%s'\n", 2008 dev_info(&pf->pdev->dev, "unknown command '%s'\n",
@@ -2024,21 +2030,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
2024 **/ 2030 **/
2025void i40e_dbg_pf_init(struct i40e_pf *pf) 2031void i40e_dbg_pf_init(struct i40e_pf *pf)
2026{ 2032{
2027 struct dentry *pfile __attribute__((unused)); 2033 struct dentry *pfile;
2028 const char *name = pci_name(pf->pdev); 2034 const char *name = pci_name(pf->pdev);
2035 const struct device *dev = &pf->pdev->dev;
2029 2036
2030 pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); 2037 pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
2031 if (pf->i40e_dbg_pf) { 2038 if (!pf->i40e_dbg_pf)
2032 pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, 2039 return;
2033 pf, &i40e_dbg_command_fops); 2040
2034 pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, 2041 pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
2035 &i40e_dbg_dump_fops); 2042 &i40e_dbg_command_fops);
2036 pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, 2043 if (!pfile)
2037 pf, &i40e_dbg_netdev_ops_fops); 2044 goto create_failed;
2038 } else { 2045
2039 dev_info(&pf->pdev->dev, 2046 pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
2040 "debugfs entry for %s failed\n", name); 2047 &i40e_dbg_dump_fops);
2041 } 2048 if (!pfile)
2049 goto create_failed;
2050
2051 pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
2052 &i40e_dbg_netdev_ops_fops);
2053 if (!pfile)
2054 goto create_failed;
2055
2056 return;
2057
2058create_failed:
2059 dev_info(dev, "debugfs dir/file for %s failed\n", name);
2060 debugfs_remove_recursive(pf->i40e_dbg_pf);
2061 return;
2042} 2062}
2043 2063
2044/** 2064/**
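The debugfs setup now checks every debugfs_create_file() return and unwinds with debugfs_remove_recursive() on failure instead of discarding the results. A hedged sketch with hypothetical file names (the NULL checks match this kernel version; current kernels return error pointers and discourage checking debugfs results at all):

#include <linux/debugfs.h>

/* Hypothetical: per-device debugfs dir with two files, fully unwound
 * if any creation fails. */
static struct dentry *ex_dbg_init(const char *name, void *priv,
                                  const struct file_operations *cmd_fops,
                                  const struct file_operations *dump_fops)
{
        struct dentry *dir, *f;

        dir = debugfs_create_dir(name, NULL);
        if (!dir)
                return NULL;

        f = debugfs_create_file("command", 0600, dir, priv, cmd_fops);
        if (!f)
                goto create_failed;

        f = debugfs_create_file("dump", 0600, dir, priv, dump_fops);
        if (!f)
                goto create_failed;

        return dir;

create_failed:
        debugfs_remove_recursive(dir);
        return NULL;
}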
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a76b8cec76c..1b86138fa9e1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
399 ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS; 399 ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
400 ring->rx_mini_max_pending = 0; 400 ring->rx_mini_max_pending = 0;
401 ring->rx_jumbo_max_pending = 0; 401 ring->rx_jumbo_max_pending = 0;
402 ring->rx_pending = vsi->rx_rings[0].count; 402 ring->rx_pending = vsi->rx_rings[0]->count;
403 ring->tx_pending = vsi->tx_rings[0].count; 403 ring->tx_pending = vsi->tx_rings[0]->count;
404 ring->rx_mini_pending = 0; 404 ring->rx_mini_pending = 0;
405 ring->rx_jumbo_pending = 0; 405 ring->rx_jumbo_pending = 0;
406} 406}
@@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
429 new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE); 429 new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
430 430
431 /* if nothing to do return success */ 431 /* if nothing to do return success */
432 if ((new_tx_count == vsi->tx_rings[0].count) && 432 if ((new_tx_count == vsi->tx_rings[0]->count) &&
433 (new_rx_count == vsi->rx_rings[0].count)) 433 (new_rx_count == vsi->rx_rings[0]->count))
434 return 0; 434 return 0;
435 435
436 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) 436 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
439 if (!netif_running(vsi->netdev)) { 439 if (!netif_running(vsi->netdev)) {
440 /* simple case - set for the next time the netdev is started */ 440 /* simple case - set for the next time the netdev is started */
441 for (i = 0; i < vsi->num_queue_pairs; i++) { 441 for (i = 0; i < vsi->num_queue_pairs; i++) {
442 vsi->tx_rings[i].count = new_tx_count; 442 vsi->tx_rings[i]->count = new_tx_count;
443 vsi->rx_rings[i].count = new_rx_count; 443 vsi->rx_rings[i]->count = new_rx_count;
444 } 444 }
445 goto done; 445 goto done;
446 } 446 }
@@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
451 */ 451 */
452 452
453 /* alloc updated Tx resources */ 453 /* alloc updated Tx resources */
454 if (new_tx_count != vsi->tx_rings[0].count) { 454 if (new_tx_count != vsi->tx_rings[0]->count) {
455 netdev_info(netdev, 455 netdev_info(netdev,
456 "Changing Tx descriptor count from %d to %d.\n", 456 "Changing Tx descriptor count from %d to %d.\n",
457 vsi->tx_rings[0].count, new_tx_count); 457 vsi->tx_rings[0]->count, new_tx_count);
458 tx_rings = kcalloc(vsi->alloc_queue_pairs, 458 tx_rings = kcalloc(vsi->alloc_queue_pairs,
459 sizeof(struct i40e_ring), GFP_KERNEL); 459 sizeof(struct i40e_ring), GFP_KERNEL);
460 if (!tx_rings) { 460 if (!tx_rings) {
@@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
464 464
465 for (i = 0; i < vsi->num_queue_pairs; i++) { 465 for (i = 0; i < vsi->num_queue_pairs; i++) {
466 /* clone ring and setup updated count */ 466 /* clone ring and setup updated count */
467 tx_rings[i] = vsi->tx_rings[i]; 467 tx_rings[i] = *vsi->tx_rings[i];
468 tx_rings[i].count = new_tx_count; 468 tx_rings[i].count = new_tx_count;
469 err = i40e_setup_tx_descriptors(&tx_rings[i]); 469 err = i40e_setup_tx_descriptors(&tx_rings[i]);
470 if (err) { 470 if (err) {
@@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
481 } 481 }
482 482
483 /* alloc updated Rx resources */ 483 /* alloc updated Rx resources */
484 if (new_rx_count != vsi->rx_rings[0].count) { 484 if (new_rx_count != vsi->rx_rings[0]->count) {
485 netdev_info(netdev, 485 netdev_info(netdev,
486 "Changing Rx descriptor count from %d to %d\n", 486 "Changing Rx descriptor count from %d to %d\n",
487 vsi->rx_rings[0].count, new_rx_count); 487 vsi->rx_rings[0]->count, new_rx_count);
488 rx_rings = kcalloc(vsi->alloc_queue_pairs, 488 rx_rings = kcalloc(vsi->alloc_queue_pairs,
489 sizeof(struct i40e_ring), GFP_KERNEL); 489 sizeof(struct i40e_ring), GFP_KERNEL);
490 if (!rx_rings) { 490 if (!rx_rings) {
@@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
494 494
495 for (i = 0; i < vsi->num_queue_pairs; i++) { 495 for (i = 0; i < vsi->num_queue_pairs; i++) {
496 /* clone ring and setup updated count */ 496 /* clone ring and setup updated count */
497 rx_rings[i] = vsi->rx_rings[i]; 497 rx_rings[i] = *vsi->rx_rings[i];
498 rx_rings[i].count = new_rx_count; 498 rx_rings[i].count = new_rx_count;
499 err = i40e_setup_rx_descriptors(&rx_rings[i]); 499 err = i40e_setup_rx_descriptors(&rx_rings[i]);
500 if (err) { 500 if (err) {
@@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
517 517
518 if (tx_rings) { 518 if (tx_rings) {
519 for (i = 0; i < vsi->num_queue_pairs; i++) { 519 for (i = 0; i < vsi->num_queue_pairs; i++) {
520 i40e_free_tx_resources(&vsi->tx_rings[i]); 520 i40e_free_tx_resources(vsi->tx_rings[i]);
521 vsi->tx_rings[i] = tx_rings[i]; 521 *vsi->tx_rings[i] = tx_rings[i];
522 } 522 }
523 kfree(tx_rings); 523 kfree(tx_rings);
524 tx_rings = NULL; 524 tx_rings = NULL;
@@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
526 526
527 if (rx_rings) { 527 if (rx_rings) {
528 for (i = 0; i < vsi->num_queue_pairs; i++) { 528 for (i = 0; i < vsi->num_queue_pairs; i++) {
529 i40e_free_rx_resources(&vsi->rx_rings[i]); 529 i40e_free_rx_resources(vsi->rx_rings[i]);
530 vsi->rx_rings[i] = rx_rings[i]; 530 *vsi->rx_rings[i] = rx_rings[i];
531 } 531 }
532 kfree(rx_rings); 532 kfree(rx_rings);
533 rx_rings = NULL; 533 rx_rings = NULL;
@@ -579,6 +579,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
579 char *p; 579 char *p;
580 int j; 580 int j;
581 struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); 581 struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
582 unsigned int start;
582 583
583 i40e_update_stats(vsi); 584 i40e_update_stats(vsi);
584 585
@@ -587,14 +588,30 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
587 data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == 588 data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
588 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 589 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
589 } 590 }
590 for (j = 0; j < vsi->num_queue_pairs; j++) { 591 rcu_read_lock();
591 data[i++] = vsi->tx_rings[j].tx_stats.packets; 592 for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
592 data[i++] = vsi->tx_rings[j].tx_stats.bytes; 593 struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
593 } 594 struct i40e_ring *rx_ring;
594 for (j = 0; j < vsi->num_queue_pairs; j++) { 595
595 data[i++] = vsi->rx_rings[j].rx_stats.packets; 596 if (!tx_ring)
596 data[i++] = vsi->rx_rings[j].rx_stats.bytes; 597 continue;
598
599 /* process Tx ring statistics */
600 do {
601 start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
602 data[i] = tx_ring->stats.packets;
603 data[i + 1] = tx_ring->stats.bytes;
604 } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
605
606 /* Rx ring is the 2nd half of the queue pair */
607 rx_ring = &tx_ring[1];
608 do {
609 start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
610 data[i + 2] = rx_ring->stats.packets;
611 data[i + 3] = rx_ring->stats.bytes;
612 } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
597 } 613 }
614 rcu_read_unlock();
598 if (vsi == pf->vsi[pf->lan_vsi]) { 615 if (vsi == pf->vsi[pf->lan_vsi]) {
599 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { 616 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
600 p = (char *)pf + i40e_gstrings_stats[j].stat_offset; 617 p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
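Per-queue packet and byte counters are now read through the ring's u64_stats_sync sequence, so 64-bit counters stay consistent on 32-bit hosts, and the Rx ring is located as the second half of the Tx ring's allocation. Sketch of the reader side with hypothetical types (the _bh fetch helpers match this kernel version):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct ex_ring_stats {
        u64 packets;
        u64 bytes;
};

struct ex_stat_ring {
        struct ex_ring_stats stats;
        struct u64_stats_sync syncp;
};

/* Hypothetical: take a consistent snapshot of one ring's counters. */
static void ex_read_ring_stats(struct ex_stat_ring *ring,
                               u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_bh(&ring->syncp);
                *packets = ring->stats.packets;
                *bytes = ring->stats.bytes;
        } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
}

The writer side wraps its updates in u64_stats_update_begin()/u64_stats_update_end() on the same syncp.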
@@ -641,8 +658,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
641 p += ETH_GSTRING_LEN; 658 p += ETH_GSTRING_LEN;
642 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); 659 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
643 p += ETH_GSTRING_LEN; 660 p += ETH_GSTRING_LEN;
644 }
645 for (i = 0; i < vsi->num_queue_pairs; i++) {
646 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); 661 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
647 p += ETH_GSTRING_LEN; 662 p += ETH_GSTRING_LEN;
648 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); 663 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
@@ -910,8 +925,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
910 } 925 }
911 926
912 vector = vsi->base_vector; 927 vector = vsi->base_vector;
913 q_vector = vsi->q_vectors; 928 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
914 for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) { 929 q_vector = vsi->q_vectors[i];
915 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 930 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
916 wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); 931 wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
917 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 932 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 221aa4795017..41a79df373d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
36 36
37#define DRV_VERSION_MAJOR 0 37#define DRV_VERSION_MAJOR 0
38#define DRV_VERSION_MINOR 3 38#define DRV_VERSION_MINOR 3
39#define DRV_VERSION_BUILD 9 39#define DRV_VERSION_BUILD 11
40#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 40#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
41 __stringify(DRV_VERSION_MINOR) "." \ 41 __stringify(DRV_VERSION_MINOR) "." \
42 __stringify(DRV_VERSION_BUILD) DRV_KERN 42 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -347,14 +347,53 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
347 **/ 347 **/
348static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( 348static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
349 struct net_device *netdev, 349 struct net_device *netdev,
350 struct rtnl_link_stats64 *storage) 350 struct rtnl_link_stats64 *stats)
351{ 351{
352 struct i40e_netdev_priv *np = netdev_priv(netdev); 352 struct i40e_netdev_priv *np = netdev_priv(netdev);
353 struct i40e_vsi *vsi = np->vsi; 353 struct i40e_vsi *vsi = np->vsi;
354 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
355 int i;
356
357 rcu_read_lock();
358 for (i = 0; i < vsi->num_queue_pairs; i++) {
359 struct i40e_ring *tx_ring, *rx_ring;
360 u64 bytes, packets;
361 unsigned int start;
362
363 tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
364 if (!tx_ring)
365 continue;
354 366
355 *storage = *i40e_get_vsi_stats_struct(vsi); 367 do {
368 start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
369 packets = tx_ring->stats.packets;
370 bytes = tx_ring->stats.bytes;
371 } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
372
373 stats->tx_packets += packets;
374 stats->tx_bytes += bytes;
375 rx_ring = &tx_ring[1];
356 376
357 return storage; 377 do {
378 start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
379 packets = rx_ring->stats.packets;
380 bytes = rx_ring->stats.bytes;
381 } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
382
383 stats->rx_packets += packets;
384 stats->rx_bytes += bytes;
385 }
386 rcu_read_unlock();
387
388 /* following stats updated by ixgbe_watchdog_task() */
389 stats->multicast = vsi_stats->multicast;
390 stats->tx_errors = vsi_stats->tx_errors;
391 stats->tx_dropped = vsi_stats->tx_dropped;
392 stats->rx_errors = vsi_stats->rx_errors;
393 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
394 stats->rx_length_errors = vsi_stats->rx_length_errors;
395
396 return stats;
358} 397}
359 398
360/** 399/**
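ndo_get_stats64 now sums Tx/Rx packets and bytes from the per-ring counters under rcu_read_lock() and copies only the error-class fields from the VSI-level stats, which are refreshed by the slower watchdog path. A sketch of the final copy step; the helper name is hypothetical and the summation would use the ring-snapshot helper sketched above:

#include <linux/netdevice.h>    /* struct rtnl_link_stats64 */

/* Hypothetical: after summing per-ring tx/rx counters into 'stats',
 * pull only the fields the fast path does not maintain. */
static void ex_copy_slowpath_stats(struct rtnl_link_stats64 *stats,
                                   const struct rtnl_link_stats64 *vsi_stats)
{
        stats->multicast        = vsi_stats->multicast;
        stats->tx_errors        = vsi_stats->tx_errors;
        stats->tx_dropped       = vsi_stats->tx_dropped;
        stats->rx_errors        = vsi_stats->rx_errors;
        stats->rx_crc_errors    = vsi_stats->rx_crc_errors;
        stats->rx_length_errors = vsi_stats->rx_length_errors;
}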
@@ -376,10 +415,14 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
376 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); 415 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
377 if (vsi->rx_rings) 416 if (vsi->rx_rings)
378 for (i = 0; i < vsi->num_queue_pairs; i++) { 417 for (i = 0; i < vsi->num_queue_pairs; i++) {
379 memset(&vsi->rx_rings[i].rx_stats, 0 , 418 memset(&vsi->rx_rings[i]->stats, 0 ,
380 sizeof(vsi->rx_rings[i].rx_stats)); 419 sizeof(vsi->rx_rings[i]->stats));
381 memset(&vsi->tx_rings[i].tx_stats, 0, 420 memset(&vsi->rx_rings[i]->rx_stats, 0 ,
382 sizeof(vsi->tx_rings[i].tx_stats)); 421 sizeof(vsi->rx_rings[i]->rx_stats));
422 memset(&vsi->tx_rings[i]->stats, 0 ,
423 sizeof(vsi->tx_rings[i]->stats));
424 memset(&vsi->tx_rings[i]->tx_stats, 0,
425 sizeof(vsi->tx_rings[i]->tx_stats));
383 } 426 }
384 vsi->stat_offsets_loaded = false; 427 vsi->stat_offsets_loaded = false;
385} 428}
@@ -598,7 +641,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
598 continue; 641 continue;
599 642
600 for (i = 0; i < vsi->num_queue_pairs; i++) { 643 for (i = 0; i < vsi->num_queue_pairs; i++) {
601 struct i40e_ring *ring = &vsi->tx_rings[i]; 644 struct i40e_ring *ring = vsi->tx_rings[i];
602 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); 645 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
603 } 646 }
604 } 647 }
@@ -652,7 +695,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
652 continue; 695 continue;
653 696
654 for (i = 0; i < vsi->num_queue_pairs; i++) { 697 for (i = 0; i < vsi->num_queue_pairs; i++) {
655 struct i40e_ring *ring = &vsi->tx_rings[i]; 698 struct i40e_ring *ring = vsi->tx_rings[i];
656 699
657 tc = ring->dcb_tc; 700 tc = ring->dcb_tc;
658 if (xoff[tc]) 701 if (xoff[tc])
@@ -704,21 +747,38 @@ void i40e_update_stats(struct i40e_vsi *vsi)
704 tx_restart = tx_busy = 0; 747 tx_restart = tx_busy = 0;
705 rx_page = 0; 748 rx_page = 0;
706 rx_buf = 0; 749 rx_buf = 0;
750 rcu_read_lock();
707 for (q = 0; q < vsi->num_queue_pairs; q++) { 751 for (q = 0; q < vsi->num_queue_pairs; q++) {
708 struct i40e_ring *p; 752 struct i40e_ring *p;
753 u64 bytes, packets;
754 unsigned int start;
709 755
710 p = &vsi->rx_rings[q]; 756 /* locate Tx ring */
711 rx_b += p->rx_stats.bytes; 757 p = ACCESS_ONCE(vsi->tx_rings[q]);
712 rx_p += p->rx_stats.packets;
713 rx_buf += p->rx_stats.alloc_rx_buff_failed;
714 rx_page += p->rx_stats.alloc_rx_page_failed;
715 758
716 p = &vsi->tx_rings[q]; 759 do {
717 tx_b += p->tx_stats.bytes; 760 start = u64_stats_fetch_begin_bh(&p->syncp);
718 tx_p += p->tx_stats.packets; 761 packets = p->stats.packets;
762 bytes = p->stats.bytes;
763 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
764 tx_b += bytes;
765 tx_p += packets;
719 tx_restart += p->tx_stats.restart_queue; 766 tx_restart += p->tx_stats.restart_queue;
720 tx_busy += p->tx_stats.tx_busy; 767 tx_busy += p->tx_stats.tx_busy;
768
769 /* Rx queue is part of the same block as Tx queue */
770 p = &p[1];
771 do {
772 start = u64_stats_fetch_begin_bh(&p->syncp);
773 packets = p->stats.packets;
774 bytes = p->stats.bytes;
775 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
776 rx_b += bytes;
777 rx_p += packets;
778 rx_buf += p->rx_stats.alloc_rx_buff_failed;
779 rx_page += p->rx_stats.alloc_rx_page_failed;
721 } 780 }
781 rcu_read_unlock();
722 vsi->tx_restart = tx_restart; 782 vsi->tx_restart = tx_restart;
723 vsi->tx_busy = tx_busy; 783 vsi->tx_busy = tx_busy;
724 vsi->rx_page_failed = rx_page; 784 vsi->rx_page_failed = rx_page;
@@ -1988,7 +2048,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
1988 int i, err = 0; 2048 int i, err = 0;
1989 2049
1990 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2050 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
1991 err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]); 2051 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
1992 2052
1993 return err; 2053 return err;
1994} 2054}
@@ -2004,8 +2064,8 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2004 int i; 2064 int i;
2005 2065
2006 for (i = 0; i < vsi->num_queue_pairs; i++) 2066 for (i = 0; i < vsi->num_queue_pairs; i++)
2007 if (vsi->tx_rings[i].desc) 2067 if (vsi->tx_rings[i]->desc)
2008 i40e_free_tx_resources(&vsi->tx_rings[i]); 2068 i40e_free_tx_resources(vsi->tx_rings[i]);
2009} 2069}
2010 2070
2011/** 2071/**
@@ -2023,7 +2083,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2023 int i, err = 0; 2083 int i, err = 0;
2024 2084
2025 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2085 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2026 err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]); 2086 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2027 return err; 2087 return err;
2028} 2088}
2029 2089
@@ -2038,8 +2098,8 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2038 int i; 2098 int i;
2039 2099
2040 for (i = 0; i < vsi->num_queue_pairs; i++) 2100 for (i = 0; i < vsi->num_queue_pairs; i++)
2041 if (vsi->rx_rings[i].desc) 2101 if (vsi->rx_rings[i]->desc)
2042 i40e_free_rx_resources(&vsi->rx_rings[i]); 2102 i40e_free_rx_resources(vsi->rx_rings[i]);
2043} 2103}
2044 2104
2045/** 2105/**
@@ -2114,8 +2174,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
2114 2174
2115 /* Now associate this queue with this PCI function */ 2175 /* Now associate this queue with this PCI function */
2116 qtx_ctl = I40E_QTX_CTL_PF_QUEUE; 2176 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2117 qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT) 2177 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2118 & I40E_QTX_CTL_PF_INDX_MASK); 2178 I40E_QTX_CTL_PF_INDX_MASK);
2119 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); 2179 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2120 i40e_flush(hw); 2180 i40e_flush(hw);
2121 2181
@@ -2223,8 +2283,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2223 int err = 0; 2283 int err = 0;
2224 u16 i; 2284 u16 i;
2225 2285
2226 for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++) 2286 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2227 err = i40e_configure_tx_ring(&vsi->tx_rings[i]); 2287 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2228 2288
2229 return err; 2289 return err;
2230} 2290}
@@ -2274,7 +2334,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2274 2334
2275 /* set up individual rings */ 2335 /* set up individual rings */
2276 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2336 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2277 err = i40e_configure_rx_ring(&vsi->rx_rings[i]); 2337 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2278 2338
2279 return err; 2339 return err;
2280} 2340}
@@ -2298,8 +2358,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2298 qoffset = vsi->tc_config.tc_info[n].qoffset; 2358 qoffset = vsi->tc_config.tc_info[n].qoffset;
2299 qcount = vsi->tc_config.tc_info[n].qcount; 2359 qcount = vsi->tc_config.tc_info[n].qcount;
2300 for (i = qoffset; i < (qoffset + qcount); i++) { 2360 for (i = qoffset; i < (qoffset + qcount); i++) {
2301 struct i40e_ring *rx_ring = &vsi->rx_rings[i]; 2361 struct i40e_ring *rx_ring = vsi->rx_rings[i];
2302 struct i40e_ring *tx_ring = &vsi->tx_rings[i]; 2362 struct i40e_ring *tx_ring = vsi->tx_rings[i];
2303 rx_ring->dcb_tc = n; 2363 rx_ring->dcb_tc = n;
2304 tx_ring->dcb_tc = n; 2364 tx_ring->dcb_tc = n;
2305 } 2365 }
@@ -2354,8 +2414,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2354 */ 2414 */
2355 qp = vsi->base_queue; 2415 qp = vsi->base_queue;
2356 vector = vsi->base_vector; 2416 vector = vsi->base_vector;
2357 q_vector = vsi->q_vectors; 2417 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2358 for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) { 2418 q_vector = vsi->q_vectors[i];
2359 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2419 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2360 q_vector->rx.latency_range = I40E_LOW_LATENCY; 2420 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2361 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), 2421 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2435,7 +2495,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2435 **/ 2495 **/
2436static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 2496static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2437{ 2497{
2438 struct i40e_q_vector *q_vector = vsi->q_vectors; 2498 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2439 struct i40e_pf *pf = vsi->back; 2499 struct i40e_pf *pf = vsi->back;
2440 struct i40e_hw *hw = &pf->hw; 2500 struct i40e_hw *hw = &pf->hw;
2441 u32 val; 2501 u32 val;
@@ -2472,7 +2532,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2472 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 2532 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2473 * @pf: board private structure 2533 * @pf: board private structure
2474 **/ 2534 **/
2475static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 2535void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2476{ 2536{
2477 struct i40e_hw *hw = &pf->hw; 2537 struct i40e_hw *hw = &pf->hw;
2478 u32 val; 2538 u32 val;
@@ -2500,7 +2560,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2500 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 2560 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2501 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 2561 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2502 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 2562 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2503 i40e_flush(hw); 2563 /* skip the flush */
2504} 2564}
2505 2565
2506/** 2566/**
@@ -2512,7 +2572,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2512{ 2572{
2513 struct i40e_q_vector *q_vector = data; 2573 struct i40e_q_vector *q_vector = data;
2514 2574
2515 if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0]) 2575 if (!q_vector->tx.ring && !q_vector->rx.ring)
2516 return IRQ_HANDLED; 2576 return IRQ_HANDLED;
2517 2577
2518 napi_schedule(&q_vector->napi); 2578 napi_schedule(&q_vector->napi);
@@ -2529,7 +2589,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
2529{ 2589{
2530 struct i40e_q_vector *q_vector = data; 2590 struct i40e_q_vector *q_vector = data;
2531 2591
2532 if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0]) 2592 if (!q_vector->tx.ring && !q_vector->rx.ring)
2533 return IRQ_HANDLED; 2593 return IRQ_HANDLED;
2534 2594
2535 pr_info("fdir ring cleaning needed\n"); 2595 pr_info("fdir ring cleaning needed\n");
@@ -2554,16 +2614,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2554 int vector, err; 2614 int vector, err;
2555 2615
2556 for (vector = 0; vector < q_vectors; vector++) { 2616 for (vector = 0; vector < q_vectors; vector++) {
2557 struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]); 2617 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
2558 2618
2559 if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) { 2619 if (q_vector->tx.ring && q_vector->rx.ring) {
2560 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2620 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2561 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 2621 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2562 tx_int_idx++; 2622 tx_int_idx++;
2563 } else if (q_vector->rx.ring[0]) { 2623 } else if (q_vector->rx.ring) {
2564 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2624 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2565 "%s-%s-%d", basename, "rx", rx_int_idx++); 2625 "%s-%s-%d", basename, "rx", rx_int_idx++);
2566 } else if (q_vector->tx.ring[0]) { 2626 } else if (q_vector->tx.ring) {
2567 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2627 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2568 "%s-%s-%d", basename, "tx", tx_int_idx++); 2628 "%s-%s-%d", basename, "tx", tx_int_idx++);
2569 } else { 2629 } else {
@@ -2611,8 +2671,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
2611 int i; 2671 int i;
2612 2672
2613 for (i = 0; i < vsi->num_queue_pairs; i++) { 2673 for (i = 0; i < vsi->num_queue_pairs; i++) {
2614 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0); 2674 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
2615 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0); 2675 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
2616 } 2676 }
2617 2677
2618 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 2678 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2649,6 +2709,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2649 i40e_irq_dynamic_enable_icr0(pf); 2709 i40e_irq_dynamic_enable_icr0(pf);
2650 } 2710 }
2651 2711
2712 i40e_flush(&pf->hw);
2652 return 0; 2713 return 0;
2653} 2714}
2654 2715
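
Note: the per-vector i40e_flush() calls are dropped ("skip the flush") and a single i40e_flush(&pf->hw) is issued once all vectors have been enabled. MMIO writes are posted, so one read-back at the end is enough to push the whole batch to the device. A rough sketch of that pattern under those assumptions; the register layout and VEC_ENABLE bit are made up for illustration:

#include <linux/io.h>

#define VEC_ENABLE	0x1	/* illustrative enable bit, not an i40e define */

static void enable_all_vectors(void __iomem *regs, unsigned int stride,
			       int count)
{
	int i;

	for (i = 0; i < count; i++)
		writel(VEC_ENABLE, regs + i * stride);	/* posted write */

	readl(regs);	/* single read-back flushes the whole batch */
}
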
@@ -2681,14 +2742,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
2681 2742
2682 icr0 = rd32(hw, I40E_PFINT_ICR0); 2743 icr0 = rd32(hw, I40E_PFINT_ICR0);
2683 2744
2684 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2685 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2686 return IRQ_NONE;
2687
2688 val = rd32(hw, I40E_PFINT_DYN_CTL0); 2745 val = rd32(hw, I40E_PFINT_DYN_CTL0);
2689 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; 2746 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
2690 wr32(hw, I40E_PFINT_DYN_CTL0, val); 2747 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2691 2748
2749 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2750 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2751 return IRQ_NONE;
2752
2692 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); 2753 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
2693 2754
2694 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ 2755 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -2702,10 +2763,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
2702 qval = rd32(hw, I40E_QINT_TQCTL(0)); 2763 qval = rd32(hw, I40E_QINT_TQCTL(0));
2703 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; 2764 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
2704 wr32(hw, I40E_QINT_TQCTL(0), qval); 2765 wr32(hw, I40E_QINT_TQCTL(0), qval);
2705 i40e_flush(hw);
2706 2766
2707 if (!test_bit(__I40E_DOWN, &pf->state)) 2767 if (!test_bit(__I40E_DOWN, &pf->state))
2708 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi); 2768 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
2709 } 2769 }
2710 2770
2711 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { 2771 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -2764,7 +2824,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
2764 2824
2765 /* re-enable interrupt causes */ 2825 /* re-enable interrupt causes */
2766 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 2826 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
2767 i40e_flush(hw);
2768 if (!test_bit(__I40E_DOWN, &pf->state)) { 2827 if (!test_bit(__I40E_DOWN, &pf->state)) {
2769 i40e_service_event_schedule(pf); 2828 i40e_service_event_schedule(pf);
2770 i40e_irq_dynamic_enable_icr0(pf); 2829 i40e_irq_dynamic_enable_icr0(pf);
@@ -2774,40 +2833,26 @@ static irqreturn_t i40e_intr(int irq, void *data)
2774} 2833}
2775 2834
2776/** 2835/**
2777 * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector 2836 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
2778 * @vsi: the VSI being configured
2779 * @v_idx: vector index
2780 * @r_idx: rx queue index
2781 **/
2782static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
2783{
2784 struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
2785 struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
2786
2787 rx_ring->q_vector = q_vector;
2788 q_vector->rx.ring[q_vector->rx.count] = rx_ring;
2789 q_vector->rx.count++;
2790 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2791 q_vector->vsi = vsi;
2792}
2793
2794/**
2795 * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
2796 * @vsi: the VSI being configured 2837 * @vsi: the VSI being configured
2797 * @v_idx: vector index 2838 * @v_idx: vector index
2798 * @t_idx: tx queue index 2839 * @qp_idx: queue pair index
2799 **/ 2840 **/
2800static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx) 2841static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
2801{ 2842{
2802 struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]); 2843 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
2803 struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]); 2844 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
2845 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
2804 2846
2805 tx_ring->q_vector = q_vector; 2847 tx_ring->q_vector = q_vector;
2806 q_vector->tx.ring[q_vector->tx.count] = tx_ring; 2848 tx_ring->next = q_vector->tx.ring;
2849 q_vector->tx.ring = tx_ring;
2807 q_vector->tx.count++; 2850 q_vector->tx.count++;
2808 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2851
2809 q_vector->num_ringpairs++; 2852 rx_ring->q_vector = q_vector;
2810 q_vector->vsi = vsi; 2853 rx_ring->next = q_vector->rx.ring;
2854 q_vector->rx.ring = rx_ring;
2855 q_vector->rx.count++;
2811} 2856}
2812 2857
2813/** 2858/**
@@ -2823,7 +2868,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
2823{ 2868{
2824 int qp_remaining = vsi->num_queue_pairs; 2869 int qp_remaining = vsi->num_queue_pairs;
2825 int q_vectors = vsi->num_q_vectors; 2870 int q_vectors = vsi->num_q_vectors;
2826 int qp_per_vector; 2871 int num_ringpairs;
2827 int v_start = 0; 2872 int v_start = 0;
2828 int qp_idx = 0; 2873 int qp_idx = 0;
2829 2874
@@ -2831,11 +2876,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
2831 * group them so there are multiple queues per vector. 2876 * group them so there are multiple queues per vector.
2832 */ 2877 */
2833 for (; v_start < q_vectors && qp_remaining; v_start++) { 2878 for (; v_start < q_vectors && qp_remaining; v_start++) {
2834 qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); 2879 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
2835 for (; qp_per_vector; 2880
2836 qp_per_vector--, qp_idx++, qp_remaining--) { 2881 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
2837 map_vector_to_rxq(vsi, v_start, qp_idx); 2882
2838 map_vector_to_txq(vsi, v_start, qp_idx); 2883 q_vector->num_ringpairs = num_ringpairs;
2884
2885 q_vector->rx.count = 0;
2886 q_vector->tx.count = 0;
2887 q_vector->rx.ring = NULL;
2888 q_vector->tx.ring = NULL;
2889
2890 while (num_ringpairs--) {
2891 map_vector_to_qp(vsi, v_start, qp_idx);
2892 qp_idx++;
2893 qp_remaining--;
2839 } 2894 }
2840 } 2895 }
2841} 2896}
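
Note: map_vector_to_rxq()/map_vector_to_txq() with fixed-size ring arrays are replaced by map_vector_to_qp(), which pushes both rings of a queue pair onto singly linked lists headed in the vector's ring containers, while DIV_ROUND_UP() spreads the pairs evenly over the remaining vectors. A condensed sketch of the same scheme with illustrative types:

#include <linux/kernel.h>	/* DIV_ROUND_UP */

struct qp_ring {
	struct qp_ring *next;
};

struct vec_sketch {
	struct qp_ring *tx_head;	/* head of the Tx ring list */
	struct qp_ring *rx_head;	/* head of the Rx ring list */
	int num_ringpairs;
};

static void map_qp(struct vec_sketch *v, struct qp_ring *tx, struct qp_ring *rx)
{
	tx->next = v->tx_head;		/* push the pair onto both lists */
	v->tx_head = tx;
	rx->next = v->rx_head;
	v->rx_head = rx;
}

static void map_all(struct vec_sketch *vecs, int nvec,
		    struct qp_ring **tx, struct qp_ring **rx, int nqp)
{
	int v, qp = 0, remaining = nqp;

	for (v = 0; v < nvec && remaining; v++) {
		/* remaining pairs spread over the vectors still to fill */
		int n = DIV_ROUND_UP(remaining, nvec - v);

		vecs[v].num_ringpairs = n;
		while (n--) {
			map_qp(&vecs[v], tx[qp], rx[qp]);
			qp++;
			remaining--;
		}
	}
}
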
@@ -2887,7 +2942,7 @@ static void i40e_netpoll(struct net_device *netdev)
2887 pf->flags |= I40E_FLAG_IN_NETPOLL; 2942 pf->flags |= I40E_FLAG_IN_NETPOLL;
2888 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 2943 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2889 for (i = 0; i < vsi->num_q_vectors; i++) 2944 for (i = 0; i < vsi->num_q_vectors; i++)
2890 i40e_msix_clean_rings(0, &vsi->q_vectors[i]); 2945 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
2891 } else { 2946 } else {
2892 i40e_intr(pf->pdev->irq, netdev); 2947 i40e_intr(pf->pdev->irq, netdev);
2893 } 2948 }
@@ -3073,14 +3128,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3073 u16 vector = i + base; 3128 u16 vector = i + base;
3074 3129
3075 /* free only the irqs that were actually requested */ 3130 /* free only the irqs that were actually requested */
3076 if (vsi->q_vectors[i].num_ringpairs == 0) 3131 if (vsi->q_vectors[i]->num_ringpairs == 0)
3077 continue; 3132 continue;
3078 3133
3079 /* clear the affinity_mask in the IRQ descriptor */ 3134 /* clear the affinity_mask in the IRQ descriptor */
3080 irq_set_affinity_hint(pf->msix_entries[vector].vector, 3135 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3081 NULL); 3136 NULL);
3082 free_irq(pf->msix_entries[vector].vector, 3137 free_irq(pf->msix_entries[vector].vector,
3083 &vsi->q_vectors[i]); 3138 vsi->q_vectors[i]);
3084 3139
3085 /* Tear down the interrupt queue link list 3140 /* Tear down the interrupt queue link list
3086 * 3141 *
@@ -3164,6 +3219,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3164} 3219}
3165 3220
3166/** 3221/**
3222 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3223 * @vsi: the VSI being configured
3224 * @v_idx: Index of vector to be freed
3225 *
3226 * This function frees the memory allocated to the q_vector. In addition if
3227 * NAPI is enabled it will delete any references to the NAPI struct prior
3228 * to freeing the q_vector.
3229 **/
3230static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3231{
3232 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3233 struct i40e_ring *ring;
3234
3235 if (!q_vector)
3236 return;
3237
3238 /* disassociate q_vector from rings */
3239 i40e_for_each_ring(ring, q_vector->tx)
3240 ring->q_vector = NULL;
3241
3242 i40e_for_each_ring(ring, q_vector->rx)
3243 ring->q_vector = NULL;
3244
3245 /* only VSI w/ an associated netdev is set up w/ NAPI */
3246 if (vsi->netdev)
3247 netif_napi_del(&q_vector->napi);
3248
3249 vsi->q_vectors[v_idx] = NULL;
3250
3251 kfree_rcu(q_vector, rcu);
3252}
3253
3254/**
3167 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 3255 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3168 * @vsi: the VSI being un-configured 3256 * @vsi: the VSI being un-configured
3169 * 3257 *
@@ -3174,24 +3262,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3174{ 3262{
3175 int v_idx; 3263 int v_idx;
3176 3264
3177 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) { 3265 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3178 struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx]; 3266 i40e_free_q_vector(vsi, v_idx);
3179 int r_idx;
3180
3181 if (!q_vector)
3182 continue;
3183
3184 /* disassociate q_vector from rings */
3185 for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
3186 q_vector->tx.ring[r_idx]->q_vector = NULL;
3187 for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
3188 q_vector->rx.ring[r_idx]->q_vector = NULL;
3189
3190 /* only VSI w/ an associated netdev is set up w/ NAPI */
3191 if (vsi->netdev)
3192 netif_napi_del(&q_vector->napi);
3193 }
3194 kfree(vsi->q_vectors);
3195} 3267}
3196 3268
3197/** 3269/**
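
Note: since the interrupt and stats paths can now dereference q_vectors[i] under rcu_read_lock(), i40e_free_q_vector() unpublishes the pointer first and hands the memory to kfree_rcu() rather than kfree(), deferring the free past any in-flight readers without a blocking synchronize_rcu(). The general shape, with struct qv standing in for the q_vector:

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct qv {
	/* fields read by the IRQ/stats paths under rcu_read_lock() */
	struct rcu_head rcu;		/* required by kfree_rcu() */
};

static void drop_qv(struct qv **slot)
{
	struct qv *q = *slot;

	if (!q)
		return;

	*slot = NULL;			/* unpublish before freeing */
	kfree_rcu(q, rcu);		/* freed after a grace period */
}
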
@@ -3241,7 +3313,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3241 return; 3313 return;
3242 3314
3243 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3315 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3244 napi_enable(&vsi->q_vectors[q_idx].napi); 3316 napi_enable(&vsi->q_vectors[q_idx]->napi);
3245} 3317}
3246 3318
3247/** 3319/**
@@ -3256,7 +3328,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3256 return; 3328 return;
3257 3329
3258 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3330 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3259 napi_disable(&vsi->q_vectors[q_idx].napi); 3331 napi_disable(&vsi->q_vectors[q_idx]->napi);
3260} 3332}
3261 3333
3262/** 3334/**
@@ -3703,8 +3775,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
3703 3775
3704 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && 3776 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
3705 (vsi->netdev)) { 3777 (vsi->netdev)) {
3778 netdev_info(vsi->netdev, "NIC Link is Up\n");
3706 netif_tx_start_all_queues(vsi->netdev); 3779 netif_tx_start_all_queues(vsi->netdev);
3707 netif_carrier_on(vsi->netdev); 3780 netif_carrier_on(vsi->netdev);
3781 } else if (vsi->netdev) {
3782 netdev_info(vsi->netdev, "NIC Link is Down\n");
3708 } 3783 }
3709 i40e_service_event_schedule(pf); 3784 i40e_service_event_schedule(pf);
3710 3785
@@ -3772,8 +3847,8 @@ void i40e_down(struct i40e_vsi *vsi)
3772 i40e_napi_disable_all(vsi); 3847 i40e_napi_disable_all(vsi);
3773 3848
3774 for (i = 0; i < vsi->num_queue_pairs; i++) { 3849 for (i = 0; i < vsi->num_queue_pairs; i++) {
3775 i40e_clean_tx_ring(&vsi->tx_rings[i]); 3850 i40e_clean_tx_ring(vsi->tx_rings[i]);
3776 i40e_clean_rx_ring(&vsi->rx_rings[i]); 3851 i40e_clean_rx_ring(vsi->rx_rings[i]);
3777 } 3852 }
3778} 3853}
3779 3854
@@ -4153,8 +4228,9 @@ static void i40e_link_event(struct i40e_pf *pf)
4153 if (new_link == old_link) 4228 if (new_link == old_link)
4154 return; 4229 return;
4155 4230
4156 netdev_info(pf->vsi[pf->lan_vsi]->netdev, 4231 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
4157 "NIC Link is %s\n", (new_link ? "Up" : "Down")); 4232 netdev_info(pf->vsi[pf->lan_vsi]->netdev,
4233 "NIC Link is %s\n", (new_link ? "Up" : "Down"));
4158 4234
4159 /* Notify the base of the switch tree connected to 4235 /* Notify the base of the switch tree connected to
4160 * the link. Floating VEBs are not notified. 4236 * the link. Floating VEBs are not notified.
@@ -4199,9 +4275,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
4199 continue; 4275 continue;
4200 4276
4201 for (i = 0; i < vsi->num_queue_pairs; i++) { 4277 for (i = 0; i < vsi->num_queue_pairs; i++) {
4202 set_check_for_tx_hang(&vsi->tx_rings[i]); 4278 set_check_for_tx_hang(vsi->tx_rings[i]);
4203 if (test_bit(__I40E_HANG_CHECK_ARMED, 4279 if (test_bit(__I40E_HANG_CHECK_ARMED,
4204 &vsi->tx_rings[i].state)) 4280 &vsi->tx_rings[i]->state))
4205 armed++; 4281 armed++;
4206 } 4282 }
4207 4283
@@ -4537,7 +4613,8 @@ static void i40e_fdir_setup(struct i40e_pf *pf)
4537 bool new_vsi = false; 4613 bool new_vsi = false;
4538 int err, i; 4614 int err, i;
4539 4615
4540 if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED))) 4616 if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
4617 I40E_FLAG_FDIR_ATR_ENABLED)))
4541 return; 4618 return;
4542 4619
4543 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 4620 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
@@ -4937,6 +5014,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
4937{ 5014{
4938 int ret = -ENODEV; 5015 int ret = -ENODEV;
4939 struct i40e_vsi *vsi; 5016 struct i40e_vsi *vsi;
5017 int sz_vectors;
5018 int sz_rings;
4940 int vsi_idx; 5019 int vsi_idx;
4941 int i; 5020 int i;
4942 5021
@@ -4962,14 +5041,14 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
4962 vsi_idx = i; /* Found one! */ 5041 vsi_idx = i; /* Found one! */
4963 } else { 5042 } else {
4964 ret = -ENODEV; 5043 ret = -ENODEV;
4965 goto err_alloc_vsi; /* out of VSI slots! */ 5044 goto unlock_pf; /* out of VSI slots! */
4966 } 5045 }
4967 pf->next_vsi = ++i; 5046 pf->next_vsi = ++i;
4968 5047
4969 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); 5048 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
4970 if (!vsi) { 5049 if (!vsi) {
4971 ret = -ENOMEM; 5050 ret = -ENOMEM;
4972 goto err_alloc_vsi; 5051 goto unlock_pf;
4973 } 5052 }
4974 vsi->type = type; 5053 vsi->type = type;
4975 vsi->back = pf; 5054 vsi->back = pf;
@@ -4982,14 +5061,40 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
4982 vsi->work_limit = I40E_DEFAULT_IRQ_WORK; 5061 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
4983 INIT_LIST_HEAD(&vsi->mac_filter_list); 5062 INIT_LIST_HEAD(&vsi->mac_filter_list);
4984 5063
4985 i40e_set_num_rings_in_vsi(vsi); 5064 ret = i40e_set_num_rings_in_vsi(vsi);
5065 if (ret)
5066 goto err_rings;
5067
5068 /* allocate memory for ring pointers */
5069 sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
5070 vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
5071 if (!vsi->tx_rings) {
5072 ret = -ENOMEM;
5073 goto err_rings;
5074 }
5075 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
5076
5077 /* allocate memory for q_vector pointers */
5078 sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
5079 vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
5080 if (!vsi->q_vectors) {
5081 ret = -ENOMEM;
5082 goto err_vectors;
5083 }
4986 5084
4987 /* Setup default MSIX irq handler for VSI */ 5085 /* Setup default MSIX irq handler for VSI */
4988 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); 5086 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
4989 5087
4990 pf->vsi[vsi_idx] = vsi; 5088 pf->vsi[vsi_idx] = vsi;
4991 ret = vsi_idx; 5089 ret = vsi_idx;
4992err_alloc_vsi: 5090 goto unlock_pf;
5091
5092err_vectors:
5093 kfree(vsi->tx_rings);
5094err_rings:
5095 pf->next_vsi = i - 1;
5096 kfree(vsi);
5097unlock_pf:
4993 mutex_unlock(&pf->switch_mutex); 5098 mutex_unlock(&pf->switch_mutex);
4994 return ret; 5099 return ret;
4995} 5100}
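
Note: tx_rings and rx_rings become arrays of ring pointers carved out of a single kzalloc(): the Rx array is simply the second half of the Tx allocation, so the one kfree(vsi->tx_rings) added to i40e_vsi_clear() below releases both. A sketch of that layout with illustrative names:

#include <linux/slab.h>

struct ring;				/* stand-in for struct i40e_ring */

struct vsi_sketch {
	int alloc_queue_pairs;
	struct ring **tx_rings;		/* base of the combined allocation */
	struct ring **rx_rings;		/* alias of its second half */
};

static int alloc_ring_pointers(struct vsi_sketch *vsi)
{
	size_t sz = sizeof(struct ring *) * vsi->alloc_queue_pairs * 2;

	vsi->tx_rings = kzalloc(sz, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
	return 0;
}

static void free_ring_pointers(struct vsi_sketch *vsi)
{
	kfree(vsi->tx_rings);	/* frees both halves; rx_rings is not separate */
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
}
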
@@ -5030,6 +5135,10 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
5030 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 5135 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
5031 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); 5136 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
5032 5137
5138 /* free the ring and vector containers */
5139 kfree(vsi->q_vectors);
5140 kfree(vsi->tx_rings);
5141
5033 pf->vsi[vsi->idx] = NULL; 5142 pf->vsi[vsi->idx] = NULL;
5034 if (vsi->idx < pf->next_vsi) 5143 if (vsi->idx < pf->next_vsi)
5035 pf->next_vsi = vsi->idx; 5144 pf->next_vsi = vsi->idx;
@@ -5043,34 +5152,40 @@ free_vsi:
5043} 5152}
5044 5153
5045/** 5154/**
5155 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
5156 * @vsi: the VSI being cleaned
5157 **/
5158static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5159{
5160 int i;
5161
5162 if (vsi->tx_rings[0])
5163 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5164 kfree_rcu(vsi->tx_rings[i], rcu);
5165 vsi->tx_rings[i] = NULL;
5166 vsi->rx_rings[i] = NULL;
5167 }
5168
5169 return 0;
5170}
5171
5172/**
5046 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI 5173 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
5047 * @vsi: the VSI being configured 5174 * @vsi: the VSI being configured
5048 **/ 5175 **/
5049static int i40e_alloc_rings(struct i40e_vsi *vsi) 5176static int i40e_alloc_rings(struct i40e_vsi *vsi)
5050{ 5177{
5051 struct i40e_pf *pf = vsi->back; 5178 struct i40e_pf *pf = vsi->back;
5052 int ret = 0;
5053 int i; 5179 int i;
5054 5180
5055 vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
5056 sizeof(struct i40e_ring), GFP_KERNEL);
5057 if (!vsi->rx_rings) {
5058 ret = -ENOMEM;
5059 goto err_alloc_rings;
5060 }
5061
5062 vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
5063 sizeof(struct i40e_ring), GFP_KERNEL);
5064 if (!vsi->tx_rings) {
5065 ret = -ENOMEM;
5066 kfree(vsi->rx_rings);
5067 goto err_alloc_rings;
5068 }
5069
5070 /* Set basic values in the rings to be used later during open() */ 5181 /* Set basic values in the rings to be used later during open() */
5071 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 5182 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5072 struct i40e_ring *rx_ring = &vsi->rx_rings[i]; 5183 struct i40e_ring *tx_ring;
5073 struct i40e_ring *tx_ring = &vsi->tx_rings[i]; 5184 struct i40e_ring *rx_ring;
5185
5186 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
5187 if (!tx_ring)
5188 goto err_out;
5074 5189
5075 tx_ring->queue_index = i; 5190 tx_ring->queue_index = i;
5076 tx_ring->reg_idx = vsi->base_queue + i; 5191 tx_ring->reg_idx = vsi->base_queue + i;
@@ -5081,7 +5196,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
5081 tx_ring->count = vsi->num_desc; 5196 tx_ring->count = vsi->num_desc;
5082 tx_ring->size = 0; 5197 tx_ring->size = 0;
5083 tx_ring->dcb_tc = 0; 5198 tx_ring->dcb_tc = 0;
5199 vsi->tx_rings[i] = tx_ring;
5084 5200
5201 rx_ring = &tx_ring[1];
5085 rx_ring->queue_index = i; 5202 rx_ring->queue_index = i;
5086 rx_ring->reg_idx = vsi->base_queue + i; 5203 rx_ring->reg_idx = vsi->base_queue + i;
5087 rx_ring->ring_active = false; 5204 rx_ring->ring_active = false;
@@ -5095,24 +5212,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
5095 set_ring_16byte_desc_enabled(rx_ring); 5212 set_ring_16byte_desc_enabled(rx_ring);
5096 else 5213 else
5097 clear_ring_16byte_desc_enabled(rx_ring); 5214 clear_ring_16byte_desc_enabled(rx_ring);
5098 } 5215 vsi->rx_rings[i] = rx_ring;
5099
5100err_alloc_rings:
5101 return ret;
5102}
5103
5104/**
5105 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
5106 * @vsi: the VSI being cleaned
5107 **/
5108static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5109{
5110 if (vsi) {
5111 kfree(vsi->rx_rings);
5112 kfree(vsi->tx_rings);
5113 } 5216 }
5114 5217
5115 return 0; 5218 return 0;
5219
5220err_out:
5221 i40e_vsi_clear_rings(vsi);
5222 return -ENOMEM;
5116} 5223}
5117 5224
5118/** 5225/**
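
Note: each queue pair is likewise one allocation: kzalloc(sizeof(struct i40e_ring) * 2, ...) yields the Tx ring and the Rx ring is &tx_ring[1], which is why i40e_update_stats() steps to the Rx half with p = &p[1] and i40e_vsi_clear_rings() only kfree_rcu()s the Tx pointer. A stripped-down version of the pairing:

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct pair_ring {
	struct rcu_head rcu;	/* lets the pair be freed with kfree_rcu() */
	/* ... descriptor ring state ... */
};

static int alloc_queue_pair(struct pair_ring **tx_slot,
			    struct pair_ring **rx_slot)
{
	struct pair_ring *tx;

	tx = kzalloc(sizeof(*tx) * 2, GFP_KERNEL);	/* Tx and Rx together */
	if (!tx)
		return -ENOMEM;

	*tx_slot = tx;
	*rx_slot = &tx[1];	/* Rx is the second ring of the block */
	return 0;
}

static void free_queue_pair(struct pair_ring **tx_slot,
			    struct pair_ring **rx_slot)
{
	if (*tx_slot)
		kfree_rcu(*tx_slot, rcu);	/* releases the Rx half too */
	*tx_slot = NULL;
	*rx_slot = NULL;
}
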
@@ -5249,6 +5356,38 @@ static int i40e_init_msix(struct i40e_pf *pf)
5249} 5356}
5250 5357
5251/** 5358/**
5359 * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
5360 * @vsi: the VSI being configured
5361 * @v_idx: index of the vector in the vsi struct
5362 *
5363 * We allocate one q_vector. If allocation fails we return -ENOMEM.
5364 **/
5365static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
5366{
5367 struct i40e_q_vector *q_vector;
5368
5369 /* allocate q_vector */
5370 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
5371 if (!q_vector)
5372 return -ENOMEM;
5373
5374 q_vector->vsi = vsi;
5375 q_vector->v_idx = v_idx;
5376 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
5377 if (vsi->netdev)
5378 netif_napi_add(vsi->netdev, &q_vector->napi,
5379 i40e_napi_poll, vsi->work_limit);
5380
5381 q_vector->rx.latency_range = I40E_LOW_LATENCY;
5382 q_vector->tx.latency_range = I40E_LOW_LATENCY;
5383
5384 /* tie q_vector and vsi together */
5385 vsi->q_vectors[v_idx] = q_vector;
5386
5387 return 0;
5388}
5389
5390/**
5252 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors 5391 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
5253 * @vsi: the VSI being configured 5392 * @vsi: the VSI being configured
5254 * 5393 *
@@ -5259,6 +5398,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
5259{ 5398{
5260 struct i40e_pf *pf = vsi->back; 5399 struct i40e_pf *pf = vsi->back;
5261 int v_idx, num_q_vectors; 5400 int v_idx, num_q_vectors;
5401 int err;
5262 5402
5263 /* if not MSIX, give the one vector only to the LAN VSI */ 5403 /* if not MSIX, give the one vector only to the LAN VSI */
5264 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 5404 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -5268,22 +5408,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
5268 else 5408 else
5269 return -EINVAL; 5409 return -EINVAL;
5270 5410
5271 vsi->q_vectors = kcalloc(num_q_vectors,
5272 sizeof(struct i40e_q_vector),
5273 GFP_KERNEL);
5274 if (!vsi->q_vectors)
5275 return -ENOMEM;
5276
5277 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 5411 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
5278 vsi->q_vectors[v_idx].vsi = vsi; 5412 err = i40e_alloc_q_vector(vsi, v_idx);
5279 vsi->q_vectors[v_idx].v_idx = v_idx; 5413 if (err)
5280 cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask); 5414 goto err_out;
5281 if (vsi->netdev)
5282 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
5283 i40e_napi_poll, vsi->work_limit);
5284 } 5415 }
5285 5416
5286 return 0; 5417 return 0;
5418
5419err_out:
5420 while (v_idx--)
5421 i40e_free_q_vector(vsi, v_idx);
5422
5423 return err;
5287} 5424}
5288 5425
5289/** 5426/**
@@ -5297,7 +5434,8 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
5297 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 5434 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
5298 err = i40e_init_msix(pf); 5435 err = i40e_init_msix(pf);
5299 if (err) { 5436 if (err) {
5300 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 5437 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
5438 I40E_FLAG_RSS_ENABLED |
5301 I40E_FLAG_MQ_ENABLED | 5439 I40E_FLAG_MQ_ENABLED |
5302 I40E_FLAG_DCB_ENABLED | 5440 I40E_FLAG_DCB_ENABLED |
5303 I40E_FLAG_SRIOV_ENABLED | 5441 I40E_FLAG_SRIOV_ENABLED |
@@ -5312,14 +5450,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
5312 5450
5313 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 5451 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
5314 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 5452 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
5453 dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
5315 err = pci_enable_msi(pf->pdev); 5454 err = pci_enable_msi(pf->pdev);
5316 if (err) { 5455 if (err) {
5317 dev_info(&pf->pdev->dev, 5456 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
5318 "MSI init failed (%d), trying legacy.\n", err);
5319 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 5457 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
5320 } 5458 }
5321 } 5459 }
5322 5460
5461 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
5462 dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
5463
5323 /* track first vector for misc interrupts */ 5464 /* track first vector for misc interrupts */
5324 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); 5465 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
5325} 5466}
@@ -5950,7 +6091,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
5950 int ret = -ENOENT; 6091 int ret = -ENOENT;
5951 struct i40e_pf *pf = vsi->back; 6092 struct i40e_pf *pf = vsi->back;
5952 6093
5953 if (vsi->q_vectors) { 6094 if (vsi->q_vectors[0]) {
5954 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 6095 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
5955 vsi->seid); 6096 vsi->seid);
5956 return -EEXIST; 6097 return -EEXIST;
@@ -5972,8 +6113,9 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
5972 goto vector_setup_out; 6113 goto vector_setup_out;
5973 } 6114 }
5974 6115
5975 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, 6116 if (vsi->num_q_vectors)
5976 vsi->num_q_vectors, vsi->idx); 6117 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
6118 vsi->num_q_vectors, vsi->idx);
5977 if (vsi->base_vector < 0) { 6119 if (vsi->base_vector < 0) {
5978 dev_info(&pf->pdev->dev, 6120 dev_info(&pf->pdev->dev,
5979 "failed to get q tracking for VSI %d, err=%d\n", 6121 "failed to get q tracking for VSI %d, err=%d\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 49d2cfa9b0cc..f1f03bc5c729 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -37,6 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
37 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); 37 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
38} 38}
39 39
40#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
40/** 41/**
41 * i40e_program_fdir_filter - Program a Flow Director filter 42 * i40e_program_fdir_filter - Program a Flow Director filter
42 * @fdir_input: Packet data that will be filter parameters 43 * @fdir_input: Packet data that will be filter parameters
@@ -50,6 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
50 struct i40e_tx_buffer *tx_buf; 51 struct i40e_tx_buffer *tx_buf;
51 struct i40e_tx_desc *tx_desc; 52 struct i40e_tx_desc *tx_desc;
52 struct i40e_ring *tx_ring; 53 struct i40e_ring *tx_ring;
54 unsigned int fpt, dcc;
53 struct i40e_vsi *vsi; 55 struct i40e_vsi *vsi;
54 struct device *dev; 56 struct device *dev;
55 dma_addr_t dma; 57 dma_addr_t dma;
@@ -64,93 +66,78 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
64 if (!vsi) 66 if (!vsi)
65 return -ENOENT; 67 return -ENOENT;
66 68
67 tx_ring = &vsi->tx_rings[0]; 69 tx_ring = vsi->tx_rings[0];
68 dev = tx_ring->dev; 70 dev = tx_ring->dev;
69 71
70 dma = dma_map_single(dev, fdir_data->raw_packet, 72 dma = dma_map_single(dev, fdir_data->raw_packet,
71 I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE); 73 I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
72 if (dma_mapping_error(dev, dma)) 74 if (dma_mapping_error(dev, dma))
73 goto dma_fail; 75 goto dma_fail;
74 76
75 /* grab the next descriptor */ 77 /* grab the next descriptor */
76 fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use); 78 i = tx_ring->next_to_use;
77 tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use]; 79 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
78 tx_ring->next_to_use++; 80 tx_buf = &tx_ring->tx_bi[i];
79 if (tx_ring->next_to_use == tx_ring->count) 81
80 tx_ring->next_to_use = 0; 82 tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
81 83
82 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index 84 fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
83 << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) 85 I40E_TXD_FLTR_QW0_QINDEX_MASK;
84 & I40E_TXD_FLTR_QW0_QINDEX_MASK);
85 86
86 fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off 87 fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
87 << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) 88 I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
88 & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
89 89
90 fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype 90 fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
91 << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) 91 I40E_TXD_FLTR_QW0_PCTYPE_MASK;
92 & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
93 92
94 /* Use LAN VSI Id if not programmed by user */ 93 /* Use LAN VSI Id if not programmed by user */
95 if (fdir_data->dest_vsi == 0) 94 if (fdir_data->dest_vsi == 0)
96 fdir_desc->qindex_flex_ptype_vsi |= 95 fpt |= (pf->vsi[pf->lan_vsi]->id) <<
97 cpu_to_le32((pf->vsi[pf->lan_vsi]->id) 96 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
98 << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
99 else 97 else
100 fdir_desc->qindex_flex_ptype_vsi |= 98 fpt |= ((u32)fdir_data->dest_vsi <<
101 cpu_to_le32((fdir_data->dest_vsi 99 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
102 << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) 100 I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
103 & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
104 101
105 fdir_desc->dtype_cmd_cntindex = 102 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
106 cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG); 103
104 dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
107 105
108 if (add) 106 if (add)
109 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( 107 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
110 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE 108 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
111 << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
112 else 109 else
113 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( 110 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
114 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE 111 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
115 << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
116 112
117 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl 113 dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
118 << I40E_TXD_FLTR_QW1_DEST_SHIFT) 114 I40E_TXD_FLTR_QW1_DEST_MASK;
119 & I40E_TXD_FLTR_QW1_DEST_MASK);
120 115
121 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( 116 dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
122 (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) 117 I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
123 & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
124 118
125 if (fdir_data->cnt_index != 0) { 119 if (fdir_data->cnt_index != 0) {
126 fdir_desc->dtype_cmd_cntindex |= 120 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
127 cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); 121 dcc |= ((u32)fdir_data->cnt_index <<
128 fdir_desc->dtype_cmd_cntindex |= 122 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
129 cpu_to_le32((fdir_data->cnt_index 123 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
130 << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
131 & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
132 } 124 }
133 125
126 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
134 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id); 127 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
135 128
136 /* Now program a dummy descriptor */ 129 /* Now program a dummy descriptor */
137 tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use); 130 i = tx_ring->next_to_use;
138 tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use]; 131 tx_desc = I40E_TX_DESC(tx_ring, i);
139 tx_ring->next_to_use++; 132
140 if (tx_ring->next_to_use == tx_ring->count) 133 tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
141 tx_ring->next_to_use = 0;
142 134
143 tx_desc->buffer_addr = cpu_to_le64(dma); 135 tx_desc->buffer_addr = cpu_to_le64(dma);
144 td_cmd = I40E_TX_DESC_CMD_EOP | 136 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
145 I40E_TX_DESC_CMD_RS |
146 I40E_TX_DESC_CMD_DUMMY;
147 137
148 tx_desc->cmd_type_offset_bsz = 138 tx_desc->cmd_type_offset_bsz =
149 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); 139 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
150 140
151 /* Mark the data descriptor to be watched */
152 tx_buf->next_to_watch = tx_desc;
153
154 /* Force memory writes to complete before letting h/w 141 /* Force memory writes to complete before letting h/w
155 * know there are new descriptors to fetch. (Only 142 * know there are new descriptors to fetch. (Only
156 * applicable for weak-ordered memory model archs, 143 * applicable for weak-ordered memory model archs,
@@ -158,6 +145,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
158 */ 145 */
159 wmb(); 146 wmb();
160 147
148 /* Mark the data descriptor to be watched */
149 tx_buf->next_to_watch = tx_desc;
150
161 writel(tx_ring->next_to_use, tx_ring->tail); 151 writel(tx_ring->next_to_use, tx_ring->tail);
162 return 0; 152 return 0;
163 153
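
Note: the programming descriptor is now assembled in plain u32 locals (fpt, dcc) and converted with a single cpu_to_le32() per field, instead of OR-ing cpu_to_le32() values into the descriptor in DMA memory one bit-field at a time. A small sketch of the accumulate-then-convert idea; the shift/mask names and values below are illustrative, not the real I40E_TXD_* defines:

#include <linux/types.h>
#include <asm/byteorder.h>

#define SK_QINDEX_SHIFT		0
#define SK_QINDEX_MASK		0x000007FFU
#define SK_FLEXOFF_SHIFT	11
#define SK_FLEXOFF_MASK		0x0000F800U
#define SK_PCTYPE_SHIFT		17
#define SK_PCTYPE_MASK		0x007E0000U

static __le32 build_qword(u32 q_index, u32 flex_off, u32 pctype)
{
	u32 fpt = 0;

	/* accumulate in CPU byte order in a local ... */
	fpt |= (q_index << SK_QINDEX_SHIFT) & SK_QINDEX_MASK;
	fpt |= (flex_off << SK_FLEXOFF_SHIFT) & SK_FLEXOFF_MASK;
	fpt |= (pctype << SK_PCTYPE_SHIFT) & SK_PCTYPE_MASK;

	/* ... and convert/store into the descriptor exactly once */
	return cpu_to_le32(fpt);
}
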
@@ -188,27 +178,30 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
188} 178}
189 179
190/** 180/**
191 * i40e_unmap_tx_resource - Release a Tx buffer 181 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
192 * @ring: the ring that owns the buffer 182 * @ring: the ring that owns the buffer
193 * @tx_buffer: the buffer to free 183 * @tx_buffer: the buffer to free
194 **/ 184 **/
195static inline void i40e_unmap_tx_resource(struct i40e_ring *ring, 185static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
196 struct i40e_tx_buffer *tx_buffer) 186 struct i40e_tx_buffer *tx_buffer)
197{ 187{
198 if (tx_buffer->dma) { 188 if (tx_buffer->skb) {
199 if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE) 189 dev_kfree_skb_any(tx_buffer->skb);
200 dma_unmap_page(ring->dev, 190 if (dma_unmap_len(tx_buffer, len))
201 tx_buffer->dma,
202 tx_buffer->length,
203 DMA_TO_DEVICE);
204 else
205 dma_unmap_single(ring->dev, 191 dma_unmap_single(ring->dev,
206 tx_buffer->dma, 192 dma_unmap_addr(tx_buffer, dma),
207 tx_buffer->length, 193 dma_unmap_len(tx_buffer, len),
208 DMA_TO_DEVICE); 194 DMA_TO_DEVICE);
195 } else if (dma_unmap_len(tx_buffer, len)) {
196 dma_unmap_page(ring->dev,
197 dma_unmap_addr(tx_buffer, dma),
198 dma_unmap_len(tx_buffer, len),
199 DMA_TO_DEVICE);
209 } 200 }
210 tx_buffer->dma = 0; 201 tx_buffer->next_to_watch = NULL;
211 tx_buffer->time_stamp = 0; 202 tx_buffer->skb = NULL;
203 dma_unmap_len_set(tx_buffer, len, 0);
204 /* tx_buffer must be completely set up in the transmit path */
212} 205}
213 206
214/** 207/**
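
Note: the rewritten helper keys everything off the dma_unmap_addr()/dma_unmap_len() accessors stored in the Tx buffer: a non-zero unmap length means something is mapped, the presence of an skb distinguishes head data (dma_unmap_single) from paged data (dma_unmap_page), and the length is cleared once unmapped. The bookkeeping pattern, with struct tx_buf as a stand-in:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct tx_buf {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

static void record_mapping(struct tx_buf *b, dma_addr_t dma, unsigned int len)
{
	dma_unmap_addr_set(b, dma, dma);
	dma_unmap_len_set(b, len, len);
}

static void release_mapping(struct device *dev, struct tx_buf *b)
{
	if (dma_unmap_len(b, len)) {
		dma_unmap_single(dev, dma_unmap_addr(b, dma),
				 dma_unmap_len(b, len), DMA_TO_DEVICE);
		dma_unmap_len_set(b, len, 0);	/* mark as unmapped */
	}
}
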
@@ -217,7 +210,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
217 **/ 210 **/
218void i40e_clean_tx_ring(struct i40e_ring *tx_ring) 211void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
219{ 212{
220 struct i40e_tx_buffer *tx_buffer;
221 unsigned long bi_size; 213 unsigned long bi_size;
222 u16 i; 214 u16 i;
223 215
@@ -226,13 +218,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
226 return; 218 return;
227 219
228 /* Free all the Tx ring sk_buffs */ 220 /* Free all the Tx ring sk_buffs */
229 for (i = 0; i < tx_ring->count; i++) { 221 for (i = 0; i < tx_ring->count; i++)
230 tx_buffer = &tx_ring->tx_bi[i]; 222 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
231 i40e_unmap_tx_resource(tx_ring, tx_buffer);
232 if (tx_buffer->skb)
233 dev_kfree_skb_any(tx_buffer->skb);
234 tx_buffer->skb = NULL;
235 }
236 223
237 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; 224 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
238 memset(tx_ring->tx_bi, 0, bi_size); 225 memset(tx_ring->tx_bi, 0, bi_size);
@@ -242,6 +229,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
242 229
243 tx_ring->next_to_use = 0; 230 tx_ring->next_to_use = 0;
244 tx_ring->next_to_clean = 0; 231 tx_ring->next_to_clean = 0;
232
233 if (!tx_ring->netdev)
234 return;
235
236 /* cleanup Tx queue statistics */
237 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
238 tx_ring->queue_index));
245} 239}
246 240
247/** 241/**
@@ -300,14 +294,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
300 * run the check_tx_hang logic with a transmit completion 294 * run the check_tx_hang logic with a transmit completion
301 * pending but without time to complete it yet. 295 * pending but without time to complete it yet.
302 */ 296 */
303 if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) && 297 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
304 tx_pending) { 298 tx_pending) {
305 /* make sure it is true for two checks in a row */ 299 /* make sure it is true for two checks in a row */
306 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 300 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
307 &tx_ring->state); 301 &tx_ring->state);
308 } else { 302 } else {
309 /* update completed stats and disarm the hang check */ 303 /* update completed stats and disarm the hang check */
310 tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets; 304 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
311 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 305 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
312 } 306 }
313 307
@@ -331,62 +325,88 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
331 325
332 tx_buf = &tx_ring->tx_bi[i]; 326 tx_buf = &tx_ring->tx_bi[i];
333 tx_desc = I40E_TX_DESC(tx_ring, i); 327 tx_desc = I40E_TX_DESC(tx_ring, i);
328 i -= tx_ring->count;
334 329
335 for (; budget; budget--) { 330 do {
336 struct i40e_tx_desc *eop_desc; 331 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
337
338 eop_desc = tx_buf->next_to_watch;
339 332
340 /* if next_to_watch is not set then there is no work pending */ 333 /* if next_to_watch is not set then there is no work pending */
341 if (!eop_desc) 334 if (!eop_desc)
342 break; 335 break;
343 336
337 /* prevent any other reads prior to eop_desc */
338 read_barrier_depends();
339
344 /* if the descriptor isn't done, no work yet to do */ 340 /* if the descriptor isn't done, no work yet to do */
345 if (!(eop_desc->cmd_type_offset_bsz & 341 if (!(eop_desc->cmd_type_offset_bsz &
346 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 342 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
347 break; 343 break;
348 344
349 /* count the packet as being completed */ 345 /* clear next_to_watch to prevent false hangs */
350 tx_ring->tx_stats.completed++;
351 tx_buf->next_to_watch = NULL; 346 tx_buf->next_to_watch = NULL;
352 tx_buf->time_stamp = 0;
353
354 /* set memory barrier before eop_desc is verified */
355 rmb();
356 347
357 do { 348 /* update the statistics for this packet */
358 i40e_unmap_tx_resource(tx_ring, tx_buf); 349 total_bytes += tx_buf->bytecount;
350 total_packets += tx_buf->gso_segs;
359 351
360 /* clear dtype status */ 352 /* free the skb */
361 tx_desc->cmd_type_offset_bsz &= 353 dev_kfree_skb_any(tx_buf->skb);
362 ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
363 354
364 if (likely(tx_desc == eop_desc)) { 355 /* unmap skb header data */
365 eop_desc = NULL; 356 dma_unmap_single(tx_ring->dev,
357 dma_unmap_addr(tx_buf, dma),
358 dma_unmap_len(tx_buf, len),
359 DMA_TO_DEVICE);
366 360
367 dev_kfree_skb_any(tx_buf->skb); 361 /* clear tx_buffer data */
368 tx_buf->skb = NULL; 362 tx_buf->skb = NULL;
363 dma_unmap_len_set(tx_buf, len, 0);
369 364
370 total_bytes += tx_buf->bytecount; 365 /* unmap remaining buffers */
371 total_packets += tx_buf->gso_segs; 366 while (tx_desc != eop_desc) {
372 }
373 367
374 tx_buf++; 368 tx_buf++;
375 tx_desc++; 369 tx_desc++;
376 i++; 370 i++;
377 if (unlikely(i == tx_ring->count)) { 371 if (unlikely(!i)) {
378 i = 0; 372 i -= tx_ring->count;
379 tx_buf = tx_ring->tx_bi; 373 tx_buf = tx_ring->tx_bi;
380 tx_desc = I40E_TX_DESC(tx_ring, 0); 374 tx_desc = I40E_TX_DESC(tx_ring, 0);
381 } 375 }
382 } while (eop_desc);
383 }
384 376
377 /* unmap any remaining paged data */
378 if (dma_unmap_len(tx_buf, len)) {
379 dma_unmap_page(tx_ring->dev,
380 dma_unmap_addr(tx_buf, dma),
381 dma_unmap_len(tx_buf, len),
382 DMA_TO_DEVICE);
383 dma_unmap_len_set(tx_buf, len, 0);
384 }
385 }
386
387 /* move us one more past the eop_desc for start of next pkt */
388 tx_buf++;
389 tx_desc++;
390 i++;
391 if (unlikely(!i)) {
392 i -= tx_ring->count;
393 tx_buf = tx_ring->tx_bi;
394 tx_desc = I40E_TX_DESC(tx_ring, 0);
395 }
396
397 /* update budget accounting */
398 budget--;
399 } while (likely(budget));
400
401 i += tx_ring->count;
385 tx_ring->next_to_clean = i; 402 tx_ring->next_to_clean = i;
386 tx_ring->tx_stats.bytes += total_bytes; 403 u64_stats_update_begin(&tx_ring->syncp);
387 tx_ring->tx_stats.packets += total_packets; 404 tx_ring->stats.bytes += total_bytes;
405 tx_ring->stats.packets += total_packets;
406 u64_stats_update_end(&tx_ring->syncp);
388 tx_ring->q_vector->tx.total_bytes += total_bytes; 407 tx_ring->q_vector->tx.total_bytes += total_bytes;
389 tx_ring->q_vector->tx.total_packets += total_packets; 408 tx_ring->q_vector->tx.total_packets += total_packets;
409
390 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { 410 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
391 /* schedule immediate reset if we believe we hung */ 411 /* schedule immediate reset if we believe we hung */
392 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" 412 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
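
Note: the clean loop keeps its ring index biased by -count, so the wrap check degenerates to if (unlikely(!i)) instead of a compare against tx_ring->count on every descriptor, and the index is un-biased once before being stored back to next_to_clean. Roughly, for a hypothetical ring of entries:

#include <linux/compiler.h>

struct entry { int done; };

/* returns the new next_to_clean */
static unsigned int walk_ring(struct entry *base, unsigned int count,
			      unsigned int next_to_clean, int budget)
{
	struct entry *e = &base[next_to_clean];
	int i = next_to_clean;

	i -= count;			/* bias the index into [-count, 0) */

	do {
		if (!e->done)
			break;
		e->done = 0;		/* ... per-descriptor work ... */

		e++;
		i++;
		if (unlikely(!i)) {	/* hit 0: wrap to the ring start */
			i -= count;
			e = base;
		}
	} while (likely(--budget));

	return i + count;		/* un-bias before publishing */
}
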
@@ -414,6 +434,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
414 return true; 434 return true;
415 } 435 }
416 436
437 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
438 tx_ring->queue_index),
439 total_packets, total_bytes);
440
417#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 441#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
418 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 442 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
419 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 443 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
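
Note: netdev_tx_completed_queue() here and netdev_tx_reset_queue() in i40e_clean_tx_ring() are the byte-queue-limits hooks; the matching netdev_tx_sent_queue() call belongs in the transmit path, which is not visible in this part of the diff. Minimal usage sketch for the clean side; ndev and queue_index stand in for the ring's netdev and queue:

#include <linux/netdevice.h>

static void tx_clean_report(struct net_device *ndev, u16 queue_index,
			    unsigned int packets, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, queue_index);

	/* feeds BQL so it can size the amount of data in flight */
	netdev_tx_completed_queue(txq, packets, bytes);
}
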
@@ -524,8 +548,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
524 i40e_set_new_dynamic_itr(&q_vector->tx); 548 i40e_set_new_dynamic_itr(&q_vector->tx);
525 if (old_itr != q_vector->tx.itr) 549 if (old_itr != q_vector->tx.itr)
526 wr32(hw, reg_addr, q_vector->tx.itr); 550 wr32(hw, reg_addr, q_vector->tx.itr);
527
528 i40e_flush(hw);
529} 551}
530 552
531/** 553/**
@@ -1042,8 +1064,10 @@ next_desc:
1042 } 1064 }
1043 1065
1044 rx_ring->next_to_clean = i; 1066 rx_ring->next_to_clean = i;
1045 rx_ring->rx_stats.packets += total_rx_packets; 1067 u64_stats_update_begin(&rx_ring->syncp);
1046 rx_ring->rx_stats.bytes += total_rx_bytes; 1068 rx_ring->stats.packets += total_rx_packets;
1069 rx_ring->stats.bytes += total_rx_bytes;
1070 u64_stats_update_end(&rx_ring->syncp);
1047 rx_ring->q_vector->rx.total_packets += total_rx_packets; 1071 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1048 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; 1072 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1049 1073
@@ -1067,27 +1091,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
1067 struct i40e_q_vector *q_vector = 1091 struct i40e_q_vector *q_vector =
1068 container_of(napi, struct i40e_q_vector, napi); 1092 container_of(napi, struct i40e_q_vector, napi);
1069 struct i40e_vsi *vsi = q_vector->vsi; 1093 struct i40e_vsi *vsi = q_vector->vsi;
1094 struct i40e_ring *ring;
1070 bool clean_complete = true; 1095 bool clean_complete = true;
1071 int budget_per_ring; 1096 int budget_per_ring;
1072 int i;
1073 1097
1074 if (test_bit(__I40E_DOWN, &vsi->state)) { 1098 if (test_bit(__I40E_DOWN, &vsi->state)) {
1075 napi_complete(napi); 1099 napi_complete(napi);
1076 return 0; 1100 return 0;
1077 } 1101 }
1078 1102
1103 /* Since the actual Tx work is minimal, we can give the Tx a larger
1104 * budget and be more aggressive about cleaning up the Tx descriptors.
1105 */
1106 i40e_for_each_ring(ring, q_vector->tx)
1107 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1108
1079 /* We attempt to distribute budget to each Rx queue fairly, but don't 1109 /* We attempt to distribute budget to each Rx queue fairly, but don't
1080 * allow the budget to go below 1 because that would exit polling early. 1110 * allow the budget to go below 1 because that would exit polling early.
1081 * Since the actual Tx work is minimal, we can give the Tx a larger
1082 * budget and be more aggressive about cleaning up the Tx descriptors.
1083 */ 1111 */
1084 budget_per_ring = max(budget/q_vector->num_ringpairs, 1); 1112 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1085 for (i = 0; i < q_vector->num_ringpairs; i++) { 1113
1086 clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i], 1114 i40e_for_each_ring(ring, q_vector->rx)
1087 vsi->work_limit); 1115 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
1088 clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
1089 budget_per_ring);
1090 }
1091 1116
1092 /* If work not completed, return budget and polling will return */ 1117 /* If work not completed, return budget and polling will return */
1093 if (!clean_complete) 1118 if (!clean_complete)
@@ -1117,7 +1142,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
1117 qval = rd32(hw, I40E_QINT_TQCTL(0)); 1142 qval = rd32(hw, I40E_QINT_TQCTL(0));
1118 qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK; 1143 qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1119 wr32(hw, I40E_QINT_TQCTL(0), qval); 1144 wr32(hw, I40E_QINT_TQCTL(0), qval);
1120 i40e_flush(hw); 1145
1146 i40e_irq_dynamic_enable_icr0(vsi->back);
1121 } 1147 }
1122 } 1148 }
1123 1149
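
Note: i40e_for_each_ring() replaces the indexed ring[] walk; it is presumably just a traversal of the ring->next list that map_vector_to_qp() builds, likely defined alongside the ring structures. An equivalent open-coded form for a hypothetical container:

#include <linux/types.h>

struct r {
	struct r *next;
};

struct ring_container_sketch {
	struct r *ring;			/* head of the singly linked list */
	u16 count;
};

#define for_each_ring_sketch(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)

static unsigned int count_rings(struct ring_container_sketch *rc)
{
	struct r *pos;
	unsigned int n = 0;

	for_each_ring_sketch(pos, *rc)
		n++;
	return n;
}
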
@@ -1144,6 +1170,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1144 struct tcphdr *th; 1170 struct tcphdr *th;
1145 unsigned int hlen; 1171 unsigned int hlen;
1146 u32 flex_ptype, dtype_cmd; 1172 u32 flex_ptype, dtype_cmd;
1173 u16 i;
1147 1174
1148 /* make sure ATR is enabled */ 1175 /* make sure ATR is enabled */
1149 if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED)) 1176 if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
@@ -1183,10 +1210,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1183 tx_ring->atr_count = 0; 1210 tx_ring->atr_count = 0;
1184 1211
1185 /* grab the next descriptor */ 1212 /* grab the next descriptor */
1186 fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use); 1213 i = tx_ring->next_to_use;
1187 tx_ring->next_to_use++; 1214 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
1188 if (tx_ring->next_to_use == tx_ring->count) 1215
1189 tx_ring->next_to_use = 0; 1216 i++;
1217 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1190 1218
1191 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & 1219 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1192 I40E_TXD_FLTR_QW0_QINDEX_MASK; 1220 I40E_TXD_FLTR_QW0_QINDEX_MASK;
@@ -1216,7 +1244,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1216 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); 1244 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
1217} 1245}
1218 1246
1219#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
1220/** 1247/**
1221 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW 1248 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1222 * @skb: send buffer 1249 * @skb: send buffer
@@ -1276,27 +1303,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1276} 1303}
1277 1304
1278/** 1305/**
1279 * i40e_tx_csum - is checksum offload requested
1280 * @tx_ring: ptr to the ring to send
1281 * @skb: ptr to the skb we're sending
1282 * @tx_flags: the collected send information
1283 * @protocol: the send protocol
1284 *
1285 * Returns true if checksum offload is requested
1286 **/
1287static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
1288 u32 tx_flags, __be16 protocol)
1289{
1290 if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
1291 !(tx_flags & I40E_TX_FLAGS_TXSW)) {
1292 if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
1293 return false;
1294 }
1295
1296 return skb->ip_summed == CHECKSUM_PARTIAL;
1297}
1298
1299/**
1300 * i40e_tso - set up the tso context descriptor 1306 * i40e_tso - set up the tso context descriptor
1301 * @tx_ring: ptr to the ring to send 1307 * @tx_ring: ptr to the ring to send
1302 * @skb: ptr to the skb we're sending 1308 * @skb: ptr to the skb we're sending
@@ -1482,15 +1488,16 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1482 const u32 cd_tunneling, const u32 cd_l2tag2) 1488 const u32 cd_tunneling, const u32 cd_l2tag2)
1483{ 1489{
1484 struct i40e_tx_context_desc *context_desc; 1490 struct i40e_tx_context_desc *context_desc;
1491 int i = tx_ring->next_to_use;
1485 1492
1486 if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) 1493 if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
1487 return; 1494 return;
1488 1495
1489 /* grab the next descriptor */ 1496 /* grab the next descriptor */
1490 context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use); 1497 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
1491 tx_ring->next_to_use++; 1498
1492 if (tx_ring->next_to_use == tx_ring->count) 1499 i++;
1493 tx_ring->next_to_use = 0; 1500 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1494 1501
1495 /* cpu_to_le32 and assign to struct fields */ 1502 /* cpu_to_le32 and assign to struct fields */
1496 context_desc->tunneling_params = cpu_to_le32(cd_tunneling); 1503 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
@@ -1512,68 +1519,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1512 struct i40e_tx_buffer *first, u32 tx_flags, 1519 struct i40e_tx_buffer *first, u32 tx_flags,
1513 const u8 hdr_len, u32 td_cmd, u32 td_offset) 1520 const u8 hdr_len, u32 td_cmd, u32 td_offset)
1514{ 1521{
1515 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1516 unsigned int data_len = skb->data_len; 1522 unsigned int data_len = skb->data_len;
1517 unsigned int size = skb_headlen(skb); 1523 unsigned int size = skb_headlen(skb);
1518 struct device *dev = tx_ring->dev; 1524 struct skb_frag_struct *frag;
1519 u32 paylen = skb->len - hdr_len;
1520 u16 i = tx_ring->next_to_use;
1521 struct i40e_tx_buffer *tx_bi; 1525 struct i40e_tx_buffer *tx_bi;
1522 struct i40e_tx_desc *tx_desc; 1526 struct i40e_tx_desc *tx_desc;
1523 u32 buf_offset = 0; 1527 u16 i = tx_ring->next_to_use;
1524 u32 td_tag = 0; 1528 u32 td_tag = 0;
1525 dma_addr_t dma; 1529 dma_addr_t dma;
1526 u16 gso_segs; 1530 u16 gso_segs;
1527 1531
1528 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1529 if (dma_mapping_error(dev, dma))
1530 goto dma_error;
1531
1532 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { 1532 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
1533 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; 1533 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
1534 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> 1534 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
1535 I40E_TX_FLAGS_VLAN_SHIFT; 1535 I40E_TX_FLAGS_VLAN_SHIFT;
1536 } 1536 }
1537 1537
1538 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
1539 gso_segs = skb_shinfo(skb)->gso_segs;
1540 else
1541 gso_segs = 1;
1542
1543 /* multiply data chunks by size of headers */
1544 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
1545 first->gso_segs = gso_segs;
1546 first->skb = skb;
1547 first->tx_flags = tx_flags;
1548
1549 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1550
1538 tx_desc = I40E_TX_DESC(tx_ring, i); 1551 tx_desc = I40E_TX_DESC(tx_ring, i);
1539 for (;;) { 1552 tx_bi = first;
1540 while (size > I40E_MAX_DATA_PER_TXD) { 1553
1541 tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset); 1554 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1555 if (dma_mapping_error(tx_ring->dev, dma))
1556 goto dma_error;
1557
1558 /* record length, and DMA address */
1559 dma_unmap_len_set(tx_bi, len, size);
1560 dma_unmap_addr_set(tx_bi, dma, dma);
1561
1562 tx_desc->buffer_addr = cpu_to_le64(dma);
1563
1564 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
1542 tx_desc->cmd_type_offset_bsz = 1565 tx_desc->cmd_type_offset_bsz =
1543 build_ctob(td_cmd, td_offset, 1566 build_ctob(td_cmd, td_offset,
1544 I40E_MAX_DATA_PER_TXD, td_tag); 1567 I40E_MAX_DATA_PER_TXD, td_tag);
1545 1568
1546 buf_offset += I40E_MAX_DATA_PER_TXD;
1547 size -= I40E_MAX_DATA_PER_TXD;
1548
1549 tx_desc++; 1569 tx_desc++;
1550 i++; 1570 i++;
1551 if (i == tx_ring->count) { 1571 if (i == tx_ring->count) {
1552 tx_desc = I40E_TX_DESC(tx_ring, 0); 1572 tx_desc = I40E_TX_DESC(tx_ring, 0);
1553 i = 0; 1573 i = 0;
1554 } 1574 }
1555 }
1556 1575
1557 tx_bi = &tx_ring->tx_bi[i]; 1576 dma += I40E_MAX_DATA_PER_TXD;
1558 tx_bi->length = buf_offset + size; 1577 size -= I40E_MAX_DATA_PER_TXD;
1559 tx_bi->tx_flags = tx_flags;
1560 tx_bi->dma = dma;
1561 1578
1562 tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset); 1579 tx_desc->buffer_addr = cpu_to_le64(dma);
1563 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, 1580 }
1564 size, td_tag);
1565 1581
1566 if (likely(!data_len)) 1582 if (likely(!data_len))
1567 break; 1583 break;
1568 1584
1569 size = skb_frag_size(frag); 1585 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1570 data_len -= size; 1586 size, td_tag);
1571 buf_offset = 0;
1572 tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
1573
1574 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1575 if (dma_mapping_error(dev, dma))
1576 goto dma_error;
1577 1587
1578 tx_desc++; 1588 tx_desc++;
1579 i++; 1589 i++;
@@ -1582,31 +1592,25 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1582 i = 0; 1592 i = 0;
1583 } 1593 }
1584 1594
1585 frag++; 1595 size = skb_frag_size(frag);
1586 } 1596 data_len -= size;
1587
1588 tx_desc->cmd_type_offset_bsz |=
1589 cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
1590 1597
1591 i++; 1598 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1592 if (i == tx_ring->count) 1599 DMA_TO_DEVICE);
1593 i = 0;
1594 1600
1595 tx_ring->next_to_use = i; 1601 tx_bi = &tx_ring->tx_bi[i];
1602 }
1596 1603
1597 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) 1604 tx_desc->cmd_type_offset_bsz =
1598 gso_segs = skb_shinfo(skb)->gso_segs; 1605 build_ctob(td_cmd, td_offset, size, td_tag) |
1599 else 1606 cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
1600 gso_segs = 1;
1601 1607
1602 /* multiply data chunks by size of headers */ 1608 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
1603 tx_bi->bytecount = paylen + (gso_segs * hdr_len); 1609 tx_ring->queue_index),
1604 tx_bi->gso_segs = gso_segs; 1610 first->bytecount);
1605 tx_bi->skb = skb;
1606 1611
1607 /* set the timestamp and next to watch values */ 1612 /* set the timestamp */
1608 first->time_stamp = jiffies; 1613 first->time_stamp = jiffies;
1609 first->next_to_watch = tx_desc;
1610 1614
1611 /* Force memory writes to complete before letting h/w 1615 /* Force memory writes to complete before letting h/w
1612 * know there are new descriptors to fetch. (Only 1616 * know there are new descriptors to fetch. (Only
@@ -1615,16 +1619,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1615 */ 1619 */
1616 wmb(); 1620 wmb();
1617 1621
1622 /* set next_to_watch value indicating a packet is present */
1623 first->next_to_watch = tx_desc;
1624
1625 i++;
1626 if (i == tx_ring->count)
1627 i = 0;
1628
1629 tx_ring->next_to_use = i;
1630
1631 /* notify HW of packet */
1618 writel(i, tx_ring->tail); 1632 writel(i, tx_ring->tail);
1633
1619 return; 1634 return;
1620 1635
1621dma_error: 1636dma_error:
1622 dev_info(dev, "TX DMA map failed\n"); 1637 dev_info(tx_ring->dev, "TX DMA map failed\n");
1623 1638
1624 /* clear dma mappings for failed tx_bi map */ 1639 /* clear dma mappings for failed tx_bi map */
1625 for (;;) { 1640 for (;;) {
1626 tx_bi = &tx_ring->tx_bi[i]; 1641 tx_bi = &tx_ring->tx_bi[i];
1627 i40e_unmap_tx_resource(tx_ring, tx_bi); 1642 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
1628 if (tx_bi == first) 1643 if (tx_bi == first)
1629 break; 1644 break;
1630 if (i == 0) 1645 if (i == 0)
@@ -1632,8 +1647,6 @@ dma_error:
1632 i--; 1647 i--;
1633 } 1648 }
1634 1649
1635 dev_kfree_skb_any(skb);
1636
1637 tx_ring->next_to_use = i; 1650 tx_ring->next_to_use = i;
1638} 1651}
1639 1652
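The rework of i40e_tx_map() above records the mapped length and DMA address in the tx_buffer with the dma_unmap_len_set()/dma_unmap_addr_set() helpers, splits any buffer larger than I40E_MAX_DATA_PER_TXD across consecutive descriptors, and wraps the ring index back to zero when it reaches tx_ring->count. The standalone sketch below only models that chunk-and-wrap arithmetic; the constants and buffer sizes are illustrative stand-ins, not the driver's values.

#include <stdio.h>

/* Illustrative stand-ins; the real values live in i40e_txrx.h. */
#define MAX_DATA_PER_TXD 8192u  /* plays the role of I40E_MAX_DATA_PER_TXD */
#define RING_COUNT       512u   /* plays the role of tx_ring->count */

/*
 * Count the descriptors one buffer consumes and advance the ring index,
 * mirroring the "split oversized buffers, wrap at the ring end" loop above.
 */
static unsigned int map_buffer(unsigned int size, unsigned int *i)
{
        unsigned int used = 1;

        while (size > MAX_DATA_PER_TXD) {
                size -= MAX_DATA_PER_TXD;
                *i = (*i + 1 < RING_COUNT) ? *i + 1 : 0;  /* like next_to_use */
                used++;
        }
        *i = (*i + 1 < RING_COUNT) ? *i + 1 : 0;  /* descriptor for the rest */
        return used;
}

int main(void)
{
        unsigned int i = 510;  /* start near the end so the wrap is visible */
        unsigned int sizes[] = { 1500, 20000 };  /* head + one large fragment */
        unsigned int total = 0;

        for (unsigned int n = 0; n < 2; n++)
                total += map_buffer(sizes[n], &i);

        printf("descriptors used: %u, next_to_use now: %u\n", total, i);
        return 0;
}

With these made-up numbers the 20000-byte fragment needs three descriptors under an 8192-byte per-descriptor cap, and the index wraps from 511 back through 0.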
@@ -1758,16 +1771,16 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
1758 1771
1759 skb_tx_timestamp(skb); 1772 skb_tx_timestamp(skb);
1760 1773
1774 /* always enable CRC insertion offload */
1775 td_cmd |= I40E_TX_DESC_CMD_ICRC;
1776
1761 /* Always offload the checksum, since it's in the data descriptor */ 1777 /* Always offload the checksum, since it's in the data descriptor */
1762 if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol)) 1778 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1763 tx_flags |= I40E_TX_FLAGS_CSUM; 1779 tx_flags |= I40E_TX_FLAGS_CSUM;
1764 1780
1765 /* always enable offload insertion */
1766 td_cmd |= I40E_TX_DESC_CMD_ICRC;
1767
1768 if (tx_flags & I40E_TX_FLAGS_CSUM)
1769 i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, 1781 i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
1770 tx_ring, &cd_tunneling); 1782 tx_ring, &cd_tunneling);
1783 }
1771 1784
1772 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, 1785 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
1773 cd_tunneling, cd_l2tag2); 1786 cd_tunneling, cd_l2tag2);
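With the i40e_tx_csum() helper gone (removed earlier in this file's diff), the transmit path above decides checksum offload purely from skb->ip_summed == CHECKSUM_PARTIAL and sets the CRC-insertion bit unconditionally. The toy model below restates that predicate outside the kernel; the enum mirrors the usual CHECKSUM_* ordering but is local to the example.

#include <stdio.h>
#include <stdbool.h>

/* Local model of the kernel's CHECKSUM_* values (NONE, UNNECESSARY,
 * COMPLETE, PARTIAL); only PARTIAL asks the device to fill the checksum in. */
enum { CHECKSUM_NONE, CHECKSUM_UNNECESSARY, CHECKSUM_COMPLETE, CHECKSUM_PARTIAL };

static bool wants_csum_offload(int ip_summed)
{
        return ip_summed == CHECKSUM_PARTIAL;
}

int main(void)
{
        printf("PARTIAL -> %d, UNNECESSARY -> %d\n",
               wants_csum_offload(CHECKSUM_PARTIAL),
               wants_csum_offload(CHECKSUM_UNNECESSARY));
        return 0;
}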
@@ -1801,7 +1814,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1801{ 1814{
1802 struct i40e_netdev_priv *np = netdev_priv(netdev); 1815 struct i40e_netdev_priv *np = netdev_priv(netdev);
1803 struct i40e_vsi *vsi = np->vsi; 1816 struct i40e_vsi *vsi = np->vsi;
1804 struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping]; 1817 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
1805 1818
1806 /* hardware can't handle really short frames, hardware padding works 1819 /* hardware can't handle really short frames, hardware padding works
1807 * beyond this point 1820 * beyond this point
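For queue byte accounting, the rewritten i40e_tx_map() computes first->bytecount up front as skb->len - hdr_len + (gso_segs * hdr_len), i.e. the payload plus one replicated header per TSO segment, and reports that figure through netdev_tx_sent_queue(). The worked example below uses made-up sizes and recomputes gso_segs from an assumed MSS; in the driver the segment count comes from skb_shinfo(skb)->gso_segs.

#include <stdio.h>

int main(void)
{
        unsigned int hdr_len = 66;     /* e.g. Ethernet + IPv4 + TCP w/ options */
        unsigned int payload = 64000;  /* bytes handed down in one TSO skb */
        unsigned int mss = 1448;
        unsigned int skb_len = hdr_len + payload;
        unsigned int gso_segs = (payload + mss - 1) / mss;  /* ceiling division */

        /* same shape as first->bytecount in i40e_tx_map() */
        unsigned int bytecount = skb_len - hdr_len + gso_segs * hdr_len;

        printf("segments: %u, bytes reported to the stack: %u\n",
               gso_segs, bytecount);
        return 0;
}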
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index b1d7722d98a7..db55d9947f15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -102,23 +102,20 @@
102#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) 102#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
103#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) 103#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
104#define I40E_TX_FLAGS_FSO (u32)(1 << 7) 104#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
105#define I40E_TX_FLAGS_TXSW (u32)(1 << 8)
106#define I40E_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 9)
107#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 105#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
108#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 106#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
109#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 107#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
110#define I40E_TX_FLAGS_VLAN_SHIFT 16 108#define I40E_TX_FLAGS_VLAN_SHIFT 16
111 109
112struct i40e_tx_buffer { 110struct i40e_tx_buffer {
113 struct sk_buff *skb;
114 dma_addr_t dma;
115 unsigned long time_stamp;
116 u16 length;
117 u32 tx_flags;
118 struct i40e_tx_desc *next_to_watch; 111 struct i40e_tx_desc *next_to_watch;
112 unsigned long time_stamp;
113 struct sk_buff *skb;
119 unsigned int bytecount; 114 unsigned int bytecount;
120 u16 gso_segs; 115 unsigned short gso_segs;
121 u8 mapped_as_page; 116 DEFINE_DMA_UNMAP_ADDR(dma);
117 DEFINE_DMA_UNMAP_LEN(len);
118 u32 tx_flags;
122}; 119};
123 120
124struct i40e_rx_buffer { 121struct i40e_rx_buffer {
@@ -129,18 +126,18 @@ struct i40e_rx_buffer {
129 unsigned int page_offset; 126 unsigned int page_offset;
130}; 127};
131 128
132struct i40e_tx_queue_stats { 129struct i40e_queue_stats {
133 u64 packets; 130 u64 packets;
134 u64 bytes; 131 u64 bytes;
132};
133
134struct i40e_tx_queue_stats {
135 u64 restart_queue; 135 u64 restart_queue;
136 u64 tx_busy; 136 u64 tx_busy;
137 u64 completed;
138 u64 tx_done_old; 137 u64 tx_done_old;
139}; 138};
140 139
141struct i40e_rx_queue_stats { 140struct i40e_rx_queue_stats {
142 u64 packets;
143 u64 bytes;
144 u64 non_eop_descs; 141 u64 non_eop_descs;
145 u64 alloc_rx_page_failed; 142 u64 alloc_rx_page_failed;
146 u64 alloc_rx_buff_failed; 143 u64 alloc_rx_buff_failed;
@@ -183,6 +180,7 @@ enum i40e_ring_state_t {
183 180
184/* struct that defines a descriptor ring, associated with a VSI */ 181/* struct that defines a descriptor ring, associated with a VSI */
185struct i40e_ring { 182struct i40e_ring {
183 struct i40e_ring *next; /* pointer to next ring in q_vector */
186 void *desc; /* Descriptor ring memory */ 184 void *desc; /* Descriptor ring memory */
187 struct device *dev; /* Used for DMA mapping */ 185 struct device *dev; /* Used for DMA mapping */
188 struct net_device *netdev; /* netdev ring maps to */ 186 struct net_device *netdev; /* netdev ring maps to */
@@ -219,6 +217,8 @@ struct i40e_ring {
219 bool ring_active; /* is ring online or not */ 217 bool ring_active; /* is ring online or not */
220 218
221 /* stats structs */ 219 /* stats structs */
220 struct i40e_queue_stats stats;
221 struct u64_stats_sync syncp;
222 union { 222 union {
223 struct i40e_tx_queue_stats tx_stats; 223 struct i40e_tx_queue_stats tx_stats;
224 struct i40e_rx_queue_stats rx_stats; 224 struct i40e_rx_queue_stats rx_stats;
@@ -229,6 +229,8 @@ struct i40e_ring {
229 229
230 struct i40e_vsi *vsi; /* Backreference to associated VSI */ 230 struct i40e_vsi *vsi; /* Backreference to associated VSI */
231 struct i40e_q_vector *q_vector; /* Backreference to associated vector */ 231 struct i40e_q_vector *q_vector; /* Backreference to associated vector */
232
233 struct rcu_head rcu; /* to avoid race on free */
232} ____cacheline_internodealigned_in_smp; 234} ____cacheline_internodealigned_in_smp;
233 235
234enum i40e_latency_range { 236enum i40e_latency_range {
@@ -238,9 +240,8 @@ enum i40e_latency_range {
238}; 240};
239 241
240struct i40e_ring_container { 242struct i40e_ring_container {
241#define I40E_MAX_RINGPAIR_PER_VECTOR 8
242 /* array of pointers to rings */ 243 /* array of pointers to rings */
243 struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR]; 244 struct i40e_ring *ring;
244 unsigned int total_bytes; /* total bytes processed this int */ 245 unsigned int total_bytes; /* total bytes processed this int */
245 unsigned int total_packets; /* total packets processed this int */ 246 unsigned int total_packets; /* total packets processed this int */
246 u16 count; 247 u16 count;
@@ -248,6 +249,10 @@ struct i40e_ring_container {
248 u16 itr; 249 u16 itr;
249}; 250};
250 251
252/* iterator for handling rings in ring container */
253#define i40e_for_each_ring(pos, head) \
254 for (pos = (head).ring; pos != NULL; pos = pos->next)
255
251void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); 256void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
252netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 257netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
253void i40e_clean_tx_ring(struct i40e_ring *tx_ring); 258void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
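In place of the fixed ring array, each i40e_ring now carries a next pointer and the ring container keeps a single list head, walked with the new i40e_for_each_ring() macro; this also drops the old I40E_MAX_RINGPAIR_PER_VECTOR cap of eight rings per vector. A minimal userspace model of the iterator follows; the types are simplified stand-ins, and unlike the real macro (which dereferences (head).ring) this version is handed the list head pointer directly.

#include <stdio.h>

struct ring {
        struct ring *next;  /* same role as the new i40e_ring.next field */
        int queue_index;
};

/* shaped like i40e_for_each_ring(pos, head), but takes the head pointer */
#define for_each_ring(pos, head) \
        for (pos = (head); pos != NULL; pos = pos->next)

int main(void)
{
        struct ring r2 = { NULL, 2 };
        struct ring r1 = { &r2, 1 };
        struct ring r0 = { &r1, 0 };  /* the container would point at r0 */
        struct ring *pos;

        for_each_ring(pos, &r0)
                printf("servicing ring %d\n", pos->queue_index);
        return 0;
}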
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8967e58e2408..07596982a477 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -251,7 +251,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
251 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 251 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
252 else 252 else
253 reg_idx = I40E_VPINT_LNKLSTN( 253 reg_idx = I40E_VPINT_LNKLSTN(
254 ((pf->hw.func_caps.num_msix_vectors_vf - 1) 254 (pf->hw.func_caps.num_msix_vectors_vf
255 * vf->vf_id) + (vector_id - 1)); 255 * vf->vf_id) + (vector_id - 1));
256 256
257 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 257 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
@@ -383,7 +383,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
383 383
384 /* associate this queue with the PCI VF function */ 384 /* associate this queue with the PCI VF function */
385 qtx_ctl = I40E_QTX_CTL_VF_QUEUE; 385 qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
386 qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT) 386 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
387 & I40E_QTX_CTL_PF_INDX_MASK); 387 & I40E_QTX_CTL_PF_INDX_MASK);
388 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) 388 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
389 << I40E_QTX_CTL_VFVM_INDX_SHIFT) 389 << I40E_QTX_CTL_VFVM_INDX_SHIFT)
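The VPINT_LNKLSTN index for a VF interrupt vector is now derived from num_msix_vectors_vf * vf_id + (vector_id - 1); the removed code scaled by num_msix_vectors_vf - 1 instead. The snippet below only compares the two index formulas for a hypothetical five-vectors-per-VF configuration; it says nothing about which layout the hardware expects, it just makes the arithmetic visible.

#include <stdio.h>

int main(void)
{
        unsigned int n_vec = 5;  /* hypothetical num_msix_vectors_vf */

        for (unsigned int vf_id = 0; vf_id < 3; vf_id++)
                for (unsigned int vector_id = 1; vector_id < n_vec; vector_id++) {
                        unsigned int old_idx = (n_vec - 1) * vf_id + (vector_id - 1);
                        unsigned int new_idx = n_vec * vf_id + (vector_id - 1);

                        printf("vf %u vector %u: old index %u, new index %u\n",
                               vf_id, vector_id, old_idx, new_idx);
                }
        return 0;
}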
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 74a1506b4235..8c2437722aad 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -28,14 +28,14 @@
28#ifndef _E1000_82575_H_ 28#ifndef _E1000_82575_H_
29#define _E1000_82575_H_ 29#define _E1000_82575_H_
30 30
31extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); 31void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
32extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw); 32void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
33extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw); 33void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
34extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); 34void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
35extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, 35s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
36 u8 dev_addr, u8 *data); 36 u8 *data);
37extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, 37s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
38 u8 dev_addr, u8 data); 38 u8 data);
39 39
40#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ 40#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
41 (ID_LED_DEF1_DEF2 << 8) | \ 41 (ID_LED_DEF1_DEF2 << 8) | \
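From here on, most of the header churn is mechanical: the redundant extern keyword is dropped from function prototypes (file-scope function declarations have external linkage by default) and long parameter lists are rewrapped. The toy translation unit below shows that both declaration styles compile to the same thing; note that object declarations such as extern char igb_driver_name[] keep the keyword, since for variables it still distinguishes a declaration from a definition.

#include <stdio.h>

extern int add_extern(int a, int b);  /* old style: extern is redundant here */
int add_plain(int a, int b);          /* new style: identical linkage */

int main(void)
{
        printf("%d %d\n", add_extern(1, 2), add_plain(3, 4));
        return 0;
}

int add_extern(int a, int b) { return a + b; }
int add_plain(int a, int b) { return a + b; }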
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 37a9c06a6c68..2e166b22d52b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -562,11 +562,11 @@ struct e1000_hw {
562 u8 revision_id; 562 u8 revision_id;
563}; 563};
564 564
565extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw); 565struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
566#define hw_dbg(format, arg...) \ 566#define hw_dbg(format, arg...) \
567 netdev_dbg(igb_get_hw_dev(hw), format, ##arg) 567 netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
568 568
569/* These functions must be implemented by drivers */ 569/* These functions must be implemented by drivers */
570s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 570s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
571s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 571s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
572#endif /* _E1000_HW_H_ */ 572#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index dde3c4b7ea99..2d913716573a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -28,26 +28,24 @@
28#ifndef _E1000_I210_H_ 28#ifndef _E1000_I210_H_
29#define _E1000_I210_H_ 29#define _E1000_I210_H_
30 30
31extern s32 igb_update_flash_i210(struct e1000_hw *hw); 31s32 igb_update_flash_i210(struct e1000_hw *hw);
32extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw); 32s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
33extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw); 33s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
34extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, 34s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
35 u16 words, u16 *data); 35 u16 *data);
36extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, 36s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
37 u16 words, u16 *data); 37 u16 *data);
38extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); 38s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
39extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); 39void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
40extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw); 40s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
41extern void igb_release_nvm_i210(struct e1000_hw *hw); 41void igb_release_nvm_i210(struct e1000_hw *hw);
42extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); 42s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
43extern s32 igb_read_invm_version(struct e1000_hw *hw, 43s32 igb_read_invm_version(struct e1000_hw *hw,
44 struct e1000_fw_version *invm_ver); 44 struct e1000_fw_version *invm_ver);
45extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, 45s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
46 u16 *data); 46s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
47extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, 47s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
48 u16 data); 48bool igb_get_flash_presence_i210(struct e1000_hw *hw);
49extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
50extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
51 49
52#define E1000_STM_OPCODE 0xDB00 50#define E1000_STM_OPCODE 0xDB00
53#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 51#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 5e13e83cc608..e4cbe8ef67b3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -86,6 +86,6 @@ enum e1000_mng_mode {
86 86
87#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 87#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
88 88
89extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); 89void e1000_init_function_pointers_82575(struct e1000_hw *hw);
90 90
91#endif 91#endif
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 6807b098edae..5e9ed89403aa 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -483,40 +483,38 @@ enum igb_boards {
483extern char igb_driver_name[]; 483extern char igb_driver_name[];
484extern char igb_driver_version[]; 484extern char igb_driver_version[];
485 485
486extern int igb_up(struct igb_adapter *); 486int igb_up(struct igb_adapter *);
487extern void igb_down(struct igb_adapter *); 487void igb_down(struct igb_adapter *);
488extern void igb_reinit_locked(struct igb_adapter *); 488void igb_reinit_locked(struct igb_adapter *);
489extern void igb_reset(struct igb_adapter *); 489void igb_reset(struct igb_adapter *);
490extern void igb_write_rss_indir_tbl(struct igb_adapter *); 490int igb_reinit_queues(struct igb_adapter *);
491extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8); 491void igb_write_rss_indir_tbl(struct igb_adapter *);
492extern int igb_setup_tx_resources(struct igb_ring *); 492int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
493extern int igb_setup_rx_resources(struct igb_ring *); 493int igb_setup_tx_resources(struct igb_ring *);
494extern void igb_free_tx_resources(struct igb_ring *); 494int igb_setup_rx_resources(struct igb_ring *);
495extern void igb_free_rx_resources(struct igb_ring *); 495void igb_free_tx_resources(struct igb_ring *);
496extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); 496void igb_free_rx_resources(struct igb_ring *);
497extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); 497void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
498extern void igb_setup_tctl(struct igb_adapter *); 498void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
499extern void igb_setup_rctl(struct igb_adapter *); 499void igb_setup_tctl(struct igb_adapter *);
500extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); 500void igb_setup_rctl(struct igb_adapter *);
501extern void igb_unmap_and_free_tx_resource(struct igb_ring *, 501netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
502 struct igb_tx_buffer *); 502void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
503extern void igb_alloc_rx_buffers(struct igb_ring *, u16); 503void igb_alloc_rx_buffers(struct igb_ring *, u16);
504extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); 504void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
505extern bool igb_has_link(struct igb_adapter *adapter); 505bool igb_has_link(struct igb_adapter *adapter);
506extern void igb_set_ethtool_ops(struct net_device *); 506void igb_set_ethtool_ops(struct net_device *);
507extern void igb_power_up_link(struct igb_adapter *); 507void igb_power_up_link(struct igb_adapter *);
508extern void igb_set_fw_version(struct igb_adapter *); 508void igb_set_fw_version(struct igb_adapter *);
509extern void igb_ptp_init(struct igb_adapter *adapter); 509void igb_ptp_init(struct igb_adapter *adapter);
510extern void igb_ptp_stop(struct igb_adapter *adapter); 510void igb_ptp_stop(struct igb_adapter *adapter);
511extern void igb_ptp_reset(struct igb_adapter *adapter); 511void igb_ptp_reset(struct igb_adapter *adapter);
512extern void igb_ptp_tx_work(struct work_struct *work); 512void igb_ptp_tx_work(struct work_struct *work);
513extern void igb_ptp_rx_hang(struct igb_adapter *adapter); 513void igb_ptp_rx_hang(struct igb_adapter *adapter);
514extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); 514void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
515extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, 515void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
516 struct sk_buff *skb); 516void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
517extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, 517 struct sk_buff *skb);
518 unsigned char *va,
519 struct sk_buff *skb);
520static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring, 518static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
521 union e1000_adv_rx_desc *rx_desc, 519 union e1000_adv_rx_desc *rx_desc,
522 struct sk_buff *skb) 520 struct sk_buff *skb)
@@ -531,11 +529,11 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
531 rx_ring->last_rx_timestamp = jiffies; 529 rx_ring->last_rx_timestamp = jiffies;
532} 530}
533 531
534extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, 532int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr,
535 struct ifreq *ifr, int cmd); 533 int cmd);
536#ifdef CONFIG_IGB_HWMON 534#ifdef CONFIG_IGB_HWMON
537extern void igb_sysfs_exit(struct igb_adapter *adapter); 535void igb_sysfs_exit(struct igb_adapter *adapter);
538extern int igb_sysfs_init(struct igb_adapter *adapter); 536int igb_sysfs_init(struct igb_adapter *adapter);
539#endif 537#endif
540static inline s32 igb_reset_phy(struct e1000_hw *hw) 538static inline s32 igb_reset_phy(struct e1000_hw *hw)
541{ 539{
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 151e00cad113..0ae3177416c7 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1659,7 +1659,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1659 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || 1659 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
1660 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || 1660 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
1661 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || 1661 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
1662 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { 1662 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
1663 (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
1663 1664
1664 /* Enable DH89xxCC MPHY for near end loopback */ 1665 /* Enable DH89xxCC MPHY for near end loopback */
1665 reg = rd32(E1000_MPHY_ADDR_CTL); 1666 reg = rd32(E1000_MPHY_ADDR_CTL);
@@ -1725,7 +1726,8 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
1725 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || 1726 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
1726 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || 1727 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
1727 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || 1728 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
1728 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { 1729 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
1730 (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
1729 u32 reg; 1731 u32 reg;
1730 1732
1731 /* Disable near end loopback on DH89xxCC */ 1733 /* Disable near end loopback on DH89xxCC */
@@ -2877,6 +2879,88 @@ static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
2877 return 0; 2879 return 0;
2878} 2880}
2879 2881
2882static unsigned int igb_max_channels(struct igb_adapter *adapter)
2883{
2884 struct e1000_hw *hw = &adapter->hw;
2885 unsigned int max_combined = 0;
2886
2887 switch (hw->mac.type) {
2888 case e1000_i211:
2889 max_combined = IGB_MAX_RX_QUEUES_I211;
2890 break;
2891 case e1000_82575:
2892 case e1000_i210:
2893 max_combined = IGB_MAX_RX_QUEUES_82575;
2894 break;
2895 case e1000_i350:
2896 if (!!adapter->vfs_allocated_count) {
2897 max_combined = 1;
2898 break;
2899 }
2900 /* fall through */
2901 case e1000_82576:
2902 if (!!adapter->vfs_allocated_count) {
2903 max_combined = 2;
2904 break;
2905 }
2906 /* fall through */
2907 case e1000_82580:
2908 case e1000_i354:
2909 default:
2910 max_combined = IGB_MAX_RX_QUEUES;
2911 break;
2912 }
2913
2914 return max_combined;
2915}
2916
2917static void igb_get_channels(struct net_device *netdev,
2918 struct ethtool_channels *ch)
2919{
2920 struct igb_adapter *adapter = netdev_priv(netdev);
2921
2922 /* Report maximum channels */
2923 ch->max_combined = igb_max_channels(adapter);
2924
2925 /* Report info for other vector */
2926 if (adapter->msix_entries) {
2927 ch->max_other = NON_Q_VECTORS;
2928 ch->other_count = NON_Q_VECTORS;
2929 }
2930
2931 ch->combined_count = adapter->rss_queues;
2932}
2933
2934static int igb_set_channels(struct net_device *netdev,
2935 struct ethtool_channels *ch)
2936{
2937 struct igb_adapter *adapter = netdev_priv(netdev);
2938 unsigned int count = ch->combined_count;
2939
2940 /* Verify they are not requesting separate vectors */
2941 if (!count || ch->rx_count || ch->tx_count)
2942 return -EINVAL;
2943
2944 /* Verify other_count is valid and has not been changed */
2945 if (ch->other_count != NON_Q_VECTORS)
2946 return -EINVAL;
2947
2948 /* Verify the number of channels doesn't exceed hw limits */
2949 if (count > igb_max_channels(adapter))
2950 return -EINVAL;
2951
2952 if (count != adapter->rss_queues) {
2953 adapter->rss_queues = count;
2954
2955 /* Hardware has to reinitialize queues and interrupts to
2956 * match the new configuration.
2957 */
2958 return igb_reinit_queues(adapter);
2959 }
2960
2961 return 0;
2962}
2963
2880static const struct ethtool_ops igb_ethtool_ops = { 2964static const struct ethtool_ops igb_ethtool_ops = {
2881 .get_settings = igb_get_settings, 2965 .get_settings = igb_get_settings,
2882 .set_settings = igb_set_settings, 2966 .set_settings = igb_set_settings,
@@ -2913,6 +2997,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
2913 .get_rxfh_indir_size = igb_get_rxfh_indir_size, 2997 .get_rxfh_indir_size = igb_get_rxfh_indir_size,
2914 .get_rxfh_indir = igb_get_rxfh_indir, 2998 .get_rxfh_indir = igb_get_rxfh_indir,
2915 .set_rxfh_indir = igb_set_rxfh_indir, 2999 .set_rxfh_indir = igb_set_rxfh_indir,
3000 .get_channels = igb_get_channels,
3001 .set_channels = igb_set_channels,
2916 .begin = igb_ethtool_begin, 3002 .begin = igb_ethtool_begin,
2917 .complete = igb_ethtool_complete, 3003 .complete = igb_ethtool_complete,
2918}; 3004};
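The new get_channels/set_channels ethtool operations expose the combined queue count that ethtool -l / ethtool -L manipulate. The sketch below is a generic userspace query via the SIOCETHTOOL ioctl and ETHTOOL_GCHANNELS; it assumes only the standard UAPI headers and an interface name on the command line, nothing igb-specific. On the set side, the driver code above rejects separate rx/tx counts and any change to other_count, then rebuilds the queues through igb_reinit_queues().

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
        struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
        struct ifreq ifr;
        int fd;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
                return 1;
        }

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&ch;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("%s: combined %u (max %u), other %u\n", argv[1],
                       ch.combined_count, ch.max_combined, ch.other_count);
        else
                perror("ETHTOOL_GCHANNELS");

        close(fd);
        return 0;
}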
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8cf44f2a8ccd..a505d3bad09a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5708,7 +5708,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
5708 5708
5709 /* reply to reset with ack and vf mac address */ 5709 /* reply to reset with ack and vf mac address */
5710 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 5710 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5711 memcpy(addr, vf_mac, 6); 5711 memcpy(addr, vf_mac, ETH_ALEN);
5712 igb_write_mbx(hw, msgbuf, 3, vf); 5712 igb_write_mbx(hw, msgbuf, 3, vf);
5713} 5713}
5714 5714
@@ -7838,4 +7838,26 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7838 return E1000_SUCCESS; 7838 return E1000_SUCCESS;
7839 7839
7840} 7840}
7841
7842int igb_reinit_queues(struct igb_adapter *adapter)
7843{
7844 struct net_device *netdev = adapter->netdev;
7845 struct pci_dev *pdev = adapter->pdev;
7846 int err = 0;
7847
7848 if (netif_running(netdev))
7849 igb_close(netdev);
7850
7851 igb_clear_interrupt_scheme(adapter);
7852
7853 if (igb_init_interrupt_scheme(adapter, true)) {
7854 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
7855 return -ENOMEM;
7856 }
7857
7858 if (netif_running(netdev))
7859 err = igb_open(netdev);
7860
7861 return err;
7862}
7841/* igb_main.c */ 7863/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index a1463e3d14c0..7d6a25c8f889 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -312,17 +312,17 @@ enum igbvf_state_t {
312extern char igbvf_driver_name[]; 312extern char igbvf_driver_name[];
313extern const char igbvf_driver_version[]; 313extern const char igbvf_driver_version[];
314 314
315extern void igbvf_check_options(struct igbvf_adapter *); 315void igbvf_check_options(struct igbvf_adapter *);
316extern void igbvf_set_ethtool_ops(struct net_device *); 316void igbvf_set_ethtool_ops(struct net_device *);
317 317
318extern int igbvf_up(struct igbvf_adapter *); 318int igbvf_up(struct igbvf_adapter *);
319extern void igbvf_down(struct igbvf_adapter *); 319void igbvf_down(struct igbvf_adapter *);
320extern void igbvf_reinit_locked(struct igbvf_adapter *); 320void igbvf_reinit_locked(struct igbvf_adapter *);
321extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *); 321int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
322extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *); 322int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
323extern void igbvf_free_rx_resources(struct igbvf_ring *); 323void igbvf_free_rx_resources(struct igbvf_ring *);
324extern void igbvf_free_tx_resources(struct igbvf_ring *); 324void igbvf_free_tx_resources(struct igbvf_ring *);
325extern void igbvf_update_stats(struct igbvf_adapter *); 325void igbvf_update_stats(struct igbvf_adapter *);
326 326
327extern unsigned int copybreak; 327extern unsigned int copybreak;
328 328
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index eea0e10ce12f..955ad8c2c534 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -154,7 +154,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
154 ret_val = mbx->ops.read_posted(hw, msgbuf, 3); 154 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
155 if (!ret_val) { 155 if (!ret_val) {
156 if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) 156 if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
157 memcpy(hw->mac.perm_addr, addr, 6); 157 memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
158 else 158 else
159 ret_val = -E1000_ERR_MAC_INIT; 159 ret_val = -E1000_ERR_MAC_INIT;
160 } 160 }
@@ -314,7 +314,7 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
314 314
315 memset(msgbuf, 0, 12); 315 memset(msgbuf, 0, 12);
316 msgbuf[0] = E1000_VF_SET_MAC_ADDR; 316 msgbuf[0] = E1000_VF_SET_MAC_ADDR;
317 memcpy(msg_addr, addr, 6); 317 memcpy(msg_addr, addr, ETH_ALEN);
318 ret_val = mbx->ops.write_posted(hw, msgbuf, 3); 318 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
319 319
320 if (!ret_val) 320 if (!ret_val)
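The memcpy() changes in this file swap the bare constant 6 for ETH_ALEN when copying MAC addresses. A trivial userspace echo of the same idiom, using the UAPI definition of ETH_ALEN:

#include <stdio.h>
#include <string.h>
#include <linux/if_ether.h>  /* ETH_ALEN == 6 */

int main(void)
{
        unsigned char perm_addr[ETH_ALEN];
        const unsigned char vf_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

        memcpy(perm_addr, vf_mac, ETH_ALEN);  /* instead of memcpy(..., 6) */

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               perm_addr[0], perm_addr[1], perm_addr[2],
               perm_addr[3], perm_addr[4], perm_addr[5]);
        return 0;
}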
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 4d2ae97ff1b3..2224cc2edf13 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -187,21 +187,21 @@ enum ixgb_state_t {
187}; 187};
188 188
189/* Exported from other modules */ 189/* Exported from other modules */
190extern void ixgb_check_options(struct ixgb_adapter *adapter); 190void ixgb_check_options(struct ixgb_adapter *adapter);
191extern void ixgb_set_ethtool_ops(struct net_device *netdev); 191void ixgb_set_ethtool_ops(struct net_device *netdev);
192extern char ixgb_driver_name[]; 192extern char ixgb_driver_name[];
193extern const char ixgb_driver_version[]; 193extern const char ixgb_driver_version[];
194 194
195extern void ixgb_set_speed_duplex(struct net_device *netdev); 195void ixgb_set_speed_duplex(struct net_device *netdev);
196 196
197extern int ixgb_up(struct ixgb_adapter *adapter); 197int ixgb_up(struct ixgb_adapter *adapter);
198extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog); 198void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
199extern void ixgb_reset(struct ixgb_adapter *adapter); 199void ixgb_reset(struct ixgb_adapter *adapter);
200extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter); 200int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
201extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter); 201int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
202extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter); 202void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
203extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter); 203void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
204extern void ixgb_update_stats(struct ixgb_adapter *adapter); 204void ixgb_update_stats(struct ixgb_adapter *adapter);
205 205
206 206
207#endif /* _IXGB_H_ */ 207#endif /* _IXGB_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 2a99a35c33aa..0bd5d72e1af5 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -759,27 +759,20 @@ struct ixgb_hw_stats {
759}; 759};
760 760
761/* Function Prototypes */ 761/* Function Prototypes */
762extern bool ixgb_adapter_stop(struct ixgb_hw *hw); 762bool ixgb_adapter_stop(struct ixgb_hw *hw);
763extern bool ixgb_init_hw(struct ixgb_hw *hw); 763bool ixgb_init_hw(struct ixgb_hw *hw);
764extern bool ixgb_adapter_start(struct ixgb_hw *hw); 764bool ixgb_adapter_start(struct ixgb_hw *hw);
765extern void ixgb_check_for_link(struct ixgb_hw *hw); 765void ixgb_check_for_link(struct ixgb_hw *hw);
766extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw); 766bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
767
768extern void ixgb_rar_set(struct ixgb_hw *hw,
769 u8 *addr,
770 u32 index);
771 767
768void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
772 769
773/* Filters (multicast, vlan, receive) */ 770/* Filters (multicast, vlan, receive) */
774extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw, 771void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
775 u8 *mc_addr_list, 772 u32 mc_addr_count, u32 pad);
776 u32 mc_addr_count,
777 u32 pad);
778 773
779/* Vfta functions */ 774/* Vfta functions */
780extern void ixgb_write_vfta(struct ixgb_hw *hw, 775void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value);
781 u32 offset,
782 u32 value);
783 776
784/* Access functions to eeprom data */ 777/* Access functions to eeprom data */
785void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr); 778void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 0ac6b11c6e4e..dc1588ee264a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -55,7 +55,7 @@
55#include <net/busy_poll.h> 55#include <net/busy_poll.h>
56 56
57#ifdef CONFIG_NET_RX_BUSY_POLL 57#ifdef CONFIG_NET_RX_BUSY_POLL
58#define LL_EXTENDED_STATS 58#define BP_EXTENDED_STATS
59#endif 59#endif
60/* common prefix used by pr_<> macros */ 60/* common prefix used by pr_<> macros */
61#undef pr_fmt 61#undef pr_fmt
@@ -187,11 +187,11 @@ struct ixgbe_rx_buffer {
187struct ixgbe_queue_stats { 187struct ixgbe_queue_stats {
188 u64 packets; 188 u64 packets;
189 u64 bytes; 189 u64 bytes;
190#ifdef LL_EXTENDED_STATS 190#ifdef BP_EXTENDED_STATS
191 u64 yields; 191 u64 yields;
192 u64 misses; 192 u64 misses;
193 u64 cleaned; 193 u64 cleaned;
194#endif /* LL_EXTENDED_STATS */ 194#endif /* BP_EXTENDED_STATS */
195}; 195};
196 196
197struct ixgbe_tx_queue_stats { 197struct ixgbe_tx_queue_stats {
@@ -399,7 +399,7 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
399 WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI); 399 WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
400 q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD; 400 q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
401 rc = false; 401 rc = false;
402#ifdef LL_EXTENDED_STATS 402#ifdef BP_EXTENDED_STATS
403 q_vector->tx.ring->stats.yields++; 403 q_vector->tx.ring->stats.yields++;
404#endif 404#endif
405 } else 405 } else
@@ -432,7 +432,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
432 if ((q_vector->state & IXGBE_QV_LOCKED)) { 432 if ((q_vector->state & IXGBE_QV_LOCKED)) {
433 q_vector->state |= IXGBE_QV_STATE_POLL_YIELD; 433 q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
434 rc = false; 434 rc = false;
435#ifdef LL_EXTENDED_STATS 435#ifdef BP_EXTENDED_STATS
436 q_vector->rx.ring->stats.yields++; 436 q_vector->rx.ring->stats.yields++;
437#endif 437#endif
438 } else 438 } else
@@ -457,7 +457,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
457} 457}
458 458
459/* true if a socket is polling, even if it did not get the lock */ 459/* true if a socket is polling, even if it did not get the lock */
460static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) 460static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
461{ 461{
462 WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED)); 462 WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
463 return q_vector->state & IXGBE_QV_USER_PEND; 463 return q_vector->state & IXGBE_QV_USER_PEND;
@@ -487,7 +487,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
487 return false; 487 return false;
488} 488}
489 489
490static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) 490static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
491{ 491{
492 return false; 492 return false;
493} 493}
@@ -786,93 +786,89 @@ extern const char ixgbe_driver_version[];
786extern char ixgbe_default_device_descr[]; 786extern char ixgbe_default_device_descr[];
787#endif /* IXGBE_FCOE */ 787#endif /* IXGBE_FCOE */
788 788
789extern void ixgbe_up(struct ixgbe_adapter *adapter); 789void ixgbe_up(struct ixgbe_adapter *adapter);
790extern void ixgbe_down(struct ixgbe_adapter *adapter); 790void ixgbe_down(struct ixgbe_adapter *adapter);
791extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); 791void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
792extern void ixgbe_reset(struct ixgbe_adapter *adapter); 792void ixgbe_reset(struct ixgbe_adapter *adapter);
793extern void ixgbe_set_ethtool_ops(struct net_device *netdev); 793void ixgbe_set_ethtool_ops(struct net_device *netdev);
794extern int ixgbe_setup_rx_resources(struct ixgbe_ring *); 794int ixgbe_setup_rx_resources(struct ixgbe_ring *);
795extern int ixgbe_setup_tx_resources(struct ixgbe_ring *); 795int ixgbe_setup_tx_resources(struct ixgbe_ring *);
796extern void ixgbe_free_rx_resources(struct ixgbe_ring *); 796void ixgbe_free_rx_resources(struct ixgbe_ring *);
797extern void ixgbe_free_tx_resources(struct ixgbe_ring *); 797void ixgbe_free_tx_resources(struct ixgbe_ring *);
798extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 798void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
799extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 799void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
800extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, 800void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
801 struct ixgbe_ring *); 801void ixgbe_update_stats(struct ixgbe_adapter *adapter);
802extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 802int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
803extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 803int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
804extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
805 u16 subdevice_id); 804 u16 subdevice_id);
806extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 805void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
807extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, 806netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
808 struct ixgbe_adapter *, 807 struct ixgbe_ring *);
809 struct ixgbe_ring *); 808void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
810extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, 809 struct ixgbe_tx_buffer *);
811 struct ixgbe_tx_buffer *); 810void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
812extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); 811void ixgbe_write_eitr(struct ixgbe_q_vector *);
813extern void ixgbe_write_eitr(struct ixgbe_q_vector *); 812int ixgbe_poll(struct napi_struct *napi, int budget);
814extern int ixgbe_poll(struct napi_struct *napi, int budget); 813int ethtool_ioctl(struct ifreq *ifr);
815extern int ethtool_ioctl(struct ifreq *ifr); 814s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
816extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); 815s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
817extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); 816s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
818extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); 817s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
819extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 818 union ixgbe_atr_hash_dword input,
820 union ixgbe_atr_hash_dword input, 819 union ixgbe_atr_hash_dword common,
821 union ixgbe_atr_hash_dword common, 820 u8 queue);
822 u8 queue); 821s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
823extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, 822 union ixgbe_atr_input *input_mask);
824 union ixgbe_atr_input *input_mask); 823s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
825extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, 824 union ixgbe_atr_input *input,
826 union ixgbe_atr_input *input, 825 u16 soft_id, u8 queue);
827 u16 soft_id, u8 queue); 826s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
828extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, 827 union ixgbe_atr_input *input,
829 union ixgbe_atr_input *input, 828 u16 soft_id);
830 u16 soft_id); 829void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
831extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, 830 union ixgbe_atr_input *mask);
832 union ixgbe_atr_input *mask); 831bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
833extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); 832void ixgbe_set_rx_mode(struct net_device *netdev);
834extern void ixgbe_set_rx_mode(struct net_device *netdev);
835#ifdef CONFIG_IXGBE_DCB 833#ifdef CONFIG_IXGBE_DCB
836extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); 834void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
837#endif 835#endif
838extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); 836int ixgbe_setup_tc(struct net_device *dev, u8 tc);
839extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); 837void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
840extern void ixgbe_do_reset(struct net_device *netdev); 838void ixgbe_do_reset(struct net_device *netdev);
841#ifdef CONFIG_IXGBE_HWMON 839#ifdef CONFIG_IXGBE_HWMON
842extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); 840void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
843extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); 841int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
844#endif /* CONFIG_IXGBE_HWMON */ 842#endif /* CONFIG_IXGBE_HWMON */
845#ifdef IXGBE_FCOE 843#ifdef IXGBE_FCOE
846extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 844void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
847extern int ixgbe_fso(struct ixgbe_ring *tx_ring, 845int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
848 struct ixgbe_tx_buffer *first, 846 u8 *hdr_len);
849 u8 *hdr_len); 847int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
850extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, 848 union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
851 union ixgbe_adv_rx_desc *rx_desc, 849int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
852 struct sk_buff *skb); 850 struct scatterlist *sgl, unsigned int sgc);
853extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, 851int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
854 struct scatterlist *sgl, unsigned int sgc); 852 struct scatterlist *sgl, unsigned int sgc);
855extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, 853int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
856 struct scatterlist *sgl, unsigned int sgc); 854int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
857extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); 855void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
858extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); 856int ixgbe_fcoe_enable(struct net_device *netdev);
859extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter); 857int ixgbe_fcoe_disable(struct net_device *netdev);
860extern int ixgbe_fcoe_enable(struct net_device *netdev);
861extern int ixgbe_fcoe_disable(struct net_device *netdev);
862#ifdef CONFIG_IXGBE_DCB 858#ifdef CONFIG_IXGBE_DCB
863extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); 859u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
864extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); 860u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
865#endif /* CONFIG_IXGBE_DCB */ 861#endif /* CONFIG_IXGBE_DCB */
866extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); 862int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
867extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, 863int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
868 struct netdev_fcoe_hbainfo *info); 864 struct netdev_fcoe_hbainfo *info);
869extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); 865u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
870#endif /* IXGBE_FCOE */ 866#endif /* IXGBE_FCOE */
871#ifdef CONFIG_DEBUG_FS 867#ifdef CONFIG_DEBUG_FS
872extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter); 868void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
873extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); 869void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
874extern void ixgbe_dbg_init(void); 870void ixgbe_dbg_init(void);
875extern void ixgbe_dbg_exit(void); 871void ixgbe_dbg_exit(void);
876#else 872#else
877static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {} 873static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
878static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {} 874static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
@@ -884,12 +880,12 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
884 return netdev_get_tx_queue(ring->netdev, ring->queue_index); 880 return netdev_get_tx_queue(ring->netdev, ring->queue_index);
885} 881}
886 882
887extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter); 883void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
888extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); 884void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
889extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); 885void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
890extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); 886void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
891extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, 887void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
892 struct sk_buff *skb); 888 struct sk_buff *skb);
893static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, 889static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
894 union ixgbe_adv_rx_desc *rx_desc, 890 union ixgbe_adv_rx_desc *rx_desc,
895 struct sk_buff *skb) 891 struct sk_buff *skb)
@@ -906,11 +902,11 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
906 rx_ring->last_rx_timestamp = jiffies; 902 rx_ring->last_rx_timestamp = jiffies;
907} 903}
908 904
909extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, 905int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
910 struct ifreq *ifr, int cmd); 906 int cmd);
911extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); 907void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
912extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); 908void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
913extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); 909void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
914#ifdef CONFIG_PCI_IOV 910#ifdef CONFIG_PCI_IOV
915void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); 911void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
916#endif 912#endif
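The LL_EXTENDED_STATS to BP_EXTENDED_STATS rename appears to track the kernel-wide shift in terminology from low-latency sockets to busy polling; the extra yields/misses/cleaned counters are still compiled in only when CONFIG_NET_RX_BUSY_POLL is enabled. The fragment below is a stripped-down illustration of that conditional-stats pattern with placeholder names, not the ixgbe structures.

#include <stdio.h>

#define BP_EXTENDED_STATS  /* normally derived from CONFIG_NET_RX_BUSY_POLL */

struct queue_stats {
        unsigned long long packets;
        unsigned long long bytes;
#ifdef BP_EXTENDED_STATS
        unsigned long long yields;   /* busy poll and NAPI contended for the queue */
        unsigned long long misses;
        unsigned long long cleaned;
#endif
};

int main(void)
{
        struct queue_stats s = { .packets = 10, .bytes = 15140 };

#ifdef BP_EXTENDED_STATS
        s.yields++;
#endif
        printf("packets=%llu bytes=%llu sizeof=%zu\n",
               s.packets, s.bytes, sizeof(s));
        return 0;
}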
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e8649abf97c0..90aac31b3551 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -442,7 +442,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
442 442
443static int ixgbe_get_regs_len(struct net_device *netdev) 443static int ixgbe_get_regs_len(struct net_device *netdev)
444{ 444{
445#define IXGBE_REGS_LEN 1129 445#define IXGBE_REGS_LEN 1139
446 return IXGBE_REGS_LEN * sizeof(u32); 446 return IXGBE_REGS_LEN * sizeof(u32);
447} 447}
448 448
@@ -602,22 +602,53 @@ static void ixgbe_get_regs(struct net_device *netdev,
 	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
 
 	/* DCB */
-	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
-	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
-	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
-	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
-	for (i = 0; i < 8; i++)
-		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
+	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+		for (i = 0; i < 8; i++)
+			regs_buff[833 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[841 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[849 + i] =
+				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[857 + i] =
+				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
+		for (i = 0; i < 8; i++)
+			regs_buff[833 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[841 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[849 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[857 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
+		break;
+	default:
+		break;
+	}
+
 	for (i = 0; i < 8; i++)
-		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
+		regs_buff[865 + i] =
+			IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
 	for (i = 0; i < 8; i++)
-		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+		regs_buff[873 + i] =
+			IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
 
 	/* Statistics */
 	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
@@ -757,6 +788,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
 
 	/* 82599 X540 specific registers  */
 	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+	/* 82599 X540 specific DCB registers  */
+	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
+	for (i = 0; i < 4; i++)
+		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
+	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
+					/* same as RTTQCNRM */
+	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
+					/* same as RTTQCNRR */
+
+	/* X540 specific DCB registers  */
+	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
+	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
 }
 
 static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1072,7 +1117,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i] = 0;
 			data[i+1] = 0;
 			i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 			data[i] = 0;
 			data[i+1] = 0;
 			data[i+2] = 0;
@@ -1087,7 +1132,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i+1] = ring->stats.bytes;
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;
 		data[i+1] = ring->stats.misses;
 		data[i+2] = ring->stats.cleaned;
@@ -1100,7 +1145,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i] = 0;
 			data[i+1] = 0;
 			i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 			data[i] = 0;
 			data[i+1] = 0;
 			data[i+2] = 0;
@@ -1115,7 +1160,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i+1] = ring->stats.bytes;
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;
 		data[i+1] = ring->stats.misses;
 		data[i+2] = ring->stats.cleaned;
@@ -1157,28 +1202,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-			sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "tx_queue_%u_bp_napi_yield", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_ll_misses", i);
+			sprintf(p, "tx_queue_%u_bp_misses", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_ll_cleaned", i);
+			sprintf(p, "tx_queue_%u_bp_cleaned", i);
 			p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
 		}
 		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
 			sprintf(p, "rx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-			sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "rx_queue_%u_bp_poll_yield", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_ll_misses", i);
+			sprintf(p, "rx_queue_%u_bp_misses", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_ll_cleaned", i);
+			sprintf(p, "rx_queue_%u_bp_cleaned", i);
 			p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
 		}
 		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
 			sprintf(p, "tx_pb_%u_pxon", i);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0ade0cd5ef53..43b777aad288 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1585,7 +1585,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 
-	if (ixgbe_qv_ll_polling(q_vector))
+	if (ixgbe_qv_busy_polling(q_vector))
 		netif_receive_skb(skb);
 	else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
 		napi_gro_receive(&q_vector->napi, skb);
@@ -2097,7 +2097,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
 
 	ixgbe_for_each_ring(ring, q_vector->rx) {
 		found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		if (found)
 			ring->stats.cleaned += found;
 		else
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 24af12e3719e..aae900a256da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -57,28 +57,28 @@
 #define IXGBE_SFF_QSFP_DEVICE_TECH	0x93
 
 /* Bitmasks */
 #define IXGBE_SFF_DA_PASSIVE_CABLE	0x4
 #define IXGBE_SFF_DA_ACTIVE_CABLE	0x8
 #define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING	0x4
 #define IXGBE_SFF_1GBASESX_CAPABLE	0x1
 #define IXGBE_SFF_1GBASELX_CAPABLE	0x2
 #define IXGBE_SFF_1GBASET_CAPABLE	0x8
 #define IXGBE_SFF_10GBASESR_CAPABLE	0x10
 #define IXGBE_SFF_10GBASELR_CAPABLE	0x20
 #define IXGBE_SFF_SOFT_RS_SELECT_MASK	0x8
 #define IXGBE_SFF_SOFT_RS_SELECT_10G	0x8
 #define IXGBE_SFF_SOFT_RS_SELECT_1G	0x0
 #define IXGBE_SFF_ADDRESSING_MODE	0x4
 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE	0x1
 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE	0x8
 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE	0x23
 #define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL	0x0
 #define IXGBE_I2C_EEPROM_READ_MASK	0x100
 #define IXGBE_I2C_EEPROM_STATUS_MASK	0x3
 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION	0x0
 #define IXGBE_I2C_EEPROM_STATUS_PASS	0x1
 #define IXGBE_I2C_EEPROM_STATUS_FAIL	0x2
 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS	0x3
 
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE	0x400
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 276d7b135332..1fe7cb0142e1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -558,7 +558,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
 	struct ixgbe_hw *hw = &adapter->hw;
 	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 
-	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
 	hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
 
 	return 0;
@@ -621,16 +621,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 {
-	unsigned char vf_mac_addr[6];
 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 	unsigned int vfn = (event_mask & 0x3f);
 
 	bool enable = ((event_mask & 0x10000000U) != 0);
 
-	if (enable) {
-		eth_zero_addr(vf_mac_addr);
-		memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
-	}
+	if (enable)
+		eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 10775cb9b6d8..7c19e969576f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -561,6 +561,10 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RTTDQSEL    0x04904
 #define IXGBE_RTTDT1C     0x04908
 #define IXGBE_RTTDT1S     0x0490C
+#define IXGBE_RTTQCNCR    0x08B00
+#define IXGBE_RTTQCNTG    0x04A90
+#define IXGBE_RTTBCNRD    0x0498C
+#define IXGBE_RTTQCNRR    0x0498C
 #define IXGBE_RTTDTECC    0x04990
 #define IXGBE_RTTDTECC_NO_BCN 0x00000100
 #define IXGBE_RTTBCNRC    0x04984
@@ -570,6 +574,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RTTBCNRC_RF_INT_MASK \
 	(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
 #define IXGBE_RTTBCNRM    0x04980
+#define IXGBE_RTTQCNRM    0x04980
 
 /* FCoE DMA Context Registers */
 #define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 389324f5929a..24b80a6cfca4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -32,12 +32,12 @@
 #include "ixgbe.h"
 #include "ixgbe_phy.h"
 
 #define IXGBE_X540_MAX_TX_QUEUES	128
 #define IXGBE_X540_MAX_RX_QUEUES	128
 #define IXGBE_X540_RAR_ENTRIES		128
 #define IXGBE_X540_MC_TBL_SIZE		128
 #define IXGBE_X540_VFT_TBL_SIZE		128
 #define IXGBE_X540_RX_PB_SIZE		384
 
 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
 static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c9d0c12d6f04..84329b0d567a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -140,58 +140,10 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
 
 #define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
 
-static char *ixgbevf_reg_names[] = {
-	"IXGBE_VFCTRL",
-	"IXGBE_VFSTATUS",
-	"IXGBE_VFLINKS",
-	"IXGBE_VFRXMEMWRAP",
-	"IXGBE_VFFRTIMER",
-	"IXGBE_VTEICR",
-	"IXGBE_VTEICS",
-	"IXGBE_VTEIMS",
-	"IXGBE_VTEIMC",
-	"IXGBE_VTEIAC",
-	"IXGBE_VTEIAM",
-	"IXGBE_VTEITR",
-	"IXGBE_VTIVAR",
-	"IXGBE_VTIVAR_MISC",
-	"IXGBE_VFRDBAL0",
-	"IXGBE_VFRDBAL1",
-	"IXGBE_VFRDBAH0",
-	"IXGBE_VFRDBAH1",
-	"IXGBE_VFRDLEN0",
-	"IXGBE_VFRDLEN1",
-	"IXGBE_VFRDH0",
-	"IXGBE_VFRDH1",
-	"IXGBE_VFRDT0",
-	"IXGBE_VFRDT1",
-	"IXGBE_VFRXDCTL0",
-	"IXGBE_VFRXDCTL1",
-	"IXGBE_VFSRRCTL0",
-	"IXGBE_VFSRRCTL1",
-	"IXGBE_VFPSRTYPE",
-	"IXGBE_VFTDBAL0",
-	"IXGBE_VFTDBAL1",
-	"IXGBE_VFTDBAH0",
-	"IXGBE_VFTDBAH1",
-	"IXGBE_VFTDLEN0",
-	"IXGBE_VFTDLEN1",
-	"IXGBE_VFTDH0",
-	"IXGBE_VFTDH1",
-	"IXGBE_VFTDT0",
-	"IXGBE_VFTDT1",
-	"IXGBE_VFTXDCTL0",
-	"IXGBE_VFTXDCTL1",
-	"IXGBE_VFTDWBAL0",
-	"IXGBE_VFTDWBAL1",
-	"IXGBE_VFTDWBAH0",
-	"IXGBE_VFTDWBAH1"
-};
-
-
 static int ixgbevf_get_regs_len(struct net_device *netdev)
 {
-	return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+#define IXGBE_REGS_LEN 45
+	return IXGBE_REGS_LEN * sizeof(u32);
 }
 
 static void ixgbevf_get_regs(struct net_device *netdev,
@@ -264,9 +216,6 @@ static void ixgbevf_get_regs(struct net_device *netdev,
 		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
 	for (i = 0; i < 2; i++)
 		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
-
-	for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
-		hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
 }
 
 static void ixgbevf_get_drvinfo(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index fff0d9867529..64a2b912e73c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -281,27 +281,23 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
 extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];
 
-extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
-				      struct ixgbevf_ring *);
-extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
-				      struct ixgbevf_ring *);
-extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
-				      struct ixgbevf_ring *);
-extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
-				      struct ixgbevf_ring *);
-extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-extern int ethtool_ioctl(struct ifreq *ifr);
-
-extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
-extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+void ixgbevf_up(struct ixgbevf_adapter *adapter);
+void ixgbevf_down(struct ixgbevf_adapter *adapter);
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+int ethtool_ioctl(struct ifreq *ifr);
+
+void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
 
 #ifdef DEBUG
-extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
 	printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
 #else
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 59a62bbfb371..ce27d62f9c8e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -756,37 +756,12 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 {
 	struct ixgbevf_adapter *adapter = data;
-	struct pci_dev *pdev = adapter->pdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 msg;
-	bool got_ack = false;
 
 	hw->mac.get_link_status = 1;
-	if (!hw->mbx.ops.check_for_ack(hw))
-		got_ack = true;
-
-	if (!hw->mbx.ops.check_for_msg(hw)) {
-		hw->mbx.ops.read(hw, &msg, 1);
-
-		if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
-			mod_timer(&adapter->watchdog_timer,
-				  round_jiffies(jiffies + 1));
-			adapter->link_up = false;
-		}
 
-		if (msg & IXGBE_VT_MSGTYPE_NACK)
-			dev_info(&pdev->dev,
-				 "Last Request of type %2.2x to PF Nacked\n",
-				 msg & 0xFF);
-		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
-	}
-
-	/* checking for the ack clears the PFACK bit.  Place
-	 * it back in the v2p_mailbox cache so that anyone
-	 * polling for an ack will not miss it
-	 */
-	if (got_ack)
-		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+		mod_timer(&adapter->watchdog_timer, jiffies);
 
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
 
@@ -1327,27 +1302,51 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 	}
 }
 
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-						int rxr)
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+					 int rxr)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
 	int j = adapter->rx_ring[rxr].reg_idx;
-	int k;
 
-	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
-		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
-			break;
-		else
-			msleep(1);
-	}
-	if (k >= IXGBE_MAX_RX_DESC_POLL) {
-		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
-		       "not set within the polling period\n", rxr);
-	}
+	do {
+		usleep_range(1000, 2000);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop)
+		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
+		       rxr);
+
+	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+				(adapter->rx_ring[rxr].count - 1));
+}
+
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+				     struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
 
-	ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
-				adapter->rx_ring[rxr].count - 1);
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop)
+		hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
+		       reg_idx);
 }
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1545,8 +1544,6 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	ixgbevf_negotiate_api(adapter);
-
 	ixgbevf_reset_queues(adapter);
 
 	ixgbevf_configure(adapter);
@@ -1679,7 +1676,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
 	/* signal that we are down to the interrupt handler */
 	set_bit(__IXGBEVF_DOWN, &adapter->state);
-	/* disable receives */
+
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
 
 	netif_tx_disable(netdev);
 
@@ -1733,10 +1733,12 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 
-	if (hw->mac.ops.reset_hw(hw))
+	if (hw->mac.ops.reset_hw(hw)) {
 		hw_dbg(hw, "PF still resetting\n");
-	else
+	} else {
 		hw->mac.ops.init_hw(hw);
+		ixgbevf_negotiate_api(adapter);
+	}
 
 	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
 		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -2072,6 +2074,9 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 	hw->mac.max_tx_queues = 2;
 	hw->mac.max_rx_queues = 2;
 
+	/* lock to protect mailbox accesses */
+	spin_lock_init(&adapter->mbx_lock);
+
 	err = hw->mac.ops.reset_hw(hw);
 	if (err) {
 		dev_info(&pdev->dev,
@@ -2082,6 +2087,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 		pr_err("init_shared_code failed: %d\n", err);
 		goto out;
 	}
+	ixgbevf_negotiate_api(adapter);
 	err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
 	if (err)
 		dev_info(&pdev->dev, "Error reading MAC address\n");
@@ -2097,9 +2103,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
 	}
 
-	/* lock to protect mailbox accesses */
-	spin_lock_init(&adapter->mbx_lock);
-
 	/* Enable dynamic interrupt throttling rates */
 	adapter->rx_itr_setting = 1;
 	adapter->tx_itr_setting = 1;
@@ -2620,8 +2623,6 @@ static int ixgbevf_open(struct net_device *netdev)
 		}
 	}
 
-	ixgbevf_negotiate_api(adapter);
-
 	/* setup queue reg_idx and Rx queue count */
 	err = ixgbevf_setup_queues(adapter);
 	if (err)
@@ -3216,6 +3217,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
 	}
 	pci_set_master(pdev);
 
+	ixgbevf_reset(adapter);
+
 	rtnl_lock();
 	err = ixgbevf_init_interrupt_scheme(adapter);
 	rtnl_unlock();
@@ -3224,8 +3227,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
 		return err;
 	}
 
-	ixgbevf_reset(adapter);
-
 	if (netif_running(netdev)) {
 		err = ixgbevf_open(netdev);
 		if (err)
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 387b52635bc0..4d44d64ae387 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -242,7 +242,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
 	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
 	if (addr)
-		memcpy(msg_addr, addr, 6);
+		memcpy(msg_addr, addr, ETH_ALEN);
 	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
 	if (!ret_val)
@@ -275,7 +275,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 
 	memset(msgbuf, 0, sizeof(msgbuf));
 	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
-	memcpy(msg_addr, addr, 6);
+	memcpy(msg_addr, addr, ETH_ALEN);
 	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
 	if (!ret_val)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 23de82a9da82..f5685c0d0579 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -309,7 +309,7 @@ static void
 jme_load_macaddr(struct net_device *netdev)
 {
 	struct jme_adapter *jme = netdev_priv(netdev);
-	unsigned char macaddr[6];
+	unsigned char macaddr[ETH_ALEN];
 	u32 val;
 
 	spin_lock_bh(&jme->macaddr_lock);
@@ -321,7 +321,7 @@ jme_load_macaddr(struct net_device *netdev)
 	val = jread32(jme, JME_RXUMA_HI);
 	macaddr[4] = (val >>  0) & 0xFF;
 	macaddr[5] = (val >>  8) & 0xFF;
-	memcpy(netdev->dev_addr, macaddr, 6);
+	memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
 	spin_unlock_bh(&jme->macaddr_lock);
 }
 
@@ -3192,7 +3192,6 @@ jme_init_one(struct pci_dev *pdev,
 err_out_unmap:
 	iounmap(jme->regs);
 err_out_free_netdev:
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 err_out_release_regions:
 	pci_release_regions(pdev);
@@ -3210,7 +3209,6 @@ jme_remove_one(struct pci_dev *pdev)
 
 	unregister_netdev(netdev);
 	iounmap(jme->regs);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index a36fa80968eb..4a5e3b0f712e 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1110,7 +1110,7 @@ static int korina_probe(struct platform_device *pdev)
 	lp = netdev_priv(dev);
 
 	bif->dev = dev;
-	memcpy(dev->dev_addr, bif->mac, 6);
+	memcpy(dev->dev_addr, bif->mac, ETH_ALEN);
 
 	lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
 	lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2c210ec35d59..4cfae6c9a63f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2513,7 +2513,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
 
 	mac_addr = of_get_mac_address(pnp);
 	if (mac_addr)
-		memcpy(ppd.mac_addr, mac_addr, 6);
+		memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
 
 	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
 	mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2696,7 +2696,7 @@ static void set_params(struct mv643xx_eth_private *mp,
 	struct net_device *dev = mp->dev;
 
 	if (is_valid_ether_addr(pd->mac_addr))
-		memcpy(dev->dev_addr, pd->mac_addr, 6);
+		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
 	else
 		uc_addr_get(mp, dev->dev_addr);
 
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index ecc7f7b696b8..597846193869 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4046,7 +4046,6 @@ err_out_free_regions:
 	pci_release_regions(pdev);
 err_out_disable_pdev:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 err_out:
 	return err;
 }
@@ -4090,7 +4089,6 @@ static void skge_remove(struct pci_dev *pdev)
 
 	iounmap(hw->regs);
 	kfree(hw);
-	pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index e09a8c6f8536..a7df981d2123 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5081,7 +5081,6 @@ err_out_free_regions:
 err_out_disable:
 	pci_disable_device(pdev);
 err_out:
-	pci_set_drvdata(pdev, NULL);
 	return err;
 }
 
@@ -5124,8 +5123,6 @@ static void sky2_remove(struct pci_dev *pdev)
 
 	iounmap(hw->regs);
 	kfree(hw);
-
-	pci_set_drvdata(pdev, NULL);
 }
 
 static int sky2_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index ea20182c6969..735765c21c95 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2253,7 +2253,6 @@ EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	struct mlx4_vport_oper_state *vf_oper;
 	struct mlx4_vport_state *vf_admin;
 	int slave;
 
@@ -2269,7 +2268,6 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 		return -EINVAL;
 
 	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
-	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
 	if ((0 == vlan) && (0 == qos))
 		vf_admin->default_vlan = MLX4_VGT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fa37b7a61213..85d91665d400 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1733,7 +1733,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 	/* Unregister Mac address for the port */
 	mlx4_en_put_qp(priv);
-	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
 		mdev->mac_removed[priv->port] = 1;
 
 	/* Free RX Rings */
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 0d63daa2f422..c151e7a6710a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -652,7 +652,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
 	if (field & 1<<6)
-		dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
 	MLX4_GET(dev_cap->max_icm_sz, outbox,
 		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
 	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -1713,7 +1713,6 @@ void mlx4_opreq_action(struct work_struct *work)
 	u32 *outbox;
 	u32 modifier;
 	u16 token;
-	u16 type_m;
 	u16 type;
 	int err;
 	u32 num_qps;
@@ -1746,7 +1745,6 @@ void mlx4_opreq_action(struct work_struct *work)
 	MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
 	MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
 	MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
-	type_m = type >> 12;
 	type &= 0xfff;
 
 	switch (type) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 60c9f4f103fc..179d26709c94 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,6 +42,7 @@
 #include <linux/io-mapping.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
+#include <linux/kmod.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -650,6 +651,27 @@ err_mem:
 	return err;
 }
 
+static void mlx4_request_modules(struct mlx4_dev *dev)
+{
+	int port;
+	int has_ib_port = false;
+	int has_eth_port = false;
+#define EN_DRV_NAME	"mlx4_en"
+#define IB_DRV_NAME	"mlx4_ib"
+
+	for (port = 1; port <= dev->caps.num_ports; port++) {
+		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
+			has_ib_port = true;
+		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+			has_eth_port = true;
+	}
+
+	if (has_ib_port)
+		request_module_nowait(IB_DRV_NAME);
+	if (has_eth_port)
+		request_module_nowait(EN_DRV_NAME);
+}
+
 /*
  * Change the port configuration of the device.
  * Every user of this function must hold the port mutex.
@@ -681,6 +703,11 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
 		}
 		mlx4_set_port_mask(dev);
 		err = mlx4_register_device(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to register device\n");
+			goto out;
+		}
+		mlx4_request_modules(dev);
 	}
 
 out:
@@ -2305,6 +2332,8 @@ slave_start:
 	if (err)
 		goto err_port;
 
+	mlx4_request_modules(dev);
+
 	mlx4_sense_init(dev);
 	mlx4_start_sense(dev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 55f6245efb6c..70f0213d68c4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -645,7 +645,7 @@ static const u8 __promisc_mode[] = {
 int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
 				    enum mlx4_net_trans_promisc_mode flow_type)
 {
-	if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+	if (flow_type >= MLX4_FS_MODE_NUM) {
 		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
 		return -EINVAL;
 	}
@@ -681,7 +681,7 @@ const u16 __sw_id_hw[] = {
 int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
 				  enum mlx4_net_trans_rule_id id)
 {
-	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+	if (id >= MLX4_NET_TRANS_RULE_NUM) {
 		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
 		return -EINVAL;
 	}
@@ -706,7 +706,7 @@ static const int __rule_hw_sz[] = {
 int mlx4_hw_rule_sz(struct mlx4_dev *dev,
 	       enum mlx4_net_trans_rule_id id)
 {
-	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+	if (id >= MLX4_NET_TRANS_RULE_NUM) {
 		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
 		return -EINVAL;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 79fd269e2c54..9e08e35ce351 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -34,6 +34,7 @@
 #include <linux/init.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/srq.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 075f4e21d33d..c83d16dc7cd5 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1248,7 +1248,7 @@ static void ks_set_mac(struct ks_net *ks, u8 *data)
 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
 	ks_wrreg16(ks, KS_MARL, w);
 
-	memcpy(ks->mac_addr, data, 6);
+	memcpy(ks->mac_addr, data, ETH_ALEN);
 
 	if (ks->enabled)
 		ks_start_rx(ks);
@@ -1651,7 +1651,7 @@ static int ks8851_probe(struct platform_device *pdev)
 	}
 	netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
 
-	memcpy(netdev->dev_addr, ks->mac_addr, 6);
+	memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
 
 	ks_set_mac(ks, netdev->dev_addr);
 
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 8ebc352bcbe6..ddd252a3da9c 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -7150,8 +7150,6 @@ static void pcidev_exit(struct pci_dev *pdev)
 	struct platform_info *info = pci_get_drvdata(pdev);
 	struct dev_info *hw_priv = &info->dev_info;
 
-	pci_set_drvdata(pdev, NULL);
-
 	release_mem_region(pci_resource_start(pdev, 0),
 			   pci_resource_len(pdev, 0));
 	for (i = 0; i < hw_priv->hw.dev_count; i++) {
@@ -7227,7 +7225,7 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
 
 static char pcidev_name[] = "ksz884xp";
 
-static struct pci_device_id pcidev_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcidev_table) = {
 	{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index ea54d95e5b9f..cbd013379252 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -26,7 +26,6 @@
 #include <linux/of_irq.h>
 #include <linux/crc32.h>
 #include <linux/crc32c.h>
-#include <linux/dma-mapping.h>
 
 #include "moxart_ether.h"
 
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 149355b52ad0..68026f7e8ba3 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -934,7 +934,7 @@ static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
 
 static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
 {
-	int rc = true;
+	bool rc = true;
 	spin_lock(&ss->lock);
 	if ((ss->state & SLICE_LOCKED)) {
 		WARN_ON((ss->state & SLICE_STATE_NAPI));
@@ -957,7 +957,7 @@ static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
 
 static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
 {
-	int rc = true;
+	bool rc = true;
 	spin_lock_bh(&ss->lock);
 	if ((ss->state & SLICE_LOCKED)) {
 		ss->state |= SLICE_STATE_POLL_YIELD;
@@ -3164,7 +3164,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
 
 	/* Walk the multicast list, and add each address */
 	netdev_for_each_mc_addr(ha, dev) {
-		memcpy(data, &ha->addr, 6);
+		memcpy(data, &ha->addr, ETH_ALEN);
 		cmd.data0 = ntohl(data[0]);
 		cmd.data1 = ntohl(data[1]);
 		err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3207,7 +3207,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
 	}
 
 	/* change the dev structure */
-	memcpy(dev->dev_addr, sa->sa_data, 6);
+	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
 	return 0;
 }
 
@@ -4208,7 +4208,6 @@ static void myri10ge_remove(struct pci_dev *pdev)
 	set_fw_name(mgp, NULL, false);
 	free_netdev(netdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 7a5e295588b0..64ec2a437f46 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -970,7 +970,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 
  err_ioremap:
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 
  err_pci_request_regions:
 	free_netdev(dev);
@@ -3220,7 +3219,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
 	pci_release_regions (pdev);
 	iounmap(ioaddr);
 	free_netdev (dev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 51b00941302c..9eeddbd0b2c7 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8185,7 +8185,6 @@ mem_alloc_failed:
 	free_shared_mem(sp);
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 
 	return ret;
@@ -8221,7 +8220,6 @@ static void s2io_rem_nic(struct pci_dev *pdev)
 	iounmap(sp->bar0);
 	iounmap(sp->bar1);
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 	pci_disable_device(pdev);
 }
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 5a20eaf903dd..8614eeb7de81 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -4739,7 +4739,6 @@ _exit6:
 _exit5:
 	vxge_device_unregister(hldev);
 _exit4:
-	pci_set_drvdata(pdev, NULL);
 	vxge_hw_device_terminate(hldev);
 	pci_disable_sriov(pdev);
 _exit3:
@@ -4782,7 +4781,6 @@ static void vxge_remove(struct pci_dev *pdev)
 		vxge_free_mac_add_list(&vdev->vpaths[i]);
 
 	vxge_device_unregister(hldev);
-	pci_set_drvdata(pdev, NULL);
 	/* Do not call pci_disable_sriov here, as it will break child devices */
 	vxge_hw_device_terminate(hldev);
 	iounmap(vdev->bar0);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 6797b1075874..2a9003071d51 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -653,38 +653,38 @@ struct pch_gbe_adapter {
 extern const char pch_driver_version[];
 
 /* pch_gbe_main.c */
-extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
-extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
-				      struct pch_gbe_tx_ring *txdr);
-extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
-				      struct pch_gbe_rx_ring *rxdr);
-extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
-				      struct pch_gbe_tx_ring *tx_ring);
-extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
-				      struct pch_gbe_rx_ring *rx_ring);
-extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-extern u32 pch_ch_control_read(struct pci_dev *pdev);
-extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_ch_event_read(struct pci_dev *pdev);
-extern void pch_ch_event_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
-extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
-extern u64 pch_rx_snap_read(struct pci_dev *pdev);
-extern u64 pch_tx_snap_read(struct pci_dev *pdev);
-extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
+int pch_gbe_up(struct pch_gbe_adapter *adapter);
+void pch_gbe_down(struct pch_gbe_adapter *adapter);
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
+void pch_gbe_reset(struct pch_gbe_adapter *adapter);
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+			       struct pch_gbe_tx_ring *txdr);
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+			       struct pch_gbe_rx_ring *rxdr);
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+			       struct pch_gbe_tx_ring *tx_ring);
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+			       struct pch_gbe_rx_ring *rx_ring);
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+u32 pch_ch_control_read(struct pci_dev *pdev);
+void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+u32 pch_ch_event_read(struct pci_dev *pdev);
+void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+u64 pch_rx_snap_read(struct pci_dev *pdev);
+u64 pch_tx_snap_read(struct pci_dev *pdev);
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
 
 /* pch_gbe_param.c */
-extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
+void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
 
 /* pch_gbe_ethtool.c */
-extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
+void pch_gbe_set_ethtool_ops(struct net_device *netdev);
 
 /* pch_gbe_mac.c */
-extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
-extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
-				 u32 addr, u32 dir, u32 reg, u16 data);
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+			  u16 data);
 #endif /* _PCH_GBE_H_ */
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index cac33e5f9bc2..b6bdeb3c1971 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1910,7 +1910,6 @@ static void hamachi_remove_one(struct pci_dev *pdev)
 		iounmap(hmp->base);
 		free_netdev(dev);
 		pci_release_regions(pdev);
-		pci_set_drvdata(pdev, NULL);
 	}
 }
 
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index d28593b1fc3e..07a890eb72ad 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -513,7 +513,6 @@ err_out_unmap_rx:
 err_out_unmap_tx:
 	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
 err_out_cleardev:
-	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ioaddr);
 err_out_free_res:
 	pci_release_regions(pdev);
@@ -1392,7 +1391,6 @@ static void yellowfin_remove_one(struct pci_dev *pdev)
 	pci_release_regions (pdev);
 
 	free_netdev (dev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 5b65356e7568..dbaa49e58b0c 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1870,7 +1870,6 @@ static void pasemi_mac_remove(struct pci_dev *pdev)
 	pasemi_dma_free_chan(&mac->tx->chan);
 	pasemi_dma_free_chan(&mac->rx->chan);
 
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 }
 
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 32675e16021e..9adcdbb49476 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 81 56#define _NETXEN_NIC_LINUX_SUBVERSION 82
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.81" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.82"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
@@ -1883,9 +1883,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
1883 1883
1884int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac); 1884int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
1885int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac); 1885int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
1886extern void netxen_change_ringparam(struct netxen_adapter *adapter); 1886void netxen_change_ringparam(struct netxen_adapter *adapter);
1887extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, 1887int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
1888 int *valp);
1889 1888
1890extern const struct ethtool_ops netxen_nic_ethtool_ops; 1889extern const struct ethtool_ops netxen_nic_ethtool_ops;
1891 1890
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 32c790659f9c..0c64c82b9acf 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -958,6 +958,7 @@ enum {
958#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac)) 958#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac))
959#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138)) 959#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138))
960#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140)) 960#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140))
961#define NETXEN_ULA_KEY (NETXEN_CAM_RAM(0x178))
961 962
962/* MiniDIMM related macros */ 963/* MiniDIMM related macros */
963#define NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258)) 964#define NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 8375cbde9969..67efe754367d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -648,7 +648,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
648 648
649 mac_req = (nx_mac_req_t *)&req.words[0]; 649 mac_req = (nx_mac_req_t *)&req.words[0];
650 mac_req->op = op; 650 mac_req->op = op;
651 memcpy(mac_req->mac_addr, addr, 6); 651 memcpy(mac_req->mac_addr, addr, ETH_ALEN);
652 652
653 return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 653 return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
654} 654}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index cbd75f97ffb3..3bec8cfebf99 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1415,6 +1415,32 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1415 return 0; 1415 return 0;
1416} 1416}
1417 1417
1418#define NETXEN_ULA_ADAPTER_KEY (0xdaddad01)
1419#define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00)
1420
1421static void netxen_read_ula_info(struct netxen_adapter *adapter)
1422{
1423 u32 temp;
1424
1425 /* Print ULA info only once for an adapter */
1426 if (adapter->portnum != 0)
1427 return;
1428
1429 temp = NXRD32(adapter, NETXEN_ULA_KEY);
1430 switch (temp) {
1431 case NETXEN_ULA_ADAPTER_KEY:
1432 dev_info(&adapter->pdev->dev, "ULA adapter");
1433 break;
1434 case NETXEN_NON_ULA_ADAPTER_KEY:
1435 dev_info(&adapter->pdev->dev, "non ULA adapter");
1436 break;
1437 default:
1438 break;
1439 }
1440
1441 return;
1442}
1443
1418#ifdef CONFIG_PCIEAER 1444#ifdef CONFIG_PCIEAER
1419static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) 1445static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
1420{ 1446{
@@ -1561,6 +1587,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1561 goto err_out_disable_msi; 1587 goto err_out_disable_msi;
1562 } 1588 }
1563 1589
1590 netxen_read_ula_info(adapter);
1591
1564 err = netxen_setup_netdev(adapter, netdev); 1592 err = netxen_setup_netdev(adapter, netdev);
1565 if (err) 1593 if (err)
1566 goto err_out_disable_msi; 1594 goto err_out_disable_msi;
@@ -1602,7 +1630,6 @@ err_out_free_res:
1602 pci_release_regions(pdev); 1630 pci_release_regions(pdev);
1603 1631
1604err_out_disable_pdev: 1632err_out_disable_pdev:
1605 pci_set_drvdata(pdev, NULL);
1606 pci_disable_device(pdev); 1633 pci_disable_device(pdev);
1607 return err; 1634 return err;
1608} 1635}
@@ -1661,7 +1688,6 @@ static void netxen_nic_remove(struct pci_dev *pdev)
1661 1688
1662 pci_release_regions(pdev); 1689 pci_release_regions(pdev);
1663 pci_disable_device(pdev); 1690 pci_disable_device(pdev);
1664 pci_set_drvdata(pdev, NULL);
1665 1691
1666 free_netdev(netdev); 1692 free_netdev(netdev);
1667} 1693}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 91a8fcd6c246..0758b9435358 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3916,7 +3916,6 @@ err_out_free_regions:
3916 pci_release_regions(pdev); 3916 pci_release_regions(pdev);
3917err_out_disable_pdev: 3917err_out_disable_pdev:
3918 pci_disable_device(pdev); 3918 pci_disable_device(pdev);
3919 pci_set_drvdata(pdev, NULL);
3920err_out: 3919err_out:
3921 return err; 3920 return err;
3922} 3921}
@@ -3939,7 +3938,6 @@ static void ql3xxx_remove(struct pci_dev *pdev)
3939 3938
3940 iounmap(qdev->mem_map_registers); 3939 iounmap(qdev->mem_map_registers);
3941 pci_release_regions(pdev); 3940 pci_release_regions(pdev);
3942 pci_set_drvdata(pdev, NULL);
3943 free_netdev(ndev); 3941 free_netdev(ndev);
3944} 3942}
3945 3943
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 81bf83604c4f..0c2405dbc970 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
38 38
39#define _QLCNIC_LINUX_MAJOR 5 39#define _QLCNIC_LINUX_MAJOR 5
40#define _QLCNIC_LINUX_MINOR 3 40#define _QLCNIC_LINUX_MINOR 3
41#define _QLCNIC_LINUX_SUBVERSION 50 41#define _QLCNIC_LINUX_SUBVERSION 51
42#define QLCNIC_LINUX_VERSIONID "5.3.50" 42#define QLCNIC_LINUX_VERSIONID "5.3.51"
43#define QLCNIC_DRV_IDC_VER 0x01 43#define QLCNIC_DRV_IDC_VER 0x01
44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -961,8 +961,6 @@ struct qlcnic_ipaddr {
961#define __QLCNIC_SRIOV_CAPABLE 11 961#define __QLCNIC_SRIOV_CAPABLE 11
962#define __QLCNIC_MBX_POLL_ENABLE 12 962#define __QLCNIC_MBX_POLL_ENABLE 12
963#define __QLCNIC_DIAG_MODE 13 963#define __QLCNIC_DIAG_MODE 13
964#define __QLCNIC_DCB_STATE 14
965#define __QLCNIC_DCB_IN_AEN 15
966 964
967#define QLCNIC_INTERRUPT_TEST 1 965#define QLCNIC_INTERRUPT_TEST 1
968#define QLCNIC_LOOPBACK_TEST 2 966#define QLCNIC_LOOPBACK_TEST 2
@@ -1199,6 +1197,7 @@ struct qlcnic_npar_info {
1199 u8 promisc_mode; 1197 u8 promisc_mode;
1200 u8 offload_flags; 1198 u8 offload_flags;
1201 u8 pci_func; 1199 u8 pci_func;
1200 u8 mac[ETH_ALEN];
1202}; 1201};
1203 1202
1204struct qlcnic_eswitch { 1203struct qlcnic_eswitch {
@@ -2115,98 +2114,4 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
2115 2114
2116 return status; 2115 return status;
2117} 2116}
2118
2119static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
2120{
2121 struct qlcnic_dcb *dcb = adapter->dcb;
2122
2123 if (dcb && dcb->ops->get_hw_capability)
2124 return dcb->ops->get_hw_capability(adapter);
2125
2126 return 0;
2127}
2128
2129static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
2130{
2131 struct qlcnic_dcb *dcb = adapter->dcb;
2132
2133 if (dcb && dcb->ops->free)
2134 dcb->ops->free(adapter);
2135}
2136
2137static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
2138{
2139 struct qlcnic_dcb *dcb = adapter->dcb;
2140
2141 if (dcb && dcb->ops->attach)
2142 return dcb->ops->attach(adapter);
2143
2144 return 0;
2145}
2146
2147static inline int
2148qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
2149{
2150 struct qlcnic_dcb *dcb = adapter->dcb;
2151
2152 if (dcb && dcb->ops->query_hw_capability)
2153 return dcb->ops->query_hw_capability(adapter, buf);
2154
2155 return 0;
2156}
2157
2158static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
2159{
2160 struct qlcnic_dcb *dcb = adapter->dcb;
2161
2162 if (dcb && dcb->ops->get_info)
2163 dcb->ops->get_info(adapter);
2164}
2165
2166static inline int
2167qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
2168{
2169 struct qlcnic_dcb *dcb = adapter->dcb;
2170
2171 if (dcb && dcb->ops->query_cee_param)
2172 return dcb->ops->query_cee_param(adapter, buf, type);
2173
2174 return 0;
2175}
2176
2177static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
2178{
2179 struct qlcnic_dcb *dcb = adapter->dcb;
2180
2181 if (dcb && dcb->ops->get_cee_cfg)
2182 return dcb->ops->get_cee_cfg(adapter);
2183
2184 return 0;
2185}
2186
2187static inline void
2188qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
2189{
2190 struct qlcnic_dcb *dcb = adapter->dcb;
2191
2192 if (dcb && dcb->ops->register_aen)
2193 dcb->ops->register_aen(adapter, flag);
2194}
2195
2196static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
2197 void *msg)
2198{
2199 struct qlcnic_dcb *dcb = adapter->dcb;
2200
2201 if (dcb && dcb->ops->handle_aen)
2202 dcb->ops->handle_aen(adapter, msg);
2203}
2204
2205static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
2206{
2207 struct qlcnic_dcb *dcb = adapter->dcb;
2208
2209 if (dcb && dcb->ops->init_dcbnl_ops)
2210 dcb->ops->init_dcbnl_ops(adapter);
2211}
2212#endif /* __QLCNIC_H_ */ 2117#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 3ca00e05f23d..a126bdf27952 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -902,7 +902,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
902 QLCNIC_MBX_RSP(event[0])); 902 QLCNIC_MBX_RSP(event[0]));
903 break; 903 break;
904 case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT: 904 case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
905 qlcnic_dcb_handle_aen(adapter, (void *)&event[1]); 905 qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]);
906 break; 906 break;
907 default: 907 default:
908 dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n", 908 dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
@@ -2321,19 +2321,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
2321 i++; 2321 i++;
2322 memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2); 2322 memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
2323 i = i + 3; 2323 i = i + 3;
2324 if (ahw->op_mode == QLCNIC_MGMT_FUNC)
2325 dev_info(dev, "id = %d active = %d type = %d\n"
2326 "\tport = %d min bw = %d max bw = %d\n"
2327 "\tmac_addr = %pM\n", pci_info->id,
2328 pci_info->active, pci_info->type,
2329 pci_info->default_port,
2330 pci_info->tx_min_bw,
2331 pci_info->tx_max_bw, pci_info->mac);
2332 } 2324 }
2333 if (ahw->op_mode == QLCNIC_MGMT_FUNC)
2334 dev_info(dev, "Max functions = %d, active functions = %d\n",
2335 ahw->max_pci_func, ahw->act_pci_func);
2336
2337 } else { 2325 } else {
2338 dev_err(dev, "Failed to get PCI Info, error = %d\n", err); 2326 dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
2339 err = -EIO; 2327 err = -EIO;
@@ -3279,12 +3267,12 @@ int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
3279 return 0; 3267 return 0;
3280} 3268}
3281 3269
3282int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter) 3270inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
3283{ 3271{
3284 return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) * 3272 return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
3285 sizeof(adapter->ahw->ext_reg_tbl)) + 3273 sizeof(*adapter->ahw->ext_reg_tbl)) +
3286 (ARRAY_SIZE(qlcnic_83xx_reg_tbl) + 3274 (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
3287 sizeof(adapter->ahw->reg_tbl)); 3275 sizeof(*adapter->ahw->reg_tbl));
3288} 3276}
3289 3277
3290int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff) 3278int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
@@ -3381,10 +3369,21 @@ void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
3381 } 3369 }
3382 config = ahw->port_config; 3370 config = ahw->port_config;
3383 if (config & QLC_83XX_CFG_STD_PAUSE) { 3371 if (config & QLC_83XX_CFG_STD_PAUSE) {
3384 if (config & QLC_83XX_CFG_STD_TX_PAUSE) 3372 switch (MSW(config)) {
3373 case QLC_83XX_TX_PAUSE:
3385 pause->tx_pause = 1; 3374 pause->tx_pause = 1;
3386 if (config & QLC_83XX_CFG_STD_RX_PAUSE) 3375 break;
3376 case QLC_83XX_RX_PAUSE:
3387 pause->rx_pause = 1; 3377 pause->rx_pause = 1;
3378 break;
3379 case QLC_83XX_TX_RX_PAUSE:
3380 default:
3381 /* Backward compatibility for existing
3382 * flash definitions
3383 */
3384 pause->tx_pause = 1;
3385 pause->rx_pause = 1;
3386 }
3388 } 3387 }
3389 3388
3390 if (QLC_83XX_AUTONEG(config)) 3389 if (QLC_83XX_AUTONEG(config))
@@ -3427,7 +3426,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
3427 ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE; 3426 ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
3428 ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE; 3427 ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
3429 } else if (!pause->rx_pause && !pause->tx_pause) { 3428 } else if (!pause->rx_pause && !pause->tx_pause) {
3430 ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE; 3429 ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE |
3430 QLC_83XX_CFG_STD_PAUSE);
3431 } 3431 }
3432 status = qlcnic_83xx_set_port_config(adapter); 3432 status = qlcnic_83xx_set_port_config(adapter);
3433 if (status) { 3433 if (status) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 533e150503af..9f4e4c4ab521 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -363,6 +363,9 @@ enum qlcnic_83xx_states {
363#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13) 363#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13)
364#define QLC_83XX_DCBX(data) (((data) >> 28) & 7) 364#define QLC_83XX_DCBX(data) (((data) >> 28) & 7)
365#define QLC_83XX_AUTONEG(data) ((data) & BIT_15) 365#define QLC_83XX_AUTONEG(data) ((data) & BIT_15)
366#define QLC_83XX_TX_PAUSE 0x10
367#define QLC_83XX_RX_PAUSE 0x20
368#define QLC_83XX_TX_RX_PAUSE 0x30
366#define QLC_83XX_CFG_STD_PAUSE (1 << 5) 369#define QLC_83XX_CFG_STD_PAUSE (1 << 5)
367#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20) 370#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20)
368#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20) 371#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20)
@@ -626,7 +629,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
626int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *, 629int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
627 struct qlcnic_info *, u8); 630 struct qlcnic_info *, u8);
628int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); 631int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
629int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int); 632int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
630 633
631void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); 634void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
632void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); 635void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
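
The pauseparam hunks above replace two independent bit tests with a switch on MSW(config), and the new QLC_83XX_TX_PAUSE, QLC_83XX_RX_PAUSE and QLC_83XX_TX_RX_PAUSE values are simply the existing CFG_STD_TX/RX_PAUSE bits viewed from the upper half-word. A worked check of that mapping, assuming MSW() extracts the upper 16 bits of a 32-bit word as the qlcnic headers define it:

#include <stdint.h>

#define MSW(x)				((uint16_t)(((uint32_t)(x)) >> 16))
#define QLC_83XX_CFG_STD_TX_PAUSE	(1 << 20)
#define QLC_83XX_CFG_STD_RX_PAUSE	(2 << 20)

/*
 * MSW(1 << 20) == 0x10 == QLC_83XX_TX_PAUSE
 * MSW(2 << 20) == 0x20 == QLC_83XX_RX_PAUSE
 * MSW(3 << 20) == 0x30 == QLC_83XX_TX_RX_PAUSE
 * so the switch default (both bits set, or a legacy flash value) keeps
 * the old behaviour of enabling tx_pause and rx_pause together.
 */
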
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f09e787af0b2..e2cd48417041 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -636,7 +636,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
636 if (adapter->portnum == 0) 636 if (adapter->portnum == 0)
637 qlcnic_set_drv_version(adapter); 637 qlcnic_set_drv_version(adapter);
638 638
639 qlcnic_dcb_get_info(adapter); 639 qlcnic_dcb_get_info(adapter->dcb);
640 qlcnic_83xx_idc_attach_driver(adapter); 640 qlcnic_83xx_idc_attach_driver(adapter);
641 641
642 return 0; 642 return 0;
@@ -818,6 +818,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
818 struct qlcnic_hardware_context *ahw = adapter->ahw; 818 struct qlcnic_hardware_context *ahw = adapter->ahw;
819 struct qlcnic_mailbox *mbx = ahw->mailbox; 819 struct qlcnic_mailbox *mbx = ahw->mailbox;
820 int ret = 0; 820 int ret = 0;
821 u32 owner;
821 u32 val; 822 u32 val;
822 823
823 /* Perform NIC configuration based ready state entry actions */ 824 /* Perform NIC configuration based ready state entry actions */
@@ -846,6 +847,10 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
846 clear_bit(QLC_83XX_MBX_READY, &mbx->status); 847 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
847 set_bit(__QLCNIC_RESETTING, &adapter->state); 848 set_bit(__QLCNIC_RESETTING, &adapter->state);
848 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); 849 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
850 } else {
851 owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
852 if (ahw->pci_func == owner)
853 qlcnic_dump_fw(adapter);
849 } 854 }
850 return -EIO; 855 return -EIO;
851 } 856 }
@@ -1058,6 +1063,12 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
1058 adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; 1063 adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
1059 qlcnic_83xx_periodic_tasks(adapter); 1064 qlcnic_83xx_periodic_tasks(adapter);
1060 1065
1066	/* Do not reschedule if the firmware is hung and auto
1067 * recovery is disabled
1068 */
1069 if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
1070 return;
1071
1061 /* Re-schedule the function */ 1072 /* Re-schedule the function */
1062 if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) 1073 if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
1063 qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 1074 qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -2163,6 +2174,7 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
2163int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) 2174int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2164{ 2175{
2165 struct qlcnic_hardware_context *ahw = adapter->ahw; 2176 struct qlcnic_hardware_context *ahw = adapter->ahw;
2177 struct qlcnic_dcb *dcb;
2166 int err = 0; 2178 int err = 0;
2167 2179
2168 ahw->msix_supported = !!qlcnic_use_msi_x; 2180 ahw->msix_supported = !!qlcnic_use_msi_x;
@@ -2220,8 +2232,10 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2220 if (err) 2232 if (err)
2221 goto disable_mbx_intr; 2233 goto disable_mbx_intr;
2222 2234
2223 if (adapter->dcb && qlcnic_dcb_attach(adapter)) 2235 dcb = adapter->dcb;
2224 qlcnic_clear_dcb_ops(adapter); 2236
2237 if (dcb && qlcnic_dcb_attach(dcb))
2238 qlcnic_clear_dcb_ops(dcb);
2225 2239
2226 /* Periodically monitor device status */ 2240 /* Periodically monitor device status */
2227 qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); 2241 qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 0248a4c2f5dd..734d28602ac3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -94,13 +94,29 @@ qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
94 **/ 94 **/
95static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter) 95static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
96{ 96{
97 int err = -EIO; 97 struct qlcnic_hardware_context *ahw = adapter->ahw;
98 struct device *dev = &adapter->pdev->dev;
99 struct qlcnic_npar_info *npar;
100 int i, err = -EIO;
98 101
99 qlcnic_83xx_get_minidump_template(adapter); 102 qlcnic_83xx_get_minidump_template(adapter);
103
100 if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) { 104 if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
101 if (qlcnic_init_pci_info(adapter)) 105 if (qlcnic_init_pci_info(adapter))
102 return err; 106 return err;
103 107
108 npar = adapter->npars;
109
110 for (i = 0; i < ahw->act_pci_func; i++, npar++) {
111 dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
112 npar->pci_func, npar->active, npar->type,
113 npar->phy_port, npar->min_bw, npar->max_bw,
114 npar->mac);
115 }
116
117 dev_info(dev, "Max functions = %d, active functions = %d\n",
118 ahw->max_pci_func, ahw->act_pci_func);
119
104 if (qlcnic_83xx_set_vnic_opmode(adapter)) 120 if (qlcnic_83xx_set_vnic_opmode(adapter))
105 return err; 121 return err;
106 122
@@ -115,12 +131,12 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
115 return err; 131 return err;
116 132
117 qlcnic_83xx_config_vnic_buff_descriptors(adapter); 133 qlcnic_83xx_config_vnic_buff_descriptors(adapter);
118 adapter->ahw->msix_supported = !!qlcnic_use_msi_x; 134 ahw->msix_supported = qlcnic_use_msi_x ? 1 : 0;
119 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; 135 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
120 qlcnic_83xx_enable_vnic_mode(adapter, 1); 136 qlcnic_83xx_enable_vnic_mode(adapter, 1);
121 137
122 dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n", 138 dev_info(dev, "HAL Version: %d, Management function\n",
123 adapter->ahw->fw_hal_version); 139 ahw->fw_hal_version);
124 140
125 return 0; 141 return 0;
126} 142}
@@ -240,8 +256,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
240 return 0; 256 return 0;
241} 257}
242 258
243static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter, 259int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter,
244 int func, int *port_id) 260 int func, int *port_id)
245{ 261{
246 struct qlcnic_info nic_info; 262 struct qlcnic_info nic_info;
247 int err = 0; 263 int err = 0;
@@ -257,23 +273,8 @@ static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
257 else 273 else
258 err = -EIO; 274 err = -EIO;
259 275
260 return err; 276 if (!err)
261} 277 adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;
262
263int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
264{
265 int id, err = 0;
266
267 err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
268 if (err)
269 return err;
270
271 if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
272 if (!qlcnic_enable_eswitch(adapter, id, 1))
273 adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
274 else
275 err = -EIO;
276 }
277 278
278 return err; 279 return err;
279} 280}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index d62d5ce432ec..86bca7c14f99 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -57,22 +57,22 @@ static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
57static void qlcnic_dcb_aen_work(struct work_struct *); 57static void qlcnic_dcb_aen_work(struct work_struct *);
58static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *); 58static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
59 59
60static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *); 60static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *);
61static void __qlcnic_dcb_free(struct qlcnic_adapter *); 61static void __qlcnic_dcb_free(struct qlcnic_dcb *);
62static int __qlcnic_dcb_attach(struct qlcnic_adapter *); 62static int __qlcnic_dcb_attach(struct qlcnic_dcb *);
63static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *); 63static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *);
64static void __qlcnic_dcb_get_info(struct qlcnic_adapter *); 64static void __qlcnic_dcb_get_info(struct qlcnic_dcb *);
65 65
66static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *); 66static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *);
67static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8); 67static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
68static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *); 68static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
69static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *); 69static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
70 70
71static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *); 71static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
72static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8); 72static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
73static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *); 73static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
74static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool); 74static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
75static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *); 75static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
76 76
77struct qlcnic_dcb_capability { 77struct qlcnic_dcb_capability {
78 bool tsa_capability; 78 bool tsa_capability;
@@ -180,7 +180,7 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
180 .query_cee_param = qlcnic_83xx_dcb_query_cee_param, 180 .query_cee_param = qlcnic_83xx_dcb_query_cee_param,
181 .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg, 181 .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
182 .register_aen = qlcnic_83xx_dcb_register_aen, 182 .register_aen = qlcnic_83xx_dcb_register_aen,
183 .handle_aen = qlcnic_83xx_dcb_handle_aen, 183 .aen_handler = qlcnic_83xx_dcb_aen_handler,
184}; 184};
185 185
186static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { 186static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
@@ -193,7 +193,7 @@ static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
193 .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability, 193 .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
194 .query_cee_param = qlcnic_82xx_dcb_query_cee_param, 194 .query_cee_param = qlcnic_82xx_dcb_query_cee_param,
195 .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg, 195 .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
196 .handle_aen = qlcnic_82xx_dcb_handle_aen, 196 .aen_handler = qlcnic_82xx_dcb_aen_handler,
197}; 197};
198 198
199static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val) 199static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
@@ -242,10 +242,10 @@ static int qlcnic_dcb_prio_count(u8 up_tc_map)
242 return j; 242 return j;
243} 243}
244 244
245static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter) 245static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb)
246{ 246{
247 if (test_bit(__QLCNIC_DCB_STATE, &adapter->state)) 247 if (test_bit(QLCNIC_DCB_STATE, &dcb->state))
248 adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops; 248 dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
249} 249}
250 250
251static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter) 251static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
@@ -256,7 +256,7 @@ static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
256 adapter->dcb->ops = &qlcnic_83xx_dcb_ops; 256 adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
257} 257}
258 258
259int __qlcnic_register_dcb(struct qlcnic_adapter *adapter) 259int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
260{ 260{
261 struct qlcnic_dcb *dcb; 261 struct qlcnic_dcb *dcb;
262 262
@@ -267,20 +267,22 @@ int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
267 adapter->dcb = dcb; 267 adapter->dcb = dcb;
268 dcb->adapter = adapter; 268 dcb->adapter = adapter;
269 qlcnic_set_dcb_ops(adapter); 269 qlcnic_set_dcb_ops(adapter);
270 dcb->state = 0;
270 271
271 return 0; 272 return 0;
272} 273}
273 274
274static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter) 275static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
275{ 276{
276 struct qlcnic_dcb *dcb = adapter->dcb; 277 struct qlcnic_adapter *adapter;
277 278
278 if (!dcb) 279 if (!dcb)
279 return; 280 return;
280 281
281 qlcnic_dcb_register_aen(adapter, 0); 282 adapter = dcb->adapter;
283 qlcnic_dcb_register_aen(dcb, 0);
282 284
283 while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state)) 285 while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
284 usleep_range(10000, 11000); 286 usleep_range(10000, 11000);
285 287
286 cancel_delayed_work_sync(&dcb->aen_work); 288 cancel_delayed_work_sync(&dcb->aen_work);
@@ -298,23 +300,22 @@ static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
298 adapter->dcb = NULL; 300 adapter->dcb = NULL;
299} 301}
300 302
301static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter) 303static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
302{ 304{
303 qlcnic_dcb_get_hw_capability(adapter); 305 qlcnic_dcb_get_hw_capability(dcb);
304 qlcnic_dcb_get_cee_cfg(adapter); 306 qlcnic_dcb_get_cee_cfg(dcb);
305 qlcnic_dcb_register_aen(adapter, 1); 307 qlcnic_dcb_register_aen(dcb, 1);
306} 308}
307 309
308static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter) 310static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
309{ 311{
310 struct qlcnic_dcb *dcb = adapter->dcb;
311 int err = 0; 312 int err = 0;
312 313
313 INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work); 314 INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
314 315
315 dcb->wq = create_singlethread_workqueue("qlcnic-dcb"); 316 dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
316 if (!dcb->wq) { 317 if (!dcb->wq) {
317 dev_err(&adapter->pdev->dev, 318 dev_err(&dcb->adapter->pdev->dev,
318 "DCB workqueue allocation failed. DCB will be disabled\n"); 319 "DCB workqueue allocation failed. DCB will be disabled\n");
319 return -1; 320 return -1;
320 } 321 }
@@ -331,7 +332,7 @@ static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
331 goto out_free_cfg; 332 goto out_free_cfg;
332 } 333 }
333 334
334 qlcnic_dcb_get_info(adapter); 335 qlcnic_dcb_get_info(dcb);
335 336
336 return 0; 337 return 0;
337out_free_cfg: 338out_free_cfg:
@@ -345,9 +346,9 @@ out_free_wq:
345 return err; 346 return err;
346} 347}
347 348
348static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, 349static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
349 char *buf)
350{ 350{
351 struct qlcnic_adapter *adapter = dcb->adapter;
351 struct qlcnic_cmd_args cmd; 352 struct qlcnic_cmd_args cmd;
352 u32 mbx_out; 353 u32 mbx_out;
353 int err; 354 int err;
@@ -371,15 +372,15 @@ static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
371 return err; 372 return err;
372} 373}
373 374
374static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val) 375static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val)
375{ 376{
376 struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability; 377 struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
377 u32 mbx_out; 378 u32 mbx_out;
378 int err; 379 int err;
379 380
380 memset(cap, 0, sizeof(struct qlcnic_dcb_capability)); 381 memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
381 382
382 err = qlcnic_dcb_query_hw_capability(adapter, (char *)val); 383 err = qlcnic_dcb_query_hw_capability(dcb, (char *)val);
383 if (err) 384 if (err)
384 return err; 385 return err;
385 386
@@ -397,21 +398,21 @@ static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
397 if (cap->max_num_tc > QLC_DCB_MAX_TC || 398 if (cap->max_num_tc > QLC_DCB_MAX_TC ||
398 cap->max_ets_tc > cap->max_num_tc || 399 cap->max_ets_tc > cap->max_num_tc ||
399 cap->max_pfc_tc > cap->max_num_tc) { 400 cap->max_pfc_tc > cap->max_num_tc) {
400 dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n"); 401 dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n");
401 return -EINVAL; 402 return -EINVAL;
402 } 403 }
403 404
404 return err; 405 return err;
405} 406}
406 407
407static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) 408static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
408{ 409{
409 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; 410 struct qlcnic_dcb_cfg *cfg = dcb->cfg;
410 struct qlcnic_dcb_capability *cap; 411 struct qlcnic_dcb_capability *cap;
411 u32 mbx_out; 412 u32 mbx_out;
412 int err; 413 int err;
413 414
414 err = __qlcnic_dcb_get_capability(adapter, &mbx_out); 415 err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
415 if (err) 416 if (err)
416 return err; 417 return err;
417 418
@@ -419,15 +420,16 @@ static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
419 cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED; 420 cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
420 421
421 if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) 422 if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
422 set_bit(__QLCNIC_DCB_STATE, &adapter->state); 423 set_bit(QLCNIC_DCB_STATE, &dcb->state);
423 424
424 return err; 425 return err;
425} 426}
426 427
427static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter, 428static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
428 char *buf, u8 type) 429 char *buf, u8 type)
429{ 430{
430 u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le); 431 u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
432 struct qlcnic_adapter *adapter = dcb->adapter;
431 struct qlcnic_82xx_dcb_param_mbx_le *prsp_le; 433 struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
432 struct device *dev = &adapter->pdev->dev; 434 struct device *dev = &adapter->pdev->dev;
433 dma_addr_t cardrsp_phys_addr; 435 dma_addr_t cardrsp_phys_addr;
@@ -447,8 +449,7 @@ static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
447 return -EINVAL; 449 return -EINVAL;
448 } 450 }
449 451
450 addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr, 452 addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL);
451 GFP_KERNEL);
452 if (addr == NULL) 453 if (addr == NULL)
453 return -ENOMEM; 454 return -ENOMEM;
454 455
@@ -488,72 +489,67 @@ out:
488 qlcnic_free_mbx_args(&cmd); 489 qlcnic_free_mbx_args(&cmd);
489 490
490out_free_rsp: 491out_free_rsp:
491 dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr); 492 dma_free_coherent(dev, size, addr, cardrsp_phys_addr);
492 493
493 return err; 494 return err;
494} 495}
495 496
496static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) 497static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
497{ 498{
498 struct qlcnic_dcb_mbx_params *mbx; 499 struct qlcnic_dcb_mbx_params *mbx;
499 int err; 500 int err;
500 501
501 mbx = adapter->dcb->param; 502 mbx = dcb->param;
502 if (!mbx) 503 if (!mbx)
503 return 0; 504 return 0;
504 505
505 err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0], 506 err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0],
506 QLC_DCB_LOCAL_PARAM_FWID); 507 QLC_DCB_LOCAL_PARAM_FWID);
507 if (err) 508 if (err)
508 return err; 509 return err;
509 510
510 err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1], 511 err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1],
511 QLC_DCB_OPER_PARAM_FWID); 512 QLC_DCB_OPER_PARAM_FWID);
512 if (err) 513 if (err)
513 return err; 514 return err;
514 515
515 err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2], 516 err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2],
516 QLC_DCB_PEER_PARAM_FWID); 517 QLC_DCB_PEER_PARAM_FWID);
517 if (err) 518 if (err)
518 return err; 519 return err;
519 520
520 mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP; 521 mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
521 522
522 qlcnic_dcb_data_cee_param_map(adapter); 523 qlcnic_dcb_data_cee_param_map(dcb->adapter);
523 524
524 return err; 525 return err;
525} 526}
526 527
527static void qlcnic_dcb_aen_work(struct work_struct *work) 528static void qlcnic_dcb_aen_work(struct work_struct *work)
528{ 529{
529 struct qlcnic_adapter *adapter;
530 struct qlcnic_dcb *dcb; 530 struct qlcnic_dcb *dcb;
531 531
532 dcb = container_of(work, struct qlcnic_dcb, aen_work.work); 532 dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
533 adapter = dcb->adapter;
534 533
535 qlcnic_dcb_get_cee_cfg(adapter); 534 qlcnic_dcb_get_cee_cfg(dcb);
536 clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state); 535 clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state);
537} 536}
538 537
539static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter, 538static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
540 void *data)
541{ 539{
542 struct qlcnic_dcb *dcb = adapter->dcb; 540 if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
543
544 if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
545 return; 541 return;
546 542
547 queue_delayed_work(dcb->wq, &dcb->aen_work, 0); 543 queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
548} 544}
549 545
550static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) 546static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
551{ 547{
552 struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability; 548 struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
553 u32 mbx_out; 549 u32 mbx_out;
554 int err; 550 int err;
555 551
556 err = __qlcnic_dcb_get_capability(adapter, &mbx_out); 552 err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
557 if (err) 553 if (err)
558 return err; 554 return err;
559 555
@@ -565,14 +561,15 @@ static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
565 cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED; 561 cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
566 562
567 if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) 563 if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
568 set_bit(__QLCNIC_DCB_STATE, &adapter->state); 564 set_bit(QLCNIC_DCB_STATE, &dcb->state);
569 565
570 return err; 566 return err;
571} 567}
572 568
573static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter, 569static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
574 char *buf, u8 idx) 570 char *buf, u8 idx)
575{ 571{
572 struct qlcnic_adapter *adapter = dcb->adapter;
576 struct qlcnic_dcb_mbx_params mbx_out; 573 struct qlcnic_dcb_mbx_params mbx_out;
577 int err, i, j, k, max_app, size; 574 int err, i, j, k, max_app, size;
578 struct qlcnic_dcb_param *each; 575 struct qlcnic_dcb_param *each;
@@ -632,24 +629,23 @@ out:
632 return err; 629 return err;
633} 630}
634 631
635static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) 632static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
636{ 633{
637 struct qlcnic_dcb *dcb = adapter->dcb;
638 int err; 634 int err;
639 635
640 err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0); 636 err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0);
641 if (err) 637 if (err)
642 return err; 638 return err;
643 639
644 qlcnic_dcb_data_cee_param_map(adapter); 640 qlcnic_dcb_data_cee_param_map(dcb->adapter);
645 641
646 return err; 642 return err;
647} 643}
648 644
649static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter, 645static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
650 bool flag)
651{ 646{
652 u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC); 647 u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
648 struct qlcnic_adapter *adapter = dcb->adapter;
653 struct qlcnic_cmd_args cmd; 649 struct qlcnic_cmd_args cmd;
654 int err; 650 int err;
655 651
@@ -669,19 +665,17 @@ static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
669 return err; 665 return err;
670} 666}
671 667
672static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter, 668static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
673 void *data)
674{ 669{
675 struct qlcnic_dcb *dcb = adapter->dcb;
676 u32 *val = data; 670 u32 *val = data;
677 671
678 if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state)) 672 if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
679 return; 673 return;
680 674
681 if (*val & BIT_8) 675 if (*val & BIT_8)
682 set_bit(__QLCNIC_DCB_STATE, &adapter->state); 676 set_bit(QLCNIC_DCB_STATE, &dcb->state);
683 else 677 else
684 clear_bit(__QLCNIC_DCB_STATE, &adapter->state); 678 clear_bit(QLCNIC_DCB_STATE, &dcb->state);
685 679
686 queue_delayed_work(dcb->wq, &dcb->aen_work, 0); 680 queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
687} 681}
@@ -814,12 +808,12 @@ static u8 qlcnic_dcb_get_state(struct net_device *netdev)
814{ 808{
815 struct qlcnic_adapter *adapter = netdev_priv(netdev); 809 struct qlcnic_adapter *adapter = netdev_priv(netdev);
816 810
817 return test_bit(__QLCNIC_DCB_STATE, &adapter->state); 811 return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state);
818} 812}
819 813
820static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr) 814static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
821{ 815{
822 memcpy(addr, netdev->dev_addr, netdev->addr_len); 816 memcpy(addr, netdev->perm_addr, netdev->addr_len);
823} 817}
824 818
825static void 819static void
@@ -834,7 +828,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
834 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; 828 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
835 *prio = *pgid = *bw_per = *up_tc_map = 0; 829 *prio = *pgid = *bw_per = *up_tc_map = 0;
836 830
837 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) || 831 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
838 !type->tc_param_valid) 832 !type->tc_param_valid)
839 return; 833 return;
840 834
@@ -870,7 +864,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
870 *bw_pct = 0; 864 *bw_pct = 0;
871 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; 865 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
872 866
873 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) || 867 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
874 !type->tc_param_valid) 868 !type->tc_param_valid)
875 return; 869 return;
876 870
@@ -896,7 +890,7 @@ static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
896 *setting = 0; 890 *setting = 0;
897 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; 891 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
898 892
899 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) || 893 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
900 !type->pfc_mode_enable) 894 !type->pfc_mode_enable)
901 return; 895 return;
902 896
@@ -915,7 +909,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
915{ 909{
916 struct qlcnic_adapter *adapter = netdev_priv(netdev); 910 struct qlcnic_adapter *adapter = netdev_priv(netdev);
917 911
918 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) 912 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
919 return 0; 913 return 0;
920 914
921 switch (capid) { 915 switch (capid) {
@@ -944,7 +938,7 @@ static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
944 struct qlcnic_adapter *adapter = netdev_priv(netdev); 938 struct qlcnic_adapter *adapter = netdev_priv(netdev);
945 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; 939 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
946 940
947 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) 941 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
948 return -EINVAL; 942 return -EINVAL;
949 943
950 switch (attr) { 944 switch (attr) {
@@ -967,7 +961,7 @@ static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
967 .protocol = id, 961 .protocol = id,
968 }; 962 };
969 963
970 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) 964 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
971 return 0; 965 return 0;
972 966
973 return dcb_getapp(netdev, &app); 967 return dcb_getapp(netdev, &app);
@@ -978,7 +972,7 @@ static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
978 struct qlcnic_adapter *adapter = netdev_priv(netdev); 972 struct qlcnic_adapter *adapter = netdev_priv(netdev);
979 struct qlcnic_dcb *dcb = adapter->dcb; 973 struct qlcnic_dcb *dcb = adapter->dcb;
980 974
981 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) 975 if (!test_bit(QLCNIC_DCB_STATE, &dcb->state))
982 return 0; 976 return 0;
983 977
984 return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable; 978 return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
@@ -989,7 +983,7 @@ static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
989 struct qlcnic_adapter *adapter = netdev_priv(netdev); 983 struct qlcnic_adapter *adapter = netdev_priv(netdev);
990 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; 984 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
991 985
992 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) 986 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
993 return 0; 987 return 0;
994 988
995 return cfg->capability.dcb_capability; 989 return cfg->capability.dcb_capability;
@@ -1000,7 +994,7 @@ static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
1000 struct qlcnic_adapter *adapter = netdev_priv(netdev); 994 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1001 struct qlcnic_dcb_cee *type; 995 struct qlcnic_dcb_cee *type;
1002 996
1003 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) 997 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
1004 return 1; 998 return 1;
1005 999
1006 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; 1000 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
@@ -1055,7 +1049,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
1055 1049
1056 *app_count = 0; 1050 *app_count = 0;
1057 1051
1058 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) 1052 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
1059 return 0; 1053 return 0;
1060 1054
1061 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; 1055 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1076,7 +1070,7 @@ static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
1076 struct qlcnic_dcb_app *app; 1070 struct qlcnic_dcb_app *app;
1077 int i, j; 1071 int i, j;
1078 1072
1079 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) 1073 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
1080 return 0; 1074 return 0;
1081 1075
1082 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; 1076 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1101,7 +1095,7 @@ static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
1101 struct qlcnic_dcb_cee *peer; 1095 struct qlcnic_dcb_cee *peer;
1102 u8 i, j, k, map; 1096 u8 i, j, k, map;
1103 1097
1104 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) 1098 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
1105 return 0; 1099 return 0;
1106 1100
1107 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; 1101 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1136,7 +1130,7 @@ static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
1136 1130
1137 pfc->pfc_en = 0; 1131 pfc->pfc_en = 0;
1138 1132
1139 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) 1133 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
1140 return 0; 1134 return 0;
1141 1135
1142 peer = &cfg->type[QLC_DCB_PEER_IDX]; 1136 peer = &cfg->type[QLC_DCB_PEER_IDX];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index b87ce9fb503e..c04ae0cdc108 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -8,26 +8,29 @@
8#ifndef __QLCNIC_DCBX_H 8#ifndef __QLCNIC_DCBX_H
9#define __QLCNIC_DCBX_H 9#define __QLCNIC_DCBX_H
10 10
11void qlcnic_clear_dcb_ops(struct qlcnic_adapter *); 11#define QLCNIC_DCB_STATE 0
12#define QLCNIC_DCB_AEN_MODE 1
12 13
13#ifdef CONFIG_QLCNIC_DCB 14#ifdef CONFIG_QLCNIC_DCB
14int __qlcnic_register_dcb(struct qlcnic_adapter *); 15int qlcnic_register_dcb(struct qlcnic_adapter *);
15#else 16#else
16static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter) 17static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
17{ return 0; } 18{ return 0; }
18#endif 19#endif
19 20
21struct qlcnic_dcb;
22
20struct qlcnic_dcb_ops { 23struct qlcnic_dcb_ops {
21 void (*init_dcbnl_ops) (struct qlcnic_adapter *); 24 int (*query_hw_capability) (struct qlcnic_dcb *, char *);
22 void (*free) (struct qlcnic_adapter *); 25 int (*get_hw_capability) (struct qlcnic_dcb *);
23 int (*attach) (struct qlcnic_adapter *); 26 int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
24 int (*query_hw_capability) (struct qlcnic_adapter *, char *); 27 void (*init_dcbnl_ops) (struct qlcnic_dcb *);
25 int (*get_hw_capability) (struct qlcnic_adapter *); 28 int (*register_aen) (struct qlcnic_dcb *, bool);
26 void (*get_info) (struct qlcnic_adapter *); 29 void (*aen_handler) (struct qlcnic_dcb *, void *);
27 int (*query_cee_param) (struct qlcnic_adapter *, char *, u8); 30 int (*get_cee_cfg) (struct qlcnic_dcb *);
28 int (*get_cee_cfg) (struct qlcnic_adapter *); 31 void (*get_info) (struct qlcnic_dcb *);
29 int (*register_aen) (struct qlcnic_adapter *, bool); 32 int (*attach) (struct qlcnic_dcb *);
30 void (*handle_aen) (struct qlcnic_adapter *, void *); 33 void (*free) (struct qlcnic_dcb *);
31}; 34};
32 35
33struct qlcnic_dcb { 36struct qlcnic_dcb {
@@ -37,5 +40,85 @@ struct qlcnic_dcb {
37 struct workqueue_struct *wq; 40 struct workqueue_struct *wq;
38 struct qlcnic_dcb_ops *ops; 41 struct qlcnic_dcb_ops *ops;
39 struct qlcnic_dcb_cfg *cfg; 42 struct qlcnic_dcb_cfg *cfg;
43 unsigned long state;
40}; 44};
45
46static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
47{
48 kfree(dcb);
49 dcb = NULL;
50}
51
52static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
53{
54 if (dcb && dcb->ops->get_hw_capability)
55 return dcb->ops->get_hw_capability(dcb);
56
57 return 0;
58}
59
60static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
61{
62 if (dcb && dcb->ops->free)
63 dcb->ops->free(dcb);
64}
65
66static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
67{
68 if (dcb && dcb->ops->attach)
69 return dcb->ops->attach(dcb);
70
71 return 0;
72}
73
74static inline int
75qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
76{
77 if (dcb && dcb->ops->query_hw_capability)
78 return dcb->ops->query_hw_capability(dcb, buf);
79
80 return 0;
81}
82
83static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
84{
85 if (dcb && dcb->ops->get_info)
86 dcb->ops->get_info(dcb);
87}
88
89static inline int
90qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
91{
92 if (dcb && dcb->ops->query_cee_param)
93 return dcb->ops->query_cee_param(dcb, buf, type);
94
95 return 0;
96}
97
98static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
99{
100 if (dcb && dcb->ops->get_cee_cfg)
101 return dcb->ops->get_cee_cfg(dcb);
102
103 return 0;
104}
105
106static inline void
107qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
108{
109 if (dcb && dcb->ops->register_aen)
110 dcb->ops->register_aen(dcb, flag);
111}
112
113static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
114{
115 if (dcb && dcb->ops->aen_handler)
116 dcb->ops->aen_handler(dcb, msg);
117}
118
119static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
120{
121 if (dcb && dcb->ops->init_dcbnl_ops)
122 dcb->ops->init_dcbnl_ops(dcb);
123}
41#endif 124#endif
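
The qlcnic_dcb.h hunk above moves the NULL-safe inline wrappers out of qlcnic.h and retargets them from struct qlcnic_adapter to struct qlcnic_dcb, so every wrapper now guards both the dcb pointer and the individual op before dispatching. A sketch of the caller-side pattern this enables, assuming the qlcnic headers are included; foo_setup_dcb() is a hypothetical caller, not a function from this series:

/* Mirrors the attach pattern used in the qlcnic_83xx_init.c hunk above. */
static void foo_setup_dcb(struct qlcnic_adapter *adapter)
{
	struct qlcnic_dcb *dcb = adapter->dcb;	/* may be NULL */

	/*
	 * Each wrapper checks dcb and the specific op pointer, so a
	 * missing DCB context or an unimplemented op is simply a no-op.
	 */
	if (dcb && qlcnic_dcb_attach(dcb))
		qlcnic_clear_dcb_ops(dcb);
}
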
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index ff83a9fcd4c5..b2a8805997ca 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -187,8 +187,8 @@ static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
187 return -1; 187 return -1;
188} 188}
189 189
190#define QLCNIC_RING_REGS_COUNT 20 190#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
191#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32)) 191
192#define QLCNIC_MAX_EEPROM_LEN 1024 192#define QLCNIC_MAX_EEPROM_LEN 1024
193 193
194static const u32 diag_registers[] = { 194static const u32 diag_registers[] = {
@@ -219,7 +219,15 @@ static const u32 ext_diag_registers[] = {
219}; 219};
220 220
221#define QLCNIC_MGMT_API_VERSION 2 221#define QLCNIC_MGMT_API_VERSION 2
222#define QLCNIC_ETHTOOL_REGS_VER 3 222#define QLCNIC_ETHTOOL_REGS_VER 4
223
224static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
225{
226 int ring_regs_cnt = (adapter->max_drv_tx_rings * 5) +
227 (adapter->max_rds_rings * 2) +
228 (adapter->max_sds_rings * 3) + 5;
229 return ring_regs_cnt * sizeof(u32);
230}
223 231
224static int qlcnic_get_regs_len(struct net_device *dev) 232static int qlcnic_get_regs_len(struct net_device *dev)
225{ 233{
@@ -231,7 +239,9 @@ static int qlcnic_get_regs_len(struct net_device *dev)
231 else 239 else
232 len = sizeof(ext_diag_registers) + sizeof(diag_registers); 240 len = sizeof(ext_diag_registers) + sizeof(diag_registers);
233 241
234 return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1; 242 len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32));
243 len += qlcnic_get_ring_regs_len(adapter);
244 return len;
235} 245}
236 246
237static int qlcnic_get_eeprom_len(struct net_device *dev) 247static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -493,6 +503,8 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
493 struct qlcnic_adapter *adapter = netdev_priv(dev); 503 struct qlcnic_adapter *adapter = netdev_priv(dev);
494 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 504 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
495 struct qlcnic_host_sds_ring *sds_ring; 505 struct qlcnic_host_sds_ring *sds_ring;
506 struct qlcnic_host_rds_ring *rds_rings;
507 struct qlcnic_host_tx_ring *tx_ring;
496 u32 *regs_buff = p; 508 u32 *regs_buff = p;
497 int ring, i = 0; 509 int ring, i = 0;
498 510
@@ -512,21 +524,35 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
512 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) 524 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
513 return; 525 return;
514 526
515 regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/ 527 /* Marker btw regs and TX ring count */
516 528 regs_buff[i++] = 0xFFEFCDAB;
517 regs_buff[i++] = 1; /* No. of tx ring */ 529
518 regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); 530 regs_buff[i++] = adapter->max_drv_tx_rings; /* No. of TX ring */
519 regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer); 531 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
520 532 tx_ring = &adapter->tx_ring[ring];
521 regs_buff[i++] = 2; /* No. of rx ring */ 533 regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
522 regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer); 534 regs_buff[i++] = tx_ring->sw_consumer;
523 regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer); 535 regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
536 regs_buff[i++] = tx_ring->producer;
537 if (tx_ring->crb_intr_mask)
538 regs_buff[i++] = readl(tx_ring->crb_intr_mask);
539 else
540 regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED;
541 }
524 542
525 regs_buff[i++] = adapter->max_sds_rings; 543 regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */
544 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
545 rds_rings = &recv_ctx->rds_rings[ring];
546 regs_buff[i++] = readl(rds_rings->crb_rcv_producer);
547 regs_buff[i++] = rds_rings->producer;
548 }
526 549
550 regs_buff[i++] = adapter->max_sds_rings; /* No. of SDS ring */
527 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 551 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
528 sds_ring = &(recv_ctx->sds_rings[ring]); 552 sds_ring = &(recv_ctx->sds_rings[ring]);
529 regs_buff[i++] = readl(sds_ring->crb_sts_consumer); 553 regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
554 regs_buff[i++] = sds_ring->consumer;
555 regs_buff[i++] = readl(sds_ring->crb_intr_mask);
530 } 556 }
531} 557}
532 558
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index f8adc7b01f1f..73e72eb83bdf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -445,7 +445,7 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
445 445
446 mac_req = (struct qlcnic_mac_req *)&req.words[0]; 446 mac_req = (struct qlcnic_mac_req *)&req.words[0];
447 mac_req->op = op; 447 mac_req->op = op;
448 memcpy(mac_req->mac_addr, addr, 6); 448 memcpy(mac_req->mac_addr, addr, ETH_ALEN);
449 449
450 vlan_req = (struct qlcnic_vlan_req *)&req.words[1]; 450 vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
451 vlan_req->vlan_id = cpu_to_le16(vlan_id); 451 vlan_req->vlan_id = cpu_to_le16(vlan_id);
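
The hunk above only replaces the bare constant 6 with ETH_ALEN. A minimal sketch of the same idiom, assuming only the exported uapi header; copy_mac() is a made-up helper, not a driver function.

    #include <string.h>
    #include <stdint.h>
    #include <linux/if_ether.h>     /* defines ETH_ALEN (6) */

    void copy_mac(uint8_t dst[ETH_ALEN], const uint8_t src[ETH_ALEN])
    {
            memcpy(dst, src, ETH_ALEN);     /* preferred over a literal 6 */
    }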
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 11b4bb83b930..897627dd1d04 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1011,7 +1011,7 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
1011 } 1011 }
1012 break; 1012 break;
1013 case QLCNIC_C2H_OPCODE_GET_DCB_AEN: 1013 case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
1014 qlcnic_dcb_handle_aen(adapter, (void *)&msg); 1014 qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
1015 break; 1015 break;
1016 default: 1016 default:
1017 break; 1017 break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 9e61eb867452..dcf4a4e7ce23 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -819,7 +819,7 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
819int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) 819int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
820{ 820{
821 struct qlcnic_pci_info *pci_info; 821 struct qlcnic_pci_info *pci_info;
822 int i, ret = 0, j = 0; 822 int i, id = 0, ret = 0, j = 0;
823 u16 act_pci_func; 823 u16 act_pci_func;
824 u8 pfn; 824 u8 pfn;
825 825
@@ -860,7 +860,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
860 continue; 860 continue;
861 861
862 if (qlcnic_port_eswitch_cfg_capability(adapter)) { 862 if (qlcnic_port_eswitch_cfg_capability(adapter)) {
863 if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn)) 863 if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
864 &id))
864 adapter->npars[j].eswitch_status = true; 865 adapter->npars[j].eswitch_status = true;
865 else 866 else
866 continue; 867 continue;
@@ -875,15 +876,16 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
875 adapter->npars[j].min_bw = pci_info[i].tx_min_bw; 876 adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
876 adapter->npars[j].max_bw = pci_info[i].tx_max_bw; 877 adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
877 878
879 memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN);
878 j++; 880 j++;
879 } 881 }
880 882
881 if (qlcnic_82xx_check(adapter)) { 883 /* Update eSwitch status for adapters without per port eSwitch
884 * configuration capability
885 */
886 if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
882 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) 887 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
883 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; 888 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
884 } else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
885 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
886 qlcnic_enable_eswitch(adapter, i, 1);
887 } 889 }
888 890
889 kfree(pci_info); 891 kfree(pci_info);
@@ -2069,7 +2071,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2069 return err; 2071 return err;
2070 } 2072 }
2071 2073
2072 qlcnic_dcb_init_dcbnl_ops(adapter); 2074 qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
2073 2075
2074 return 0; 2076 return 0;
2075} 2077}
@@ -2164,17 +2166,6 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
2164 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); 2166 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
2165} 2167}
2166 2168
2167static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
2168{
2169 return __qlcnic_register_dcb(adapter);
2170}
2171
2172void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
2173{
2174 kfree(adapter->dcb);
2175 adapter->dcb = NULL;
2176}
2177
2178static int 2169static int
2179qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2170qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2180{ 2171{
@@ -2183,6 +2174,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2183 struct qlcnic_hardware_context *ahw; 2174 struct qlcnic_hardware_context *ahw;
2184 int err, pci_using_dac = -1; 2175 int err, pci_using_dac = -1;
2185 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ 2176 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
2177 struct qlcnic_dcb *dcb;
2186 2178
2187 if (pdev->is_virtfn) 2179 if (pdev->is_virtfn)
2188 return -ENODEV; 2180 return -ENODEV;
@@ -2303,8 +2295,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2303 2295
2304 adapter->flags |= QLCNIC_NEED_FLR; 2296 adapter->flags |= QLCNIC_NEED_FLR;
2305 2297
2306 if (adapter->dcb && qlcnic_dcb_attach(adapter)) 2298 dcb = adapter->dcb;
2307 qlcnic_clear_dcb_ops(adapter); 2299
2300 if (dcb && qlcnic_dcb_attach(dcb))
2301 qlcnic_clear_dcb_ops(dcb);
2308 2302
2309 } else if (qlcnic_83xx_check(adapter)) { 2303 } else if (qlcnic_83xx_check(adapter)) {
2310 adapter->max_drv_tx_rings = 1; 2304 adapter->max_drv_tx_rings = 1;
@@ -2412,7 +2406,6 @@ err_out_free_res:
2412 pci_release_regions(pdev); 2406 pci_release_regions(pdev);
2413 2407
2414err_out_disable_pdev: 2408err_out_disable_pdev:
2415 pci_set_drvdata(pdev, NULL);
2416 pci_disable_device(pdev); 2409 pci_disable_device(pdev);
2417 return err; 2410 return err;
2418 2411
@@ -2449,7 +2442,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
2449 qlcnic_cancel_idc_work(adapter); 2442 qlcnic_cancel_idc_work(adapter);
2450 ahw = adapter->ahw; 2443 ahw = adapter->ahw;
2451 2444
2452 qlcnic_dcb_free(adapter); 2445 qlcnic_dcb_free(adapter->dcb);
2453 2446
2454 unregister_netdev(netdev); 2447 unregister_netdev(netdev);
2455 qlcnic_sriov_cleanup(adapter); 2448 qlcnic_sriov_cleanup(adapter);
@@ -2488,7 +2481,6 @@ static void qlcnic_remove(struct pci_dev *pdev)
2488 pci_disable_pcie_error_reporting(pdev); 2481 pci_disable_pcie_error_reporting(pdev);
2489 pci_release_regions(pdev); 2482 pci_release_regions(pdev);
2490 pci_disable_device(pdev); 2483 pci_disable_device(pdev);
2491 pci_set_drvdata(pdev, NULL);
2492 2484
2493 if (adapter->qlcnic_wq) { 2485 if (adapter->qlcnic_wq) {
2494 destroy_workqueue(adapter->qlcnic_wq); 2486 destroy_workqueue(adapter->qlcnic_wq);
@@ -3327,7 +3319,7 @@ qlcnic_attach_work(struct work_struct *work)
3327 return; 3319 return;
3328 } 3320 }
3329attach: 3321attach:
3330 qlcnic_dcb_get_info(adapter); 3322 qlcnic_dcb_get_info(adapter->dcb);
3331 3323
3332 if (netif_running(netdev)) { 3324 if (netif_running(netdev)) {
3333 if (qlcnic_up(adapter, netdev)) 3325 if (qlcnic_up(adapter, netdev))
@@ -3352,6 +3344,8 @@ done:
3352static int 3344static int
3353qlcnic_check_health(struct qlcnic_adapter *adapter) 3345qlcnic_check_health(struct qlcnic_adapter *adapter)
3354{ 3346{
3347 struct qlcnic_hardware_context *ahw = adapter->ahw;
3348 struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
3355 u32 state = 0, heartbeat; 3349 u32 state = 0, heartbeat;
3356 u32 peg_status; 3350 u32 peg_status;
3357 int err = 0; 3351 int err = 0;
@@ -3376,7 +3370,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
3376 if (adapter->need_fw_reset) 3370 if (adapter->need_fw_reset)
3377 goto detach; 3371 goto detach;
3378 3372
3379 if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) 3373 if (ahw->reset_context && qlcnic_auto_fw_reset)
3380 qlcnic_reset_hw_context(adapter); 3374 qlcnic_reset_hw_context(adapter);
3381 3375
3382 return 0; 3376 return 0;
@@ -3419,6 +3413,9 @@ detach:
3419 3413
3420 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); 3414 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
3421 QLCDB(adapter, DRV, "fw recovery scheduled.\n"); 3415 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3416 } else if (!qlcnic_auto_fw_reset && fw_dump->enable &&
3417 adapter->flags & QLCNIC_FW_RESET_OWNER) {
3418 qlcnic_dump_fw(adapter);
3422 } 3419 }
3423 3420
3424 return 1; 3421 return 1;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 15513608d480..7763962e2ec4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1187,41 +1187,38 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1187 } 1187 }
1188 1188
1189 if (ops_index == ops_cnt) { 1189 if (ops_index == ops_cnt) {
1190 dev_info(&adapter->pdev->dev, 1190 dev_info(dev, "Skipping unknown entry opcode %d\n",
1191 "Invalid entry type %d, exiting dump\n",
1192 entry->hdr.type); 1191 entry->hdr.type);
1193 goto error; 1192 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1193 entry_offset += entry->hdr.offset;
1194 continue;
1194 } 1195 }
1195 1196
1196 /* Collect dump for this entry */ 1197 /* Collect dump for this entry */
1197 dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer); 1198 dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
1198 if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump)) 1199 if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
1199 entry->hdr.flags |= QLCNIC_DUMP_SKIP; 1200 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1201 entry_offset += entry->hdr.offset;
1202 continue;
1203 }
1204
1200 buf_offset += entry->hdr.cap_size; 1205 buf_offset += entry->hdr.cap_size;
1201 entry_offset += entry->hdr.offset; 1206 entry_offset += entry->hdr.offset;
1202 buffer = fw_dump->data + buf_offset; 1207 buffer = fw_dump->data + buf_offset;
1203 } 1208 }
1204 if (dump_size != buf_offset) { 1209
1205 dev_info(&adapter->pdev->dev, 1210 fw_dump->clr = 1;
1206 "Captured(%d) and expected size(%d) do not match\n", 1211 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
1207 buf_offset, dump_size); 1212 dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n",
1208 goto error; 1213 adapter->netdev->name, fw_dump->size, tmpl_hdr->size);
1209 } else { 1214 /* Send a udev event to notify availability of FW dump */
1210 fw_dump->clr = 1; 1215 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
1211 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", 1216
1212 adapter->netdev->name);
1213 dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
1214 adapter->netdev->name, fw_dump->size);
1215 /* Send a udev event to notify availability of FW dump */
1216 kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
1217 return 0;
1218 }
1219error:
1220 if (fw_dump->use_pex_dma) 1217 if (fw_dump->use_pex_dma)
1221 dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE, 1218 dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
1222 fw_dump->dma_buffer, fw_dump->phys_addr); 1219 fw_dump->dma_buffer, fw_dump->phys_addr);
1223 vfree(fw_dump->data); 1220
1224 return -EINVAL; 1221 return 0;
1225} 1222}
1226 1223
1227void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) 1224void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
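
With this change the dump loop skips unknown entry opcodes instead of aborting, and on completion it marks the dump as collected and notifies user space through a udev change event. A hedged, kernel-side sketch of that notification pattern follows; the helper name, the device pointer and the buffer size are placeholders, not driver code.

    #include <linux/kernel.h>
    #include <linux/device.h>
    #include <linux/kobject.h>

    /* Illustrative only: dev is some struct device *, name a netdev name. */
    static void notify_fw_dump(struct device *dev, const char *name)
    {
            char mesg[64];
            char *envp[] = { mesg, NULL };

            snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", name);
            /* Emits a KOBJ_CHANGE uevent carrying FW_DUMP=<ifname> in the
             * environment, which udev rules can match on to collect the dump.
             */
            kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
    }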
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 392b9bd12b4f..8b96e29df30f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -500,6 +500,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
500static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, 500static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
501 int pci_using_dac) 501 int pci_using_dac)
502{ 502{
503 struct qlcnic_dcb *dcb;
503 int err; 504 int err;
504 505
505 INIT_LIST_HEAD(&adapter->vf_mc_list); 506 INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -533,8 +534,10 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
533 if (err) 534 if (err)
534 goto err_out_send_channel_term; 535 goto err_out_send_channel_term;
535 536
536 if (adapter->dcb && qlcnic_dcb_attach(adapter)) 537 dcb = adapter->dcb;
537 qlcnic_clear_dcb_ops(adapter); 538
539 if (dcb && qlcnic_dcb_attach(dcb))
540 qlcnic_clear_dcb_ops(dcb);
538 541
539 err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac); 542 err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
540 if (err) 543 if (err)
@@ -1577,7 +1580,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1577 if (err) 1580 if (err)
1578 goto err_out_term_channel; 1581 goto err_out_term_channel;
1579 1582
1580 qlcnic_dcb_get_info(adapter); 1583 qlcnic_dcb_get_info(adapter->dcb);
1581 1584
1582 return 0; 1585 return 0;
1583 1586
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 899433778466..0c9c4e895595 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
18 */ 18 */
19#define DRV_NAME "qlge" 19#define DRV_NAME "qlge"
20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
21#define DRV_VERSION "v1.00.00.32" 21#define DRV_VERSION "1.00.00.33"
22 22
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24 24
@@ -2206,14 +2206,14 @@ extern char qlge_driver_name[];
2206extern const char qlge_driver_version[]; 2206extern const char qlge_driver_version[];
2207extern const struct ethtool_ops qlge_ethtool_ops; 2207extern const struct ethtool_ops qlge_ethtool_ops;
2208 2208
2209extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask); 2209int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
2210extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask); 2210void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
2211extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data); 2211int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
2212extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, 2212int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
2213 u32 *value); 2213 u32 *value);
2214extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value); 2214int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
2215extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, 2215int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
2216 u16 q_id); 2216 u16 q_id);
2217void ql_queue_fw_error(struct ql_adapter *qdev); 2217void ql_queue_fw_error(struct ql_adapter *qdev);
2218void ql_mpi_work(struct work_struct *work); 2218void ql_mpi_work(struct work_struct *work);
2219void ql_mpi_reset_work(struct work_struct *work); 2219void ql_mpi_reset_work(struct work_struct *work);
@@ -2233,10 +2233,9 @@ int ql_unpause_mpi_risc(struct ql_adapter *qdev);
2233int ql_pause_mpi_risc(struct ql_adapter *qdev); 2233int ql_pause_mpi_risc(struct ql_adapter *qdev);
2234int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); 2234int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
2235int ql_soft_reset_mpi_risc(struct ql_adapter *qdev); 2235int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
2236int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, 2236int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
2237 u32 ram_addr, int word_count); 2237 int word_count);
2238int ql_core_dump(struct ql_adapter *qdev, 2238int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
2239 struct ql_mpi_coredump *mpi_coredump);
2240int ql_mb_about_fw(struct ql_adapter *qdev); 2239int ql_mb_about_fw(struct ql_adapter *qdev);
2241int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); 2240int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
2242int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol); 2241int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
@@ -2249,8 +2248,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
2249int ql_mb_set_port_cfg(struct ql_adapter *qdev); 2248int ql_mb_set_port_cfg(struct ql_adapter *qdev);
2250int ql_wait_fifo_empty(struct ql_adapter *qdev); 2249int ql_wait_fifo_empty(struct ql_adapter *qdev);
2251void ql_get_dump(struct ql_adapter *qdev, void *buff); 2250void ql_get_dump(struct ql_adapter *qdev, void *buff);
2252void ql_gen_reg_dump(struct ql_adapter *qdev, 2251void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
2253 struct ql_reg_dump *mpi_coredump);
2254netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); 2252netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
2255void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); 2253void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
2256int ql_own_firmware(struct ql_adapter *qdev); 2254int ql_own_firmware(struct ql_adapter *qdev);
@@ -2264,9 +2262,9 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
2264/* #define QL_OB_DUMP */ 2262/* #define QL_OB_DUMP */
2265 2263
2266#ifdef QL_REG_DUMP 2264#ifdef QL_REG_DUMP
2267extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); 2265void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
2268extern void ql_dump_routing_entries(struct ql_adapter *qdev); 2266void ql_dump_routing_entries(struct ql_adapter *qdev);
2269extern void ql_dump_regs(struct ql_adapter *qdev); 2267void ql_dump_regs(struct ql_adapter *qdev);
2270#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev) 2268#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
2271#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev) 2269#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
2272#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev) 2270#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
@@ -2277,26 +2275,26 @@ extern void ql_dump_regs(struct ql_adapter *qdev);
2277#endif 2275#endif
2278 2276
2279#ifdef QL_STAT_DUMP 2277#ifdef QL_STAT_DUMP
2280extern void ql_dump_stat(struct ql_adapter *qdev); 2278void ql_dump_stat(struct ql_adapter *qdev);
2281#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev) 2279#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
2282#else 2280#else
2283#define QL_DUMP_STAT(qdev) 2281#define QL_DUMP_STAT(qdev)
2284#endif 2282#endif
2285 2283
2286#ifdef QL_DEV_DUMP 2284#ifdef QL_DEV_DUMP
2287extern void ql_dump_qdev(struct ql_adapter *qdev); 2285void ql_dump_qdev(struct ql_adapter *qdev);
2288#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev) 2286#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
2289#else 2287#else
2290#define QL_DUMP_QDEV(qdev) 2288#define QL_DUMP_QDEV(qdev)
2291#endif 2289#endif
2292 2290
2293#ifdef QL_CB_DUMP 2291#ifdef QL_CB_DUMP
2294extern void ql_dump_wqicb(struct wqicb *wqicb); 2292void ql_dump_wqicb(struct wqicb *wqicb);
2295extern void ql_dump_tx_ring(struct tx_ring *tx_ring); 2293void ql_dump_tx_ring(struct tx_ring *tx_ring);
2296extern void ql_dump_ricb(struct ricb *ricb); 2294void ql_dump_ricb(struct ricb *ricb);
2297extern void ql_dump_cqicb(struct cqicb *cqicb); 2295void ql_dump_cqicb(struct cqicb *cqicb);
2298extern void ql_dump_rx_ring(struct rx_ring *rx_ring); 2296void ql_dump_rx_ring(struct rx_ring *rx_ring);
2299extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id); 2297void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
2300#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb) 2298#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
2301#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb) 2299#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
2302#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring) 2300#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
@@ -2314,9 +2312,9 @@ extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
2314#endif 2312#endif
2315 2313
2316#ifdef QL_OB_DUMP 2314#ifdef QL_OB_DUMP
2317extern void ql_dump_tx_desc(struct tx_buf_desc *tbd); 2315void ql_dump_tx_desc(struct tx_buf_desc *tbd);
2318extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb); 2316void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
2319extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp); 2317void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
2320#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb) 2318#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
2321#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp) 2319#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
2322#else 2320#else
@@ -2325,14 +2323,14 @@ extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
2325#endif 2323#endif
2326 2324
2327#ifdef QL_IB_DUMP 2325#ifdef QL_IB_DUMP
2328extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp); 2326void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
2329#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp) 2327#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
2330#else 2328#else
2331#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) 2329#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
2332#endif 2330#endif
2333 2331
2334#ifdef QL_ALL_DUMP 2332#ifdef QL_ALL_DUMP
2335extern void ql_dump_all(struct ql_adapter *qdev); 2333void ql_dump_all(struct ql_adapter *qdev);
2336#define QL_DUMP_ALL(qdev) ql_dump_all(qdev) 2334#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
2337#else 2335#else
2338#define QL_DUMP_ALL(qdev) 2336#define QL_DUMP_ALL(qdev)
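
The qlge.h churn above only drops the redundant extern storage-class specifier from function declarations; at file scope a function declaration has external linkage either way, so the shorter form is preferred for headers. A two-line illustration (ql_dump_all is taken from the header above; the equivalence is general C):

    struct ql_adapter;                                  /* opaque, as in the driver */
    extern void ql_dump_all(struct ql_adapter *qdev);   /* old style */
    void ql_dump_all(struct ql_adapter *qdev);          /* new style, identical linkage */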
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 2553cf4503b9..a245dc18d769 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -96,8 +96,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
96 96
97MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); 97MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98 98
99static int ql_wol(struct ql_adapter *qdev); 99static int ql_wol(struct ql_adapter *);
100static void qlge_set_multicast_list(struct net_device *ndev); 100static void qlge_set_multicast_list(struct net_device *);
101static int ql_adapter_down(struct ql_adapter *);
102static int ql_adapter_up(struct ql_adapter *);
101 103
102/* This hardware semaphore causes exclusive access to 104/* This hardware semaphore causes exclusive access to
103 * resources shared between the NIC driver, MPI firmware, 105 * resources shared between the NIC driver, MPI firmware,
@@ -1464,6 +1466,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1464 } 1466 }
1465} 1467}
1466 1468
1469/**
1470 * ql_update_mac_hdr_len - helper routine to update the mac header length
1471 * based on vlan tags if present
1472 */
1473static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1474 struct ib_mac_iocb_rsp *ib_mac_rsp,
1475 void *page, size_t *len)
1476{
1477 u16 *tags;
1478
1479 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1480 return;
1481 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1482 tags = (u16 *)page;
1483 /* Look for stacked vlan tags in ethertype field */
1484 if (tags[6] == ETH_P_8021Q &&
1485 tags[8] == ETH_P_8021Q)
1486 *len += 2 * VLAN_HLEN;
1487 else
1488 *len += VLAN_HLEN;
1489 }
1490}
1491
1467/* Process an inbound completion from an rx ring. */ 1492/* Process an inbound completion from an rx ring. */
1468static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, 1493static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1469 struct rx_ring *rx_ring, 1494 struct rx_ring *rx_ring,
@@ -1523,6 +1548,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1523 void *addr; 1548 void *addr;
1524 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); 1549 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1525 struct napi_struct *napi = &rx_ring->napi; 1550 struct napi_struct *napi = &rx_ring->napi;
1551 size_t hlen = ETH_HLEN;
1526 1552
1527 skb = netdev_alloc_skb(ndev, length); 1553 skb = netdev_alloc_skb(ndev, length);
1528 if (!skb) { 1554 if (!skb) {
@@ -1540,25 +1566,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1540 goto err_out; 1566 goto err_out;
1541 } 1567 }
1542 1568
1569 /* Update the MAC header length*/
1570 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1571
1543 /* The max framesize filter on this chip is set higher than 1572 /* The max framesize filter on this chip is set higher than
1544 * MTU since FCoE uses 2k frames. 1573 * MTU since FCoE uses 2k frames.
1545 */ 1574 */
1546 if (skb->len > ndev->mtu + ETH_HLEN) { 1575 if (skb->len > ndev->mtu + hlen) {
1547 netif_err(qdev, drv, qdev->ndev, 1576 netif_err(qdev, drv, qdev->ndev,
1548 "Segment too small, dropping.\n"); 1577 "Segment too small, dropping.\n");
1549 rx_ring->rx_dropped++; 1578 rx_ring->rx_dropped++;
1550 goto err_out; 1579 goto err_out;
1551 } 1580 }
1552 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN); 1581 memcpy(skb_put(skb, hlen), addr, hlen);
1553 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 1582 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1554 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", 1583 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1555 length); 1584 length);
1556 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, 1585 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1557 lbq_desc->p.pg_chunk.offset+ETH_HLEN, 1586 lbq_desc->p.pg_chunk.offset + hlen,
1558 length-ETH_HLEN); 1587 length - hlen);
1559 skb->len += length-ETH_HLEN; 1588 skb->len += length - hlen;
1560 skb->data_len += length-ETH_HLEN; 1589 skb->data_len += length - hlen;
1561 skb->truesize += length-ETH_HLEN; 1590 skb->truesize += length - hlen;
1562 1591
1563 rx_ring->rx_packets++; 1592 rx_ring->rx_packets++;
1564 rx_ring->rx_bytes += skb->len; 1593 rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1605,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1576 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { 1605 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1577 /* Unfragmented ipv4 UDP frame. */ 1606 /* Unfragmented ipv4 UDP frame. */
1578 struct iphdr *iph = 1607 struct iphdr *iph =
1579 (struct iphdr *) ((u8 *)addr + ETH_HLEN); 1608 (struct iphdr *)((u8 *)addr + hlen);
1580 if (!(iph->frag_off & 1609 if (!(iph->frag_off &
1581 htons(IP_MF|IP_OFFSET))) { 1610 htons(IP_MF|IP_OFFSET))) {
1582 skb->ip_summed = CHECKSUM_UNNECESSARY; 1611 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1726,7 +1755,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1726 struct bq_desc *sbq_desc; 1755 struct bq_desc *sbq_desc;
1727 struct sk_buff *skb = NULL; 1756 struct sk_buff *skb = NULL;
1728 u32 length = le32_to_cpu(ib_mac_rsp->data_len); 1757 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1729 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); 1758 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1759 size_t hlen = ETH_HLEN;
1730 1760
1731 /* 1761 /*
1732 * Handle the header buffer if present. 1762 * Handle the header buffer if present.
@@ -1853,9 +1883,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1853 skb->data_len += length; 1883 skb->data_len += length;
1854 skb->truesize += length; 1884 skb->truesize += length;
1855 length -= length; 1885 length -= length;
1856 __pskb_pull_tail(skb, 1886 ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1857 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? 1887 lbq_desc->p.pg_chunk.va,
1858 VLAN_ETH_HLEN : ETH_HLEN); 1888 &hlen);
1889 __pskb_pull_tail(skb, hlen);
1859 } 1890 }
1860 } else { 1891 } else {
1861 /* 1892 /*
@@ -1910,8 +1941,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1910 length -= size; 1941 length -= size;
1911 i++; 1942 i++;
1912 } 1943 }
1913 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? 1944 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1914 VLAN_ETH_HLEN : ETH_HLEN); 1945 &hlen);
1946 __pskb_pull_tail(skb, hlen);
1915 } 1947 }
1916 return skb; 1948 return skb;
1917} 1949}
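
The RX-path hunks above stop assuming a fixed ETH_HLEN header and instead grow the header length by one or two VLAN tags when the NIC is not stripping them in hardware. A small stand-alone sketch of that length calculation; the helper and its tag-count argument are illustrative, not the driver's ql_update_mac_hdr_len().

    #include <stdio.h>
    #include <stddef.h>

    #define ETH_HLEN  14    /* dst MAC + src MAC + EtherType */
    #define VLAN_HLEN 4     /* one 802.1Q tag: TPID + TCI */

    /* n_tags: number of VLAN tags still present in the frame (0, 1 or 2) */
    static size_t mac_hdr_len(unsigned int n_tags)
    {
            return ETH_HLEN + n_tags * VLAN_HLEN;
    }

    int main(void)
    {
            /* untagged: 14, single-tagged: 18, QinQ: 22 bytes */
            for (unsigned int n = 0; n <= 2; n++)
                    printf("%u tag(s): %zu bytes\n", n, mac_hdr_len(n));
            return 0;
    }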
@@ -2003,7 +2035,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
2003 rx_ring->rx_packets++; 2035 rx_ring->rx_packets++;
2004 rx_ring->rx_bytes += skb->len; 2036 rx_ring->rx_bytes += skb->len;
2005 skb_record_rx_queue(skb, rx_ring->cq_id); 2037 skb_record_rx_queue(skb, rx_ring->cq_id);
2006 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0)) 2038 if (vlan_id != 0xffff)
2007 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); 2039 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2008 if (skb->ip_summed == CHECKSUM_UNNECESSARY) 2040 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2009 napi_gro_receive(&rx_ring->napi, skb); 2041 napi_gro_receive(&rx_ring->napi, skb);
@@ -2017,7 +2049,8 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2017 struct ib_mac_iocb_rsp *ib_mac_rsp) 2049 struct ib_mac_iocb_rsp *ib_mac_rsp)
2018{ 2050{
2019 u32 length = le32_to_cpu(ib_mac_rsp->data_len); 2051 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2020 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? 2052 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2053 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2021 ((le16_to_cpu(ib_mac_rsp->vlan_id) & 2054 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2022 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff; 2055 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2023 2056
@@ -2310,9 +2343,39 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2310 } 2343 }
2311} 2344}
2312 2345
2346/**
2347 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2348 * based on the features to enable/disable hardware vlan accel
2349 */
2350static int qlge_update_hw_vlan_features(struct net_device *ndev,
2351 netdev_features_t features)
2352{
2353 struct ql_adapter *qdev = netdev_priv(ndev);
2354 int status = 0;
2355
2356 status = ql_adapter_down(qdev);
2357 if (status) {
2358 netif_err(qdev, link, qdev->ndev,
2359 "Failed to bring down the adapter\n");
2360 return status;
2361 }
2362
 2363	/* update the features with the recent change */
2364 ndev->features = features;
2365
2366 status = ql_adapter_up(qdev);
2367 if (status) {
2368 netif_err(qdev, link, qdev->ndev,
2369 "Failed to bring up the adapter\n");
2370 return status;
2371 }
2372 return status;
2373}
2374
2313static netdev_features_t qlge_fix_features(struct net_device *ndev, 2375static netdev_features_t qlge_fix_features(struct net_device *ndev,
2314 netdev_features_t features) 2376 netdev_features_t features)
2315{ 2377{
2378 int err;
2316 /* 2379 /*
2317 * Since there is no support for separate rx/tx vlan accel 2380 * Since there is no support for separate rx/tx vlan accel
2318 * enable/disable make sure tx flag is always in same state as rx. 2381 * enable/disable make sure tx flag is always in same state as rx.
@@ -2322,6 +2385,11 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
2322 else 2385 else
2323 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 2386 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2324 2387
2388 /* Update the behavior of vlan accel in the adapter */
2389 err = qlge_update_hw_vlan_features(ndev, features);
2390 if (err)
2391 return err;
2392
2325 return features; 2393 return features;
2326} 2394}
2327 2395
@@ -3704,8 +3772,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3704 ql_write32(qdev, SYS, mask | value); 3772 ql_write32(qdev, SYS, mask | value);
3705 3773
3706 /* Set the default queue, and VLAN behavior. */ 3774 /* Set the default queue, and VLAN behavior. */
3707 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV; 3775 value = NIC_RCV_CFG_DFQ;
3708 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16); 3776 mask = NIC_RCV_CFG_DFQ_MASK;
3777 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3778 value |= NIC_RCV_CFG_RV;
3779 mask |= (NIC_RCV_CFG_RV << 16);
3780 }
3709 ql_write32(qdev, NIC_RCV_CFG, (mask | value)); 3781 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3710 3782
3711 /* Set the MPI interrupt to enabled. */ 3783 /* Set the MPI interrupt to enabled. */
@@ -4505,7 +4577,6 @@ static void ql_release_all(struct pci_dev *pdev)
4505 iounmap(qdev->doorbell_area); 4577 iounmap(qdev->doorbell_area);
4506 vfree(qdev->mpi_coredump); 4578 vfree(qdev->mpi_coredump);
4507 pci_release_regions(pdev); 4579 pci_release_regions(pdev);
4508 pci_set_drvdata(pdev, NULL);
4509} 4580}
4510 4581
4511static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, 4582static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
@@ -4692,11 +4763,15 @@ static int qlge_probe(struct pci_dev *pdev,
4692 4763
4693 qdev = netdev_priv(ndev); 4764 qdev = netdev_priv(ndev);
4694 SET_NETDEV_DEV(ndev, &pdev->dev); 4765 SET_NETDEV_DEV(ndev, &pdev->dev);
4695 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 4766 ndev->hw_features = NETIF_F_SG |
4696 NETIF_F_TSO | NETIF_F_TSO_ECN | 4767 NETIF_F_IP_CSUM |
4697 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM; 4768 NETIF_F_TSO |
4698 ndev->features = ndev->hw_features | 4769 NETIF_F_TSO_ECN |
4699 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; 4770 NETIF_F_HW_VLAN_CTAG_TX |
4771 NETIF_F_HW_VLAN_CTAG_RX |
4772 NETIF_F_HW_VLAN_CTAG_FILTER |
4773 NETIF_F_RXCSUM;
4774 ndev->features = ndev->hw_features;
4700 ndev->vlan_features = ndev->hw_features; 4775 ndev->vlan_features = ndev->hw_features;
4701 4776
4702 if (test_bit(QL_DMA64, &qdev->flags)) 4777 if (test_bit(QL_DMA64, &qdev->flags))
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index e9dc84943cfc..1e49ec5b2232 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1231,7 +1231,6 @@ err_out_mdio:
1231 mdiobus_free(lp->mii_bus); 1231 mdiobus_free(lp->mii_bus);
1232err_out_unmap: 1232err_out_unmap:
1233 netif_napi_del(&lp->napi); 1233 netif_napi_del(&lp->napi);
1234 pci_set_drvdata(pdev, NULL);
1235 pci_iounmap(pdev, ioaddr); 1234 pci_iounmap(pdev, ioaddr);
1236err_out_free_res: 1235err_out_free_res:
1237 pci_release_regions(pdev); 1236 pci_release_regions(pdev);
@@ -1257,7 +1256,6 @@ static void r6040_remove_one(struct pci_dev *pdev)
1257 pci_release_regions(pdev); 1256 pci_release_regions(pdev);
1258 free_netdev(dev); 1257 free_netdev(dev);
1259 pci_disable_device(pdev); 1258 pci_disable_device(pdev);
1260 pci_set_drvdata(pdev, NULL);
1261} 1259}
1262 1260
1263 1261
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d2e591955bdd..f2a2128165dd 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -2052,7 +2052,6 @@ static void cp_remove_one (struct pci_dev *pdev)
2052 pci_release_regions(pdev); 2052 pci_release_regions(pdev);
2053 pci_clear_mwi(pdev); 2053 pci_clear_mwi(pdev);
2054 pci_disable_device(pdev); 2054 pci_disable_device(pdev);
2055 pci_set_drvdata(pdev, NULL);
2056 free_netdev(dev); 2055 free_netdev(dev);
2057} 2056}
2058 2057
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 3ccedeb8aba0..50a92104dd0a 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -727,7 +727,6 @@ static void __rtl8139_cleanup_dev (struct net_device *dev)
727 pci_release_regions (pdev); 727 pci_release_regions (pdev);
728 728
729 free_netdev(dev); 729 free_netdev(dev);
730 pci_set_drvdata (pdev, NULL);
731} 730}
732 731
733 732
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3397cee89777..799387570766 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6811,7 +6811,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
6811 6811
6812 rtl_disable_msi(pdev, tp); 6812 rtl_disable_msi(pdev, tp);
6813 rtl8169_release_board(pdev, dev, tp->mmio_addr); 6813 rtl8169_release_board(pdev, dev, tp->mmio_addr);
6814 pci_set_drvdata(pdev, NULL);
6815} 6814}
6816 6815
6817static const struct net_device_ops rtl_netdev_ops = { 6816static const struct net_device_ops rtl_netdev_ops = {
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b57c278d3b46..7258366f7e0b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -872,7 +872,7 @@ static void update_mac_address(struct net_device *ndev)
872static void read_mac_address(struct net_device *ndev, unsigned char *mac) 872static void read_mac_address(struct net_device *ndev, unsigned char *mac)
873{ 873{
874 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { 874 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
875 memcpy(ndev->dev_addr, mac, 6); 875 memcpy(ndev->dev_addr, mac, ETH_ALEN);
876 } else { 876 } else {
877 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); 877 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
878 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; 878 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 21f9ad6392e9..676c3c057bfb 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -285,6 +285,181 @@ static int efx_ef10_free_vis(struct efx_nic *efx)
285 return rc; 285 return rc;
286} 286}
287 287
288#ifdef EFX_USE_PIO
289
290static void efx_ef10_free_piobufs(struct efx_nic *efx)
291{
292 struct efx_ef10_nic_data *nic_data = efx->nic_data;
293 MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
294 unsigned int i;
295 int rc;
296
297 BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
298
299 for (i = 0; i < nic_data->n_piobufs; i++) {
300 MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
301 nic_data->piobuf_handle[i]);
302 rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
303 NULL, 0, NULL);
304 WARN_ON(rc);
305 }
306
307 nic_data->n_piobufs = 0;
308}
309
310static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
311{
312 struct efx_ef10_nic_data *nic_data = efx->nic_data;
313 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
314 unsigned int i;
315 size_t outlen;
316 int rc = 0;
317
318 BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
319
320 for (i = 0; i < n; i++) {
321 rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
322 outbuf, sizeof(outbuf), &outlen);
323 if (rc)
324 break;
325 if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
326 rc = -EIO;
327 break;
328 }
329 nic_data->piobuf_handle[i] =
330 MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
331 netif_dbg(efx, probe, efx->net_dev,
332 "allocated PIO buffer %u handle %x\n", i,
333 nic_data->piobuf_handle[i]);
334 }
335
336 nic_data->n_piobufs = i;
337 if (rc)
338 efx_ef10_free_piobufs(efx);
339 return rc;
340}
341
342static int efx_ef10_link_piobufs(struct efx_nic *efx)
343{
344 struct efx_ef10_nic_data *nic_data = efx->nic_data;
345 MCDI_DECLARE_BUF(inbuf,
346 max(MC_CMD_LINK_PIOBUF_IN_LEN,
347 MC_CMD_UNLINK_PIOBUF_IN_LEN));
348 struct efx_channel *channel;
349 struct efx_tx_queue *tx_queue;
350 unsigned int offset, index;
351 int rc;
352
353 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
354 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
355
356 /* Link a buffer to each VI in the write-combining mapping */
357 for (index = 0; index < nic_data->n_piobufs; ++index) {
358 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
359 nic_data->piobuf_handle[index]);
360 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
361 nic_data->pio_write_vi_base + index);
362 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
363 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
364 NULL, 0, NULL);
365 if (rc) {
366 netif_err(efx, drv, efx->net_dev,
367 "failed to link VI %u to PIO buffer %u (%d)\n",
368 nic_data->pio_write_vi_base + index, index,
369 rc);
370 goto fail;
371 }
372 netif_dbg(efx, probe, efx->net_dev,
373 "linked VI %u to PIO buffer %u\n",
374 nic_data->pio_write_vi_base + index, index);
375 }
376
377 /* Link a buffer to each TX queue */
378 efx_for_each_channel(channel, efx) {
379 efx_for_each_channel_tx_queue(tx_queue, channel) {
380 /* We assign the PIO buffers to queues in
381 * reverse order to allow for the following
382 * special case.
383 */
384 offset = ((efx->tx_channel_offset + efx->n_tx_channels -
385 tx_queue->channel->channel - 1) *
386 efx_piobuf_size);
387 index = offset / ER_DZ_TX_PIOBUF_SIZE;
388 offset = offset % ER_DZ_TX_PIOBUF_SIZE;
389
390 /* When the host page size is 4K, the first
391 * host page in the WC mapping may be within
392 * the same VI page as the last TX queue. We
393 * can only link one buffer to each VI.
394 */
395 if (tx_queue->queue == nic_data->pio_write_vi_base) {
396 BUG_ON(index != 0);
397 rc = 0;
398 } else {
399 MCDI_SET_DWORD(inbuf,
400 LINK_PIOBUF_IN_PIOBUF_HANDLE,
401 nic_data->piobuf_handle[index]);
402 MCDI_SET_DWORD(inbuf,
403 LINK_PIOBUF_IN_TXQ_INSTANCE,
404 tx_queue->queue);
405 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
406 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
407 NULL, 0, NULL);
408 }
409
410 if (rc) {
411 /* This is non-fatal; the TX path just
412 * won't use PIO for this queue
413 */
414 netif_err(efx, drv, efx->net_dev,
415 "failed to link VI %u to PIO buffer %u (%d)\n",
416 tx_queue->queue, index, rc);
417 tx_queue->piobuf = NULL;
418 } else {
419 tx_queue->piobuf =
420 nic_data->pio_write_base +
421 index * EFX_VI_PAGE_SIZE + offset;
422 tx_queue->piobuf_offset = offset;
423 netif_dbg(efx, probe, efx->net_dev,
424 "linked VI %u to PIO buffer %u offset %x addr %p\n",
425 tx_queue->queue, index,
426 tx_queue->piobuf_offset,
427 tx_queue->piobuf);
428 }
429 }
430 }
431
432 return 0;
433
434fail:
435 while (index--) {
436 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
437 nic_data->pio_write_vi_base + index);
438 efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
439 inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
440 NULL, 0, NULL);
441 }
442 return rc;
443}
444
445#else /* !EFX_USE_PIO */
446
447static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
448{
449 return n == 0 ? 0 : -ENOBUFS;
450}
451
452static int efx_ef10_link_piobufs(struct efx_nic *efx)
453{
454 return 0;
455}
456
457static void efx_ef10_free_piobufs(struct efx_nic *efx)
458{
459}
460
461#endif /* EFX_USE_PIO */
462
288static void efx_ef10_remove(struct efx_nic *efx) 463static void efx_ef10_remove(struct efx_nic *efx)
289{ 464{
290 struct efx_ef10_nic_data *nic_data = efx->nic_data; 465 struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -295,9 +470,15 @@ static void efx_ef10_remove(struct efx_nic *efx)
295 /* This needs to be after efx_ptp_remove_channel() with no filters */ 470 /* This needs to be after efx_ptp_remove_channel() with no filters */
296 efx_ef10_rx_free_indir_table(efx); 471 efx_ef10_rx_free_indir_table(efx);
297 472
473 if (nic_data->wc_membase)
474 iounmap(nic_data->wc_membase);
475
298 rc = efx_ef10_free_vis(efx); 476 rc = efx_ef10_free_vis(efx);
299 WARN_ON(rc != 0); 477 WARN_ON(rc != 0);
300 478
479 if (!nic_data->must_restore_piobufs)
480 efx_ef10_free_piobufs(efx);
481
301 efx_mcdi_fini(efx); 482 efx_mcdi_fini(efx);
302 efx_nic_free_buffer(efx, &nic_data->mcdi_buf); 483 efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
303 kfree(nic_data); 484 kfree(nic_data);
@@ -330,12 +511,126 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx,
330 return 0; 511 return 0;
331} 512}
332 513
514/* Note that the failure path of this function does not free
515 * resources, as this will be done by efx_ef10_remove().
516 */
333static int efx_ef10_dimension_resources(struct efx_nic *efx) 517static int efx_ef10_dimension_resources(struct efx_nic *efx)
334{ 518{
335 unsigned int n_vis = 519 struct efx_ef10_nic_data *nic_data = efx->nic_data;
336 max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); 520 unsigned int uc_mem_map_size, wc_mem_map_size;
521 unsigned int min_vis, pio_write_vi_base, max_vis;
522 void __iomem *membase;
523 int rc;
524
525 min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
526
527#ifdef EFX_USE_PIO
528 /* Try to allocate PIO buffers if wanted and if the full
529 * number of PIO buffers would be sufficient to allocate one
530 * copy-buffer per TX channel. Failure is non-fatal, as there
531 * are only a small number of PIO buffers shared between all
532 * functions of the controller.
533 */
534 if (efx_piobuf_size != 0 &&
535 ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
536 efx->n_tx_channels) {
537 unsigned int n_piobufs =
538 DIV_ROUND_UP(efx->n_tx_channels,
539 ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
540
541 rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
542 if (rc)
543 netif_err(efx, probe, efx->net_dev,
544 "failed to allocate PIO buffers (%d)\n", rc);
545 else
546 netif_dbg(efx, probe, efx->net_dev,
547 "allocated %u PIO buffers\n", n_piobufs);
548 }
549#else
550 nic_data->n_piobufs = 0;
551#endif
337 552
338 return efx_ef10_alloc_vis(efx, n_vis, n_vis); 553 /* PIO buffers should be mapped with write-combining enabled,
554 * and we want to make single UC and WC mappings rather than
555 * several of each (in fact that's the only option if host
556 * page size is >4K). So we may allocate some extra VIs just
557 * for writing PIO buffers through.
558 */
559 uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
560 ER_DZ_TX_PIOBUF);
561 if (nic_data->n_piobufs) {
562 pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
563 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
564 nic_data->n_piobufs) *
565 EFX_VI_PAGE_SIZE) -
566 uc_mem_map_size);
567 max_vis = pio_write_vi_base + nic_data->n_piobufs;
568 } else {
569 pio_write_vi_base = 0;
570 wc_mem_map_size = 0;
571 max_vis = min_vis;
572 }
573
574 /* In case the last attached driver failed to free VIs, do it now */
575 rc = efx_ef10_free_vis(efx);
576 if (rc != 0)
577 return rc;
578
579 rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
580 if (rc != 0)
581 return rc;
582
583 /* If we didn't get enough VIs to map all the PIO buffers, free the
584 * PIO buffers
585 */
586 if (nic_data->n_piobufs &&
587 nic_data->n_allocated_vis <
588 pio_write_vi_base + nic_data->n_piobufs) {
589 netif_dbg(efx, probe, efx->net_dev,
590 "%u VIs are not sufficient to map %u PIO buffers\n",
591 nic_data->n_allocated_vis, nic_data->n_piobufs);
592 efx_ef10_free_piobufs(efx);
593 }
594
595 /* Shrink the original UC mapping of the memory BAR */
596 membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
597 if (!membase) {
598 netif_err(efx, probe, efx->net_dev,
599 "could not shrink memory BAR to %x\n",
600 uc_mem_map_size);
601 return -ENOMEM;
602 }
603 iounmap(efx->membase);
604 efx->membase = membase;
605
606 /* Set up the WC mapping if needed */
607 if (wc_mem_map_size) {
608 nic_data->wc_membase = ioremap_wc(efx->membase_phys +
609 uc_mem_map_size,
610 wc_mem_map_size);
611 if (!nic_data->wc_membase) {
612 netif_err(efx, probe, efx->net_dev,
613 "could not allocate WC mapping of size %x\n",
614 wc_mem_map_size);
615 return -ENOMEM;
616 }
617 nic_data->pio_write_vi_base = pio_write_vi_base;
618 nic_data->pio_write_base =
619 nic_data->wc_membase +
620 (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
621 uc_mem_map_size);
622
623 rc = efx_ef10_link_piobufs(efx);
624 if (rc)
625 efx_ef10_free_piobufs(efx);
626 }
627
628 netif_dbg(efx, probe, efx->net_dev,
629 "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
630 &efx->membase_phys, efx->membase, uc_mem_map_size,
631 nic_data->wc_membase, wc_mem_map_size);
632
633 return 0;
339} 634}
340 635
341static int efx_ef10_init_nic(struct efx_nic *efx) 636static int efx_ef10_init_nic(struct efx_nic *efx)
@@ -359,6 +654,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
359 nic_data->must_realloc_vis = false; 654 nic_data->must_realloc_vis = false;
360 } 655 }
361 656
657 if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
658 rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
659 if (rc == 0) {
660 rc = efx_ef10_link_piobufs(efx);
661 if (rc)
662 efx_ef10_free_piobufs(efx);
663 }
664
665 /* Log an error on failure, but this is non-fatal */
666 if (rc)
667 netif_err(efx, drv, efx->net_dev,
668 "failed to restore PIO buffers (%d)\n", rc);
669 nic_data->must_restore_piobufs = false;
670 }
671
362 efx_ef10_rx_push_indir_table(efx); 672 efx_ef10_rx_push_indir_table(efx);
363 return 0; 673 return 0;
364} 674}
@@ -759,6 +1069,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
759 /* All our allocations have been reset */ 1069 /* All our allocations have been reset */
760 nic_data->must_realloc_vis = true; 1070 nic_data->must_realloc_vis = true;
761 nic_data->must_restore_filters = true; 1071 nic_data->must_restore_filters = true;
1072 nic_data->must_restore_piobufs = true;
762 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; 1073 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
763 1074
764 /* The datapath firmware might have been changed */ 1075 /* The datapath firmware might have been changed */
@@ -2180,7 +2491,7 @@ out_unlock:
2180 return rc; 2491 return rc;
2181} 2492}
2182 2493
2183void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) 2494static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
2184{ 2495{
2185 /* no need to do anything here on EF10 */ 2496 /* no need to do anything here on EF10 */
2186} 2497}
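
A worked example of the PIO-buffer placement arithmetic used in efx_ef10_link_piobufs() above: each channel's copy-buffer byte offset is split into a hardware buffer index and an offset within that buffer, with channels assigned from the end so the last channel lands at offset 0 of buffer 0. The sizes below are placeholders chosen for readability, not the EF10 register values; they stand in for ER_DZ_TX_PIOBUF_SIZE and efx_piobuf_size.

    #include <stdio.h>

    int main(void)
    {
            const unsigned int piobuf_total  = 2048; /* bytes per hardware PIO buffer (assumed) */
            const unsigned int piobuf_size   = 256;  /* bytes per per-channel copy-buffer (assumed) */
            const unsigned int n_tx_channels = 12;

            for (unsigned int ch = 0; ch < n_tx_channels; ch++) {
                    /* Reverse assignment, as in the driver's offset formula */
                    unsigned int offset = (n_tx_channels - ch - 1) * piobuf_size;
                    unsigned int index  = offset / piobuf_total;

                    offset %= piobuf_total;
                    printf("channel %2u -> piobuf %u, offset 0x%x\n",
                           ch, index, offset);
            }
            return 0;
    }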
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index b3f4e3755fd9..207ac9a1e3de 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -315,6 +315,7 @@
315#define ESF_DZ_TX_PIO_TYPE_WIDTH 1 315#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
316#define ESF_DZ_TX_PIO_OPT_LBN 60 316#define ESF_DZ_TX_PIO_OPT_LBN 60
317#define ESF_DZ_TX_PIO_OPT_WIDTH 3 317#define ESF_DZ_TX_PIO_OPT_WIDTH 3
318#define ESE_DZ_TX_OPTION_DESC_PIO 1
318#define ESF_DZ_TX_PIO_CONT_LBN 59 319#define ESF_DZ_TX_PIO_CONT_LBN 59
319#define ESF_DZ_TX_PIO_CONT_WIDTH 1 320#define ESF_DZ_TX_PIO_CONT_WIDTH 1
320#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32 321#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 34d00f5771fe..b8235ee5d7d7 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -18,37 +18,36 @@
18#define EFX_MEM_BAR 2 18#define EFX_MEM_BAR 2
19 19
20/* TX */ 20/* TX */
21extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); 21int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
22extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); 22void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
23extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); 23void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
24extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); 24void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
25extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); 25void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
26extern netdev_tx_t 26netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
27efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); 27 struct net_device *net_dev);
28extern netdev_tx_t 28netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
29efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); 29void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
30extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 30int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
31extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); 31unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
32extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); 32extern unsigned int efx_piobuf_size;
33 33
34/* RX */ 34/* RX */
35extern void efx_rx_config_page_split(struct efx_nic *efx); 35void efx_rx_config_page_split(struct efx_nic *efx);
36extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 36int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
37extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); 37void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
38extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); 38void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
39extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); 39void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
40extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 40void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
41extern void efx_rx_slow_fill(unsigned long context); 41void efx_rx_slow_fill(unsigned long context);
42extern void __efx_rx_packet(struct efx_channel *channel); 42void __efx_rx_packet(struct efx_channel *channel);
43extern void efx_rx_packet(struct efx_rx_queue *rx_queue, 43void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
44 unsigned int index, unsigned int n_frags, 44 unsigned int n_frags, unsigned int len, u16 flags);
45 unsigned int len, u16 flags);
46static inline void efx_rx_flush_packet(struct efx_channel *channel) 45static inline void efx_rx_flush_packet(struct efx_channel *channel)
47{ 46{
48 if (channel->rx_pkt_n_frags) 47 if (channel->rx_pkt_n_frags)
49 __efx_rx_packet(channel); 48 __efx_rx_packet(channel);
50} 49}
51extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); 50void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
52 51
53#define EFX_MAX_DMAQ_SIZE 4096UL 52#define EFX_MAX_DMAQ_SIZE 4096UL
54#define EFX_DEFAULT_DMAQ_SIZE 1024UL 53#define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -162,9 +161,9 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
162 return efx->type->filter_get_rx_ids(efx, priority, buf, size); 161 return efx->type->filter_get_rx_ids(efx, priority, buf, size);
163} 162}
164#ifdef CONFIG_RFS_ACCEL 163#ifdef CONFIG_RFS_ACCEL
165extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 164int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
166 u16 rxq_index, u32 flow_id); 165 u16 rxq_index, u32 flow_id);
167extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota); 166bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
168static inline void efx_filter_rfs_expire(struct efx_channel *channel) 167static inline void efx_filter_rfs_expire(struct efx_channel *channel)
169{ 168{
170 if (channel->rfs_filters_added >= 60 && 169 if (channel->rfs_filters_added >= 60 &&
@@ -176,50 +175,48 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
176static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} 175static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
177#define efx_filter_rfs_enabled() 0 176#define efx_filter_rfs_enabled() 0
178#endif 177#endif
179extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); 178bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
180 179
181/* Channels */ 180/* Channels */
182extern int efx_channel_dummy_op_int(struct efx_channel *channel); 181int efx_channel_dummy_op_int(struct efx_channel *channel);
183extern void efx_channel_dummy_op_void(struct efx_channel *channel); 182void efx_channel_dummy_op_void(struct efx_channel *channel);
184extern int 183int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
185efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
186 184
187/* Ports */ 185/* Ports */
188extern int efx_reconfigure_port(struct efx_nic *efx); 186int efx_reconfigure_port(struct efx_nic *efx);
189extern int __efx_reconfigure_port(struct efx_nic *efx); 187int __efx_reconfigure_port(struct efx_nic *efx);
190 188
191/* Ethtool support */ 189/* Ethtool support */
192extern const struct ethtool_ops efx_ethtool_ops; 190extern const struct ethtool_ops efx_ethtool_ops;
193 191
194/* Reset handling */ 192/* Reset handling */
195extern int efx_reset(struct efx_nic *efx, enum reset_type method); 193int efx_reset(struct efx_nic *efx, enum reset_type method);
196extern void efx_reset_down(struct efx_nic *efx, enum reset_type method); 194void efx_reset_down(struct efx_nic *efx, enum reset_type method);
197extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); 195int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
198extern int efx_try_recovery(struct efx_nic *efx); 196int efx_try_recovery(struct efx_nic *efx);
199 197
200/* Global */ 198/* Global */
201extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); 199void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
202extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, 200int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
203 unsigned int rx_usecs, bool rx_adaptive, 201 unsigned int rx_usecs, bool rx_adaptive,
204 bool rx_may_override_tx); 202 bool rx_may_override_tx);
205extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, 203void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
206 unsigned int *rx_usecs, bool *rx_adaptive); 204 unsigned int *rx_usecs, bool *rx_adaptive);
207 205
208/* Dummy PHY ops for PHY drivers */ 206/* Dummy PHY ops for PHY drivers */
209extern int efx_port_dummy_op_int(struct efx_nic *efx); 207int efx_port_dummy_op_int(struct efx_nic *efx);
210extern void efx_port_dummy_op_void(struct efx_nic *efx); 208void efx_port_dummy_op_void(struct efx_nic *efx);
211
212 209
213/* MTD */ 210/* MTD */
214#ifdef CONFIG_SFC_MTD 211#ifdef CONFIG_SFC_MTD
215extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, 212int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
216 size_t n_parts, size_t sizeof_part); 213 size_t n_parts, size_t sizeof_part);
217static inline int efx_mtd_probe(struct efx_nic *efx) 214static inline int efx_mtd_probe(struct efx_nic *efx)
218{ 215{
219 return efx->type->mtd_probe(efx); 216 return efx->type->mtd_probe(efx);
220} 217}
221extern void efx_mtd_rename(struct efx_nic *efx); 218void efx_mtd_rename(struct efx_nic *efx);
222extern void efx_mtd_remove(struct efx_nic *efx); 219void efx_mtd_remove(struct efx_nic *efx);
223#else 220#else
224static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } 221static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
225static inline void efx_mtd_rename(struct efx_nic *efx) {} 222static inline void efx_mtd_rename(struct efx_nic *efx) {}
@@ -241,9 +238,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
241 efx_schedule_channel(channel); 238 efx_schedule_channel(channel);
242} 239}
243 240
244extern void efx_link_status_changed(struct efx_nic *efx); 241void efx_link_status_changed(struct efx_nic *efx);
245extern void efx_link_set_advertising(struct efx_nic *efx, u32); 242void efx_link_set_advertising(struct efx_nic *efx, u32);
246extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8); 243void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
247 244
248static inline void efx_device_detach_sync(struct efx_nic *efx) 245static inline void efx_device_detach_sync(struct efx_nic *efx)
249{ 246{
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 5b471cf5c323..1f529fa2edb1 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -70,6 +70,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
70 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), 70 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
71 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), 71 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
72 EFX_ETHTOOL_UINT_TXQ_STAT(pushes), 72 EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
73 EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
73 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), 74 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
74 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), 75 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
75 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), 76 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -1035,8 +1036,8 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
1035 return 0; 1036 return 0;
1036} 1037}
1037 1038
1038int efx_ethtool_get_ts_info(struct net_device *net_dev, 1039static int efx_ethtool_get_ts_info(struct net_device *net_dev,
1039 struct ethtool_ts_info *ts_info) 1040 struct ethtool_ts_info *ts_info)
1040{ 1041{
1041 struct efx_nic *efx = netdev_priv(net_dev); 1042 struct efx_nic *efx = netdev_priv(net_dev);
1042 1043
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96ce507d8602..4d3f119b67b3 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,6 +66,11 @@
66#define EFX_USE_QWORD_IO 1 66#define EFX_USE_QWORD_IO 1
67#endif 67#endif
68 68
69/* PIO is a win only if write-combining is possible */
70#ifdef ARCH_HAS_IOREMAP_WC
71#define EFX_USE_PIO 1
72#endif
73
69#ifdef EFX_USE_QWORD_IO 74#ifdef EFX_USE_QWORD_IO
70static inline void _efx_writeq(struct efx_nic *efx, __le64 value, 75static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
71 unsigned int reg) 76 unsigned int reg)
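
[Editorial note] The EFX_USE_PIO gate added above only enables PIO when the architecture can provide a write-combining mapping. A minimal sketch of what that implies at probe time; efx_map_wc_bar() is a hypothetical helper name for illustration, not code from this patch:

#ifdef EFX_USE_PIO
/* Map part of a PCI BAR with write-combining so consecutive PIO writes
 * can be merged into larger bus bursts.
 */
static void __iomem *efx_map_wc_bar(struct pci_dev *pci_dev, unsigned int bar,
				    resource_size_t offset, resource_size_t len)
{
	return ioremap_wc(pci_resource_start(pci_dev, bar) + offset, len);
}
#endif
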
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index c34d0d4e10ee..656a3277c2b2 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -108,38 +108,35 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
108} 108}
109#endif 109#endif
110 110
111extern int efx_mcdi_init(struct efx_nic *efx); 111int efx_mcdi_init(struct efx_nic *efx);
112extern void efx_mcdi_fini(struct efx_nic *efx); 112void efx_mcdi_fini(struct efx_nic *efx);
113 113
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
-			const efx_dword_t *inbuf, size_t inlen,
-			efx_dword_t *outbuf, size_t outlen,
-			size_t *outlen_actual);
-
-extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
-			      const efx_dword_t *inbuf, size_t inlen);
-extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
-			       efx_dword_t *outbuf, size_t outlen,
-			       size_t *outlen_actual);
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
+		 size_t inlen, efx_dword_t *outbuf, size_t outlen,
+		 size_t *outlen_actual);
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+		       const efx_dword_t *inbuf, size_t inlen);
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+			efx_dword_t *outbuf, size_t outlen,
+			size_t *outlen_actual);

125typedef void efx_mcdi_async_completer(struct efx_nic *efx, 124typedef void efx_mcdi_async_completer(struct efx_nic *efx,
126 unsigned long cookie, int rc, 125 unsigned long cookie, int rc,
127 efx_dword_t *outbuf, 126 efx_dword_t *outbuf,
128 size_t outlen_actual); 127 size_t outlen_actual);
129extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, 128int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
130 const efx_dword_t *inbuf, size_t inlen, 129 const efx_dword_t *inbuf, size_t inlen, size_t outlen,
131 size_t outlen, 130 efx_mcdi_async_completer *complete,
132 efx_mcdi_async_completer *complete, 131 unsigned long cookie);
133 unsigned long cookie);
134 132
135extern int efx_mcdi_poll_reboot(struct efx_nic *efx); 133int efx_mcdi_poll_reboot(struct efx_nic *efx);
136extern void efx_mcdi_mode_poll(struct efx_nic *efx); 134void efx_mcdi_mode_poll(struct efx_nic *efx);
137extern void efx_mcdi_mode_event(struct efx_nic *efx); 135void efx_mcdi_mode_event(struct efx_nic *efx);
138extern void efx_mcdi_flush_async(struct efx_nic *efx); 136void efx_mcdi_flush_async(struct efx_nic *efx);
139 137
140extern void efx_mcdi_process_event(struct efx_channel *channel, 138void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
141 efx_qword_t *event); 139void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
142extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
143 140
144/* We expect that 16- and 32-bit fields in MCDI requests and responses 141/* We expect that 16- and 32-bit fields in MCDI requests and responses
145 * are appropriately aligned, but 64-bit fields are only 142 * are appropriately aligned, but 64-bit fields are only
@@ -275,55 +272,54 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
275#define MCDI_EVENT_FIELD(_ev, _field) \ 272#define MCDI_EVENT_FIELD(_ev, _field) \
276 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 273 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
277 274
278extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); 275void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
279extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 276int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
280 u16 *fw_subtype_list, u32 *capabilities); 277 u16 *fw_subtype_list, u32 *capabilities);
281extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, 278int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
282 u32 dest_evq); 279int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
283extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); 280int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
284extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, 281 size_t *size_out, size_t *erase_size_out,
285 size_t *size_out, size_t *erase_size_out, 282 bool *protected_out);
286 bool *protected_out); 283int efx_mcdi_nvram_test_all(struct efx_nic *efx);
287extern int efx_mcdi_nvram_test_all(struct efx_nic *efx); 284int efx_mcdi_handle_assertion(struct efx_nic *efx);
288extern int efx_mcdi_handle_assertion(struct efx_nic *efx); 285void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
289extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 286int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
290extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, 287 int *id_out);
291 const u8 *mac, int *id_out); 288int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
292extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); 289int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
293extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); 290int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
294extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); 291int efx_mcdi_flush_rxqs(struct efx_nic *efx);
295extern int efx_mcdi_flush_rxqs(struct efx_nic *efx); 292int efx_mcdi_port_probe(struct efx_nic *efx);
296extern int efx_mcdi_port_probe(struct efx_nic *efx); 293void efx_mcdi_port_remove(struct efx_nic *efx);
297extern void efx_mcdi_port_remove(struct efx_nic *efx); 294int efx_mcdi_port_reconfigure(struct efx_nic *efx);
298extern int efx_mcdi_port_reconfigure(struct efx_nic *efx); 295int efx_mcdi_port_get_number(struct efx_nic *efx);
299extern int efx_mcdi_port_get_number(struct efx_nic *efx); 296u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
300extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx); 297void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
301extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); 298int efx_mcdi_set_mac(struct efx_nic *efx);
302extern int efx_mcdi_set_mac(struct efx_nic *efx);
303#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) 299#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
304extern void efx_mcdi_mac_start_stats(struct efx_nic *efx); 300void efx_mcdi_mac_start_stats(struct efx_nic *efx);
305extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx); 301void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
306extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx); 302bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
307extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); 303enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
308extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); 304int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
309extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled); 305int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
310 306
311#ifdef CONFIG_SFC_MCDI_MON 307#ifdef CONFIG_SFC_MCDI_MON
312extern int efx_mcdi_mon_probe(struct efx_nic *efx); 308int efx_mcdi_mon_probe(struct efx_nic *efx);
313extern void efx_mcdi_mon_remove(struct efx_nic *efx); 309void efx_mcdi_mon_remove(struct efx_nic *efx);
314#else 310#else
315static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; } 311static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
316static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {} 312static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
317#endif 313#endif
318 314
319#ifdef CONFIG_SFC_MTD 315#ifdef CONFIG_SFC_MTD
320extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, 316int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
321 size_t len, size_t *retlen, u8 *buffer); 317 size_t *retlen, u8 *buffer);
322extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len); 318int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
323extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, 319int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
324 size_t len, size_t *retlen, const u8 *buffer); 320 size_t *retlen, const u8 *buffer);
325extern int efx_mcdi_mtd_sync(struct mtd_info *mtd); 321int efx_mcdi_mtd_sync(struct mtd_info *mtd);
326extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part); 322void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
327#endif 323#endif
328 324
329#endif /* EFX_MCDI_H */ 325#endif /* EFX_MCDI_H */
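
[Editorial note] For context, the reworked efx_mcdi_rpc() prototype above is used synchronously: the caller supplies dword request/response buffers and checks the returned response length. A hedged usage sketch; the command number and buffer sizes are placeholders, not real MCDI definitions:

static int example_mcdi_call(struct efx_nic *efx)
{
	efx_dword_t inbuf[2];		/* placeholder request payload */
	efx_dword_t outbuf[4];		/* placeholder response payload */
	size_t outlen = 0;
	int rc;

	memset(inbuf, 0, sizeof(inbuf));
	rc = efx_mcdi_rpc(efx, 0x42 /* placeholder command */, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;		/* MC refused or transport failed */
	if (outlen < sizeof(outbuf))
		return -EIO;		/* response shorter than expected */
	return 0;
}
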
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index 16824fecc5ee..4a2dc4c281b7 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -20,7 +20,7 @@
20 20
21static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; } 21static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
22static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; } 22static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
23extern unsigned efx_mdio_id_oui(u32 id); 23unsigned efx_mdio_id_oui(u32 id);
24 24
25static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr) 25static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
26{ 26{
@@ -56,7 +56,7 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
56 return sync; 56 return sync;
57} 57}
58 58
59extern const char *efx_mdio_mmd_name(int mmd); 59const char *efx_mdio_mmd_name(int mmd);
60 60
61/* 61/*
62 * Reset a specific MMD and wait for reset to clear. 62 * Reset a specific MMD and wait for reset to clear.
@@ -64,30 +64,29 @@ extern const char *efx_mdio_mmd_name(int mmd);
64 * 64 *
65 * This function will sleep 65 * This function will sleep
66 */ 66 */
67extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, 67int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
68 int spins, int spintime);
69 68
70/* As efx_mdio_check_mmd but for multiple MMDs */ 69/* As efx_mdio_check_mmd but for multiple MMDs */
71int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask); 70int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
72 71
73/* Check the link status of specified mmds in bit mask */ 72/* Check the link status of specified mmds in bit mask */
74extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask); 73bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
75 74
76/* Generic transmit disable support though PMAPMD */ 75/* Generic transmit disable support though PMAPMD */
77extern void efx_mdio_transmit_disable(struct efx_nic *efx); 76void efx_mdio_transmit_disable(struct efx_nic *efx);
78 77
79/* Generic part of reconfigure: set/clear loopback bits */ 78/* Generic part of reconfigure: set/clear loopback bits */
80extern void efx_mdio_phy_reconfigure(struct efx_nic *efx); 79void efx_mdio_phy_reconfigure(struct efx_nic *efx);
81 80
82/* Set the power state of the specified MMDs */ 81/* Set the power state of the specified MMDs */
83extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx, 82void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
84 int low_power, unsigned int mmd_mask); 83 unsigned int mmd_mask);
85 84
86/* Set (some of) the PHY settings over MDIO */ 85/* Set (some of) the PHY settings over MDIO */
87extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); 86int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
88 87
89/* Push advertising flags and restart autonegotiation */ 88/* Push advertising flags and restart autonegotiation */
90extern void efx_mdio_an_reconfigure(struct efx_nic *efx); 89void efx_mdio_an_reconfigure(struct efx_nic *efx);
91 90
92/* Get pause parameters from AN if available (otherwise return 91/* Get pause parameters from AN if available (otherwise return
93 * requested pause parameters) 92 * requested pause parameters)
@@ -95,8 +94,7 @@ extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
95u8 efx_mdio_get_pause(struct efx_nic *efx); 94u8 efx_mdio_get_pause(struct efx_nic *efx);
96 95
97/* Wait for specified MMDs to exit reset within a timeout */ 96/* Wait for specified MMDs to exit reset within a timeout */
98extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx, 97int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
99 unsigned int mmd_mask);
100 98
101/* Set or clear flag, debouncing */ 99/* Set or clear flag, debouncing */
102static inline void 100static inline void
@@ -107,6 +105,6 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
107} 105}
108 106
109/* Liveness self-test for MDIO PHYs */ 107/* Liveness self-test for MDIO PHYs */
110extern int efx_mdio_test_alive(struct efx_nic *efx); 108int efx_mdio_test_alive(struct efx_nic *efx);
111 109
112#endif /* EFX_MDIO_10G_H */ 110#endif /* EFX_MDIO_10G_H */
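
[Editorial note] As a concrete illustration of what the reshuffled efx_mdio_reset_mmd() prototype implies, the sketch below polls a self-clearing reset bit. It is not the driver's implementation; it assumes the efx_mdio_write() inline from the same header and the generic MDIO_CTRL1/MDIO_CTRL1_RESET definitions from <linux/mdio.h>:

static int example_mdio_reset_mmd(struct efx_nic *efx, int mmd,
				  int spins, int spintime)
{
	/* Request a reset of this MMD, then poll until the reset bit
	 * drops or the spin budget is exhausted.
	 */
	efx_mdio_write(efx, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET);
	while (spins--) {
		msleep(spintime);
		if (!(efx_mdio_read(efx, mmd, MDIO_CTRL1) & MDIO_CTRL1_RESET))
			return 0;
	}
	return -ETIMEDOUT;
}
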
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b172ed133055..aac22a1e85b8 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -182,6 +182,9 @@ struct efx_tx_buffer {
182 * @tsoh_page: Array of pages of TSO header buffers 182 * @tsoh_page: Array of pages of TSO header buffers
183 * @txd: The hardware descriptor ring 183 * @txd: The hardware descriptor ring
184 * @ptr_mask: The size of the ring minus 1. 184 * @ptr_mask: The size of the ring minus 1.
185 * @piobuf: PIO buffer region for this TX queue (shared with its partner).
186 * Size of the region is efx_piobuf_size.
187 * @piobuf_offset: Buffer offset to be specified in PIO descriptors
185 * @initialised: Has hardware queue been initialised? 188 * @initialised: Has hardware queue been initialised?
186 * @read_count: Current read pointer. 189 * @read_count: Current read pointer.
187 * This is the number of buffers that have been removed from both rings. 190 * This is the number of buffers that have been removed from both rings.
@@ -209,6 +212,7 @@ struct efx_tx_buffer {
209 * blocks 212 * blocks
210 * @tso_packets: Number of packets via the TSO xmit path 213 * @tso_packets: Number of packets via the TSO xmit path
211 * @pushes: Number of times the TX push feature has been used 214 * @pushes: Number of times the TX push feature has been used
215 * @pio_packets: Number of times the TX PIO feature has been used
212 * @empty_read_count: If the completion path has seen the queue as empty 216 * @empty_read_count: If the completion path has seen the queue as empty
213 * and the transmission path has not yet checked this, the value of 217 * and the transmission path has not yet checked this, the value of
214 * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. 218 * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -223,6 +227,8 @@ struct efx_tx_queue {
223 struct efx_buffer *tsoh_page; 227 struct efx_buffer *tsoh_page;
224 struct efx_special_buffer txd; 228 struct efx_special_buffer txd;
225 unsigned int ptr_mask; 229 unsigned int ptr_mask;
230 void __iomem *piobuf;
231 unsigned int piobuf_offset;
226 bool initialised; 232 bool initialised;
227 233
228 /* Members used mainly on the completion path */ 234 /* Members used mainly on the completion path */
@@ -238,6 +244,7 @@ struct efx_tx_queue {
238 unsigned int tso_long_headers; 244 unsigned int tso_long_headers;
239 unsigned int tso_packets; 245 unsigned int tso_packets;
240 unsigned int pushes; 246 unsigned int pushes;
247 unsigned int pio_packets;
241 248
242 /* Members shared between paths and sometimes updated */ 249 /* Members shared between paths and sometimes updated */
243 unsigned int empty_read_count ____cacheline_aligned_in_smp; 250 unsigned int empty_read_count ____cacheline_aligned_in_smp;
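
[Editorial note] The new @piobuf, @piobuf_offset and @pio_packets members tie together as follows: @piobuf points into the write-combining mapping for this queue, @piobuf_offset is the value later placed in the PIO option descriptor, and @pio_packets counts how often the path is taken. A simplified, illustrative fragment only, not the patch's actual TX fast path:

static void example_tx_copy_to_pio(struct efx_tx_queue *tx_queue,
				   const struct sk_buff *skb)
{
	/* Copy the linear part of a small packet straight into this
	 * queue's PIO region; a real implementation must also respect
	 * the alignment rules of the write-combining mapping.
	 */
	memcpy_toio(tx_queue->piobuf, skb->data, skb_headlen(skb));
	++tx_queue->pio_packets;
}
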
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 9826594c8a48..9c90bf56090f 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -19,6 +19,7 @@
19#include "bitfield.h" 19#include "bitfield.h"
20#include "efx.h" 20#include "efx.h"
21#include "nic.h" 21#include "nic.h"
22#include "ef10_regs.h"
22#include "farch_regs.h" 23#include "farch_regs.h"
23#include "io.h" 24#include "io.h"
24#include "workarounds.h" 25#include "workarounds.h"
@@ -166,26 +167,30 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
166 167
167/* Register dump */ 168/* Register dump */
168 169
169#define REGISTER_REVISION_A 1 170#define REGISTER_REVISION_FA 1
170#define REGISTER_REVISION_B 2 171#define REGISTER_REVISION_FB 2
171#define REGISTER_REVISION_C 3 172#define REGISTER_REVISION_FC 3
172#define REGISTER_REVISION_Z 3 /* latest revision */ 173#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
174#define REGISTER_REVISION_ED 4
175#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
173 176
174struct efx_nic_reg { 177struct efx_nic_reg {
175 u32 offset:24; 178 u32 offset:24;
176 u32 min_revision:2, max_revision:2; 179 u32 min_revision:3, max_revision:3;
177}; 180};
178 181
179#define REGISTER(name, min_rev, max_rev) { \ 182#define REGISTER(name, arch, min_rev, max_rev) { \
180 FR_ ## min_rev ## max_rev ## _ ## name, \ 183 arch ## R_ ## min_rev ## max_rev ## _ ## name, \
181 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ 184 REGISTER_REVISION_ ## arch ## min_rev, \
185 REGISTER_REVISION_ ## arch ## max_rev \
182} 186}
183#define REGISTER_AA(name) REGISTER(name, A, A) 187#define REGISTER_AA(name) REGISTER(name, F, A, A)
184#define REGISTER_AB(name) REGISTER(name, A, B) 188#define REGISTER_AB(name) REGISTER(name, F, A, B)
185#define REGISTER_AZ(name) REGISTER(name, A, Z) 189#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
186#define REGISTER_BB(name) REGISTER(name, B, B) 190#define REGISTER_BB(name) REGISTER(name, F, B, B)
187#define REGISTER_BZ(name) REGISTER(name, B, Z) 191#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
188#define REGISTER_CZ(name) REGISTER(name, C, Z) 192#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
193#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
189 194
190static const struct efx_nic_reg efx_nic_regs[] = { 195static const struct efx_nic_reg efx_nic_regs[] = {
191 REGISTER_AZ(ADR_REGION), 196 REGISTER_AZ(ADR_REGION),
@@ -292,37 +297,42 @@ static const struct efx_nic_reg efx_nic_regs[] = {
292 REGISTER_AB(XX_TXDRV_CTL), 297 REGISTER_AB(XX_TXDRV_CTL),
293 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ 298 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
294 /* XX_CORE_STAT is partly RC */ 299 /* XX_CORE_STAT is partly RC */
300 REGISTER_DZ(BIU_HW_REV_ID),
301 REGISTER_DZ(MC_DB_LWRD),
302 REGISTER_DZ(MC_DB_HWRD),
295}; 303};
296 304
297struct efx_nic_reg_table { 305struct efx_nic_reg_table {
298 u32 offset:24; 306 u32 offset:24;
299 u32 min_revision:2, max_revision:2; 307 u32 min_revision:3, max_revision:3;
300 u32 step:6, rows:21; 308 u32 step:6, rows:21;
301}; 309};
302 310
303#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \ 311#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
304 offset, \ 312 offset, \
305 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ 313 REGISTER_REVISION_ ## arch ## min_rev, \
314 REGISTER_REVISION_ ## arch ## max_rev, \
306 step, rows \ 315 step, rows \
307} 316}
308#define REGISTER_TABLE(name, min_rev, max_rev) \ 317#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
309 REGISTER_TABLE_DIMENSIONS( \ 318 REGISTER_TABLE_DIMENSIONS( \
310 name, FR_ ## min_rev ## max_rev ## _ ## name, \ 319 name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
311 min_rev, max_rev, \ 320 arch, min_rev, max_rev, \
312 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ 321 arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
313 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS) 322 arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
314#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A) 323#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
315#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z) 324#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
316#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B) 325#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
317#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z) 326#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
318#define REGISTER_TABLE_BB_CZ(name) \ 327#define REGISTER_TABLE_BB_CZ(name) \
319 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \ 328 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
320 FR_BZ_ ## name ## _STEP, \ 329 FR_BZ_ ## name ## _STEP, \
321 FR_BB_ ## name ## _ROWS), \ 330 FR_BB_ ## name ## _ROWS), \
322 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \ 331 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
323 FR_BZ_ ## name ## _STEP, \ 332 FR_BZ_ ## name ## _STEP, \
324 FR_CZ_ ## name ## _ROWS) 333 FR_CZ_ ## name ## _ROWS)
325#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z) 334#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
335#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
326 336
327static const struct efx_nic_reg_table efx_nic_reg_tables[] = { 337static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
328 /* DRIVER is not used */ 338 /* DRIVER is not used */
@@ -340,9 +350,9 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
340 * 1K entries allows for some expansion of queue count and 350 * 1K entries allows for some expansion of queue count and
341 * size before we need to change the version. */ 351 * size before we need to change the version. */
342 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, 352 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
343 A, A, 8, 1024), 353 F, A, A, 8, 1024),
344 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, 354 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
345 B, Z, 8, 1024), 355 F, B, Z, 8, 1024),
346 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), 356 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
347 REGISTER_TABLE_BB_CZ(TIMER_TBL), 357 REGISTER_TABLE_BB_CZ(TIMER_TBL),
348 REGISTER_TABLE_BB_CZ(TX_PACE_TBL), 358 REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -353,6 +363,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
353 /* MSIX_PBA_TABLE is not mapped */ 363 /* MSIX_PBA_TABLE is not mapped */
354 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ 364 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
355 REGISTER_TABLE_BZ(RX_FILTER_TBL0), 365 REGISTER_TABLE_BZ(RX_FILTER_TBL0),
366 REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
356}; 367};
357 368
358size_t efx_nic_get_regs_len(struct efx_nic *efx) 369size_t efx_nic_get_regs_len(struct efx_nic *efx)
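
[Editorial note] With the revision fields widened to three bits, one table can now cover both the Falcon-architecture revisions (FA..FZ) and the EF10 ones (ED..EZ). The selection logic amounts to a range check like the following sketch (illustrative only):

static bool example_reg_wanted(const struct efx_nic_reg *reg,
			       unsigned int revision)
{
	/* A register (or register table) is dumped only if the NIC's
	 * revision falls inside the [min_revision, max_revision] window
	 * encoded for it.
	 */
	return revision >= reg->min_revision && revision <= reg->max_revision;
}
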
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 890bbbe8320e..11b6112d9249 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -30,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
30 return efx->type->revision; 30 return efx->type->revision;
31} 31}
32 32
33extern u32 efx_farch_fpga_ver(struct efx_nic *efx); 33u32 efx_farch_fpga_ver(struct efx_nic *efx);
34 34
35/* NIC has two interlinked PCI functions for the same port. */ 35/* NIC has two interlinked PCI functions for the same port. */
36static inline bool efx_nic_is_dual_func(struct efx_nic *efx) 36static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -71,6 +71,26 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
71 return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; 71 return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
72} 72}
73 73
74/* Report whether the NIC considers this TX queue empty, given the
75 * write_count used for the last doorbell push. May return false
76 * negative.
77 */
78static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
79 unsigned int write_count)
80{
81 unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
82
83 if (empty_read_count == 0)
84 return false;
85
86 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
87}
88
89static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
90{
91 return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count);
92}
93
74/* Decide whether to push a TX descriptor to the NIC vs merely writing 94/* Decide whether to push a TX descriptor to the NIC vs merely writing
75 * the doorbell. This can reduce latency when we are adding a single 95 * the doorbell. This can reduce latency when we are adding a single
76 * descriptor to an empty queue, but is otherwise pointless. Further, 96 * descriptor to an empty queue, but is otherwise pointless. Further,
@@ -80,14 +100,10 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
80static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, 100static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
81 unsigned int write_count) 101 unsigned int write_count)
82{ 102{
83 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); 103 bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
84
85 if (empty_read_count == 0)
86 return false;
87 104
88 tx_queue->empty_read_count = 0; 105 tx_queue->empty_read_count = 0;
89 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 106 return was_empty && tx_queue->write_count - write_count == 1;
90 && tx_queue->write_count - write_count == 1;
91} 107}
92 108
93/* Returns a pointer to the specified descriptor in the RX descriptor queue */ 109/* Returns a pointer to the specified descriptor in the RX descriptor queue */
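
[Editorial note] The check in __efx_nic_tx_is_empty() is subtle: empty_read_count is a snapshot of read_count taken by the completion path with EFX_EMPTY_COUNT_VALID ORed in, so zero means "no valid snapshot", and the XOR-and-mask compares the snapshot against the caller's write_count while ignoring the VALID flag. A worked restatement of that test, mirroring the inline above:

static bool example_queue_seen_empty(unsigned int empty_read_count,
				     unsigned int write_count)
{
	if (empty_read_count == 0)
		return false;	/* completion path never flagged "empty" */

	/* Equal modulo the VALID bit => nothing was queued since the
	 * completion path last saw the ring empty (may be a false negative).
	 */
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
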
@@ -401,6 +417,12 @@ enum {
401 EF10_STAT_COUNT 417 EF10_STAT_COUNT
402}; 418};
403 419
420/* Maximum number of TX PIO buffers we may allocate to a function.
421 * This matches the total number of buffers on each SFC9100-family
422 * controller.
423 */
424#define EF10_TX_PIOBUF_COUNT 16
425
404/** 426/**
405 * struct efx_ef10_nic_data - EF10 architecture NIC state 427 * struct efx_ef10_nic_data - EF10 architecture NIC state
406 * @mcdi_buf: DMA buffer for MCDI 428 * @mcdi_buf: DMA buffer for MCDI
@@ -409,6 +431,13 @@ enum {
409 * @n_allocated_vis: Number of VIs allocated to this function 431 * @n_allocated_vis: Number of VIs allocated to this function
410 * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot 432 * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
411 * @must_restore_filters: Flag: filters have yet to be restored after MC reboot 433 * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
434 * @n_piobufs: Number of PIO buffers allocated to this function
435 * @wc_membase: Base address of write-combining mapping of the memory BAR
436 * @pio_write_base: Base address for writing PIO buffers
437 * @pio_write_vi_base: Relative VI number for @pio_write_base
438 * @piobuf_handle: Handle of each PIO buffer allocated
439 * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
440 * reboot
412 * @rx_rss_context: Firmware handle for our RSS context 441 * @rx_rss_context: Firmware handle for our RSS context
413 * @stats: Hardware statistics 442 * @stats: Hardware statistics
414 * @workaround_35388: Flag: firmware supports workaround for bug 35388 443 * @workaround_35388: Flag: firmware supports workaround for bug 35388
@@ -424,6 +453,11 @@ struct efx_ef10_nic_data {
424 unsigned int n_allocated_vis; 453 unsigned int n_allocated_vis;
425 bool must_realloc_vis; 454 bool must_realloc_vis;
426 bool must_restore_filters; 455 bool must_restore_filters;
456 unsigned int n_piobufs;
457 void __iomem *wc_membase, *pio_write_base;
458 unsigned int pio_write_vi_base;
459 unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
460 bool must_restore_piobufs;
427 u32 rx_rss_context; 461 u32 rx_rss_context;
428 u64 stats[EF10_STAT_COUNT]; 462 u64 stats[EF10_STAT_COUNT];
429 bool workaround_35388; 463 bool workaround_35388;
@@ -475,18 +509,18 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx)
475 return 1 << efx->vi_scale; 509 return 1 << efx->vi_scale;
476} 510}
477 511
478extern int efx_init_sriov(void); 512int efx_init_sriov(void);
479extern void efx_sriov_probe(struct efx_nic *efx); 513void efx_sriov_probe(struct efx_nic *efx);
480extern int efx_sriov_init(struct efx_nic *efx); 514int efx_sriov_init(struct efx_nic *efx);
481extern void efx_sriov_mac_address_changed(struct efx_nic *efx); 515void efx_sriov_mac_address_changed(struct efx_nic *efx);
482extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); 516void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
483extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); 517void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
484extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event); 518void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
485extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); 519void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
486extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr); 520void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
487extern void efx_sriov_reset(struct efx_nic *efx); 521void efx_sriov_reset(struct efx_nic *efx);
488extern void efx_sriov_fini(struct efx_nic *efx); 522void efx_sriov_fini(struct efx_nic *efx);
489extern void efx_fini_sriov(void); 523void efx_fini_sriov(void);
490 524
491#else 525#else
492 526
@@ -512,22 +546,20 @@ static inline void efx_fini_sriov(void) {}
512 546
513#endif 547#endif
514 548
515extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac); 549int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
516extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, 550int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
517 u16 vlan, u8 qos); 551int efx_sriov_get_vf_config(struct net_device *dev, int vf,
518extern int efx_sriov_get_vf_config(struct net_device *dev, int vf, 552 struct ifla_vf_info *ivf);
519 struct ifla_vf_info *ivf); 553int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
520extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, 554 bool spoofchk);
521 bool spoofchk);
522 555
523struct ethtool_ts_info; 556struct ethtool_ts_info;
524extern void efx_ptp_probe(struct efx_nic *efx); 557void efx_ptp_probe(struct efx_nic *efx);
525extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd); 558int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
526extern void efx_ptp_get_ts_info(struct efx_nic *efx, 559void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
527 struct ethtool_ts_info *ts_info); 560bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
528extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 561int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
529extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 562void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
530extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
531 563
532extern const struct efx_nic_type falcon_a1_nic_type; 564extern const struct efx_nic_type falcon_a1_nic_type;
533extern const struct efx_nic_type falcon_b0_nic_type; 565extern const struct efx_nic_type falcon_b0_nic_type;
@@ -541,7 +573,7 @@ extern const struct efx_nic_type efx_hunt_a0_nic_type;
541 ************************************************************************** 573 **************************************************************************
542 */ 574 */
543 575
544extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); 576int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
545 577
546/* TX data path */ 578/* TX data path */
547static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) 579static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
@@ -609,58 +641,58 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
609{ 641{
610 channel->efx->type->ev_read_ack(channel); 642 channel->efx->type->ev_read_ack(channel);
611} 643}
612extern void efx_nic_event_test_start(struct efx_channel *channel); 644void efx_nic_event_test_start(struct efx_channel *channel);
613 645
614/* Falcon/Siena queue operations */ 646/* Falcon/Siena queue operations */
615extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); 647int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
616extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue); 648void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
617extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue); 649void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
618extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue); 650void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
619extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue); 651void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
620extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue); 652int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
621extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue); 653void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
622extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue); 654void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
623extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); 655void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
624extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue); 656void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
625extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); 657void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
626extern int efx_farch_ev_probe(struct efx_channel *channel); 658int efx_farch_ev_probe(struct efx_channel *channel);
627extern int efx_farch_ev_init(struct efx_channel *channel); 659int efx_farch_ev_init(struct efx_channel *channel);
628extern void efx_farch_ev_fini(struct efx_channel *channel); 660void efx_farch_ev_fini(struct efx_channel *channel);
629extern void efx_farch_ev_remove(struct efx_channel *channel); 661void efx_farch_ev_remove(struct efx_channel *channel);
630extern int efx_farch_ev_process(struct efx_channel *channel, int quota); 662int efx_farch_ev_process(struct efx_channel *channel, int quota);
631extern void efx_farch_ev_read_ack(struct efx_channel *channel); 663void efx_farch_ev_read_ack(struct efx_channel *channel);
632extern void efx_farch_ev_test_generate(struct efx_channel *channel); 664void efx_farch_ev_test_generate(struct efx_channel *channel);
633 665
634/* Falcon/Siena filter operations */ 666/* Falcon/Siena filter operations */
635extern int efx_farch_filter_table_probe(struct efx_nic *efx); 667int efx_farch_filter_table_probe(struct efx_nic *efx);
636extern void efx_farch_filter_table_restore(struct efx_nic *efx); 668void efx_farch_filter_table_restore(struct efx_nic *efx);
637extern void efx_farch_filter_table_remove(struct efx_nic *efx); 669void efx_farch_filter_table_remove(struct efx_nic *efx);
638extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx); 670void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
639extern s32 efx_farch_filter_insert(struct efx_nic *efx, 671s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
640 struct efx_filter_spec *spec, bool replace); 672 bool replace);
641extern int efx_farch_filter_remove_safe(struct efx_nic *efx, 673int efx_farch_filter_remove_safe(struct efx_nic *efx,
642 enum efx_filter_priority priority, 674 enum efx_filter_priority priority,
643 u32 filter_id); 675 u32 filter_id);
644extern int efx_farch_filter_get_safe(struct efx_nic *efx, 676int efx_farch_filter_get_safe(struct efx_nic *efx,
645 enum efx_filter_priority priority, 677 enum efx_filter_priority priority, u32 filter_id,
646 u32 filter_id, struct efx_filter_spec *); 678 struct efx_filter_spec *);
647extern void efx_farch_filter_clear_rx(struct efx_nic *efx, 679void efx_farch_filter_clear_rx(struct efx_nic *efx,
648 enum efx_filter_priority priority); 680 enum efx_filter_priority priority);
649extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, 681u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
650 enum efx_filter_priority priority); 682 enum efx_filter_priority priority);
651extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx); 683u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
652extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, 684s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
653 enum efx_filter_priority priority, 685 enum efx_filter_priority priority, u32 *buf,
654 u32 *buf, u32 size); 686 u32 size);
655#ifdef CONFIG_RFS_ACCEL 687#ifdef CONFIG_RFS_ACCEL
656extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, 688s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
657 struct efx_filter_spec *spec); 689 struct efx_filter_spec *spec);
658extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, 690bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
659 unsigned int index); 691 unsigned int index);
660#endif 692#endif
661extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx); 693void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
662 694
663extern bool efx_nic_event_present(struct efx_channel *channel); 695bool efx_nic_event_present(struct efx_channel *channel);
664 696
665/* Some statistics are computed as A - B where A and B each increase 697/* Some statistics are computed as A - B where A and B each increase
666 * linearly with some hardware counter(s) and the counters are read 698 * linearly with some hardware counter(s) and the counters are read
@@ -681,17 +713,17 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
681} 713}
682 714
683/* Interrupts */ 715/* Interrupts */
684extern int efx_nic_init_interrupt(struct efx_nic *efx); 716int efx_nic_init_interrupt(struct efx_nic *efx);
685extern void efx_nic_irq_test_start(struct efx_nic *efx); 717void efx_nic_irq_test_start(struct efx_nic *efx);
686extern void efx_nic_fini_interrupt(struct efx_nic *efx); 718void efx_nic_fini_interrupt(struct efx_nic *efx);
687 719
688/* Falcon/Siena interrupts */ 720/* Falcon/Siena interrupts */
689extern void efx_farch_irq_enable_master(struct efx_nic *efx); 721void efx_farch_irq_enable_master(struct efx_nic *efx);
690extern void efx_farch_irq_test_generate(struct efx_nic *efx); 722void efx_farch_irq_test_generate(struct efx_nic *efx);
691extern void efx_farch_irq_disable_master(struct efx_nic *efx); 723void efx_farch_irq_disable_master(struct efx_nic *efx);
692extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); 724irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
693extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); 725irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
694extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); 726irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
695 727
696static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) 728static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
697{ 729{
@@ -703,21 +735,21 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
703} 735}
704 736
705/* Global Resources */ 737/* Global Resources */
706extern int efx_nic_flush_queues(struct efx_nic *efx); 738int efx_nic_flush_queues(struct efx_nic *efx);
707extern void siena_prepare_flush(struct efx_nic *efx); 739void siena_prepare_flush(struct efx_nic *efx);
708extern int efx_farch_fini_dmaq(struct efx_nic *efx); 740int efx_farch_fini_dmaq(struct efx_nic *efx);
709extern void siena_finish_flush(struct efx_nic *efx); 741void siena_finish_flush(struct efx_nic *efx);
710extern void falcon_start_nic_stats(struct efx_nic *efx); 742void falcon_start_nic_stats(struct efx_nic *efx);
711extern void falcon_stop_nic_stats(struct efx_nic *efx); 743void falcon_stop_nic_stats(struct efx_nic *efx);
712extern int falcon_reset_xaui(struct efx_nic *efx); 744int falcon_reset_xaui(struct efx_nic *efx);
713extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); 745void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
714extern void efx_farch_init_common(struct efx_nic *efx); 746void efx_farch_init_common(struct efx_nic *efx);
715extern void efx_ef10_handle_drain_event(struct efx_nic *efx); 747void efx_ef10_handle_drain_event(struct efx_nic *efx);
716static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx) 748static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
717{ 749{
718 efx->type->rx_push_indir_table(efx); 750 efx->type->rx_push_indir_table(efx);
719} 751}
720extern void efx_farch_rx_push_indir_table(struct efx_nic *efx); 752void efx_farch_rx_push_indir_table(struct efx_nic *efx);
721 753
722int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 754int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
723 unsigned int len, gfp_t gfp_flags); 755 unsigned int len, gfp_t gfp_flags);
@@ -728,24 +760,22 @@ struct efx_farch_register_test {
728 unsigned address; 760 unsigned address;
729 efx_oword_t mask; 761 efx_oword_t mask;
730}; 762};
731extern int efx_farch_test_registers(struct efx_nic *efx, 763int efx_farch_test_registers(struct efx_nic *efx,
732 const struct efx_farch_register_test *regs, 764 const struct efx_farch_register_test *regs,
733 size_t n_regs); 765 size_t n_regs);
734 766
735extern size_t efx_nic_get_regs_len(struct efx_nic *efx); 767size_t efx_nic_get_regs_len(struct efx_nic *efx);
736extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); 768void efx_nic_get_regs(struct efx_nic *efx, void *buf);
737 769
-extern size_t
-efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
-		       const unsigned long *mask, u8 *names);
-extern void
-efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
-		     const unsigned long *mask,
-		     u64 *stats, const void *dma_buf, bool accumulate);
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+			      const unsigned long *mask, u8 *names);
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+			  const unsigned long *mask, u64 *stats,
+			  const void *dma_buf, bool accumulate);
745 775
746#define EFX_MAX_FLUSH_TIME 5000 776#define EFX_MAX_FLUSH_TIME 5000
747 777
748extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, 778void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
749 efx_qword_t *event); 779 efx_qword_t *event);
750 780
751#endif /* EFX_NIC_H */ 781#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 45eeb7075156..803bf445c08e 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -15,7 +15,7 @@
15 */ 15 */
16extern const struct efx_phy_operations falcon_sfx7101_phy_ops; 16extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
17 17
18extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 18void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
19 19
20/**************************************************************************** 20/****************************************************************************
21 * AMCC/Quake QT202x PHYs 21 * AMCC/Quake QT202x PHYs
@@ -34,7 +34,7 @@ extern const struct efx_phy_operations falcon_qt202x_phy_ops;
34#define QUAKE_LED_TXLINK (0) 34#define QUAKE_LED_TXLINK (0)
35#define QUAKE_LED_RXLINK (8) 35#define QUAKE_LED_RXLINK (8)
36 36
37extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state); 37void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
38 38
39/**************************************************************************** 39/****************************************************************************
40* Transwitch CX4 retimer 40* Transwitch CX4 retimer
@@ -44,7 +44,7 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
44#define TXC_GPIO_DIR_INPUT 0 44#define TXC_GPIO_DIR_INPUT 0
45#define TXC_GPIO_DIR_OUTPUT 1 45#define TXC_GPIO_DIR_OUTPUT 1
46 46
47extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir); 47void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
48extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val); 48void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
49 49
50#endif 50#endif
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 4a596725023f..8f09e686fc23 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -12,6 +12,7 @@
12#include <linux/in.h> 12#include <linux/in.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/ip.h> 14#include <linux/ip.h>
15#include <linux/ipv6.h>
15#include <linux/tcp.h> 16#include <linux/tcp.h>
16#include <linux/udp.h> 17#include <linux/udp.h>
17#include <linux/prefetch.h> 18#include <linux/prefetch.h>
@@ -818,44 +819,70 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
818 struct efx_nic *efx = netdev_priv(net_dev); 819 struct efx_nic *efx = netdev_priv(net_dev);
819 struct efx_channel *channel; 820 struct efx_channel *channel;
820 struct efx_filter_spec spec; 821 struct efx_filter_spec spec;
821 const struct iphdr *ip;
822 const __be16 *ports; 822 const __be16 *ports;
823 __be16 ether_type;
823 int nhoff; 824 int nhoff;
824 int rc; 825 int rc;
825 826
826 nhoff = skb_network_offset(skb); 827 /* The core RPS/RFS code has already parsed and validated
828 * VLAN, IP and transport headers. We assume they are in the
829 * header area.
830 */
827 831
828 if (skb->protocol == htons(ETH_P_8021Q)) { 832 if (skb->protocol == htons(ETH_P_8021Q)) {
829 EFX_BUG_ON_PARANOID(skb_headlen(skb) < 833 const struct vlan_hdr *vh =
830 nhoff + sizeof(struct vlan_hdr)); 834 (const struct vlan_hdr *)skb->data;
831 if (((const struct vlan_hdr *)skb->data + nhoff)->
832 h_vlan_encapsulated_proto != htons(ETH_P_IP))
833 return -EPROTONOSUPPORT;
834 835
835 /* This is IP over 802.1q VLAN. We can't filter on the 836 /* We can't filter on the IP 5-tuple and the vlan
836 * IP 5-tuple and the vlan together, so just strip the 837 * together, so just strip the vlan header and filter
837 * vlan header and filter on the IP part. 838 * on the IP part.
838 */ 839 */
839 nhoff += sizeof(struct vlan_hdr); 840 EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
840 } else if (skb->protocol != htons(ETH_P_IP)) { 841 ether_type = vh->h_vlan_encapsulated_proto;
841 return -EPROTONOSUPPORT; 842 nhoff = sizeof(struct vlan_hdr);
843 } else {
844 ether_type = skb->protocol;
845 nhoff = 0;
842 } 846 }
843 847
844 /* RFS must validate the IP header length before calling us */ 848 if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
845 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
846 ip = (const struct iphdr *)(skb->data + nhoff);
847 if (ip_is_fragment(ip))
848 return -EPROTONOSUPPORT; 849 return -EPROTONOSUPPORT;
849 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
850 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
851 850
852 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 851 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
853 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, 852 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
854 rxq_index); 853 rxq_index);
855 rc = efx_filter_set_ipv4_full(&spec, ip->protocol, 854 spec.match_flags =
856 ip->daddr, ports[1], ip->saddr, ports[0]); 855 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
857 if (rc) 856 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
858 return rc; 857 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
858 spec.ether_type = ether_type;
859
860 if (ether_type == htons(ETH_P_IP)) {
861 const struct iphdr *ip =
862 (const struct iphdr *)(skb->data + nhoff);
863
864 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
865 if (ip_is_fragment(ip))
866 return -EPROTONOSUPPORT;
867 spec.ip_proto = ip->protocol;
868 spec.rem_host[0] = ip->saddr;
869 spec.loc_host[0] = ip->daddr;
870 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
871 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
872 } else {
873 const struct ipv6hdr *ip6 =
874 (const struct ipv6hdr *)(skb->data + nhoff);
875
876 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
877 nhoff + sizeof(*ip6) + 4);
878 spec.ip_proto = ip6->nexthdr;
879 memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
880 memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
881 ports = (const __be16 *)(ip6 + 1);
882 }
883
884 spec.rem_port = ports[0];
885 spec.loc_port = ports[1];
859 886
860 rc = efx->type->filter_rfs_insert(efx, &spec); 887 rc = efx->type->filter_rfs_insert(efx, &spec);
861 if (rc < 0) 888 if (rc < 0)
@@ -866,11 +893,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
866 channel = efx_get_channel(efx, skb_get_rx_queue(skb)); 893 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
867 ++channel->rfs_filters_added; 894 ++channel->rfs_filters_added;
868 895
869 netif_info(efx, rx_status, efx->net_dev, 896 if (ether_type == htons(ETH_P_IP))
870 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", 897 netif_info(efx, rx_status, efx->net_dev,
871 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP", 898 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
872 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]), 899 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
873 rxq_index, flow_id, rc); 900 spec.rem_host, ntohs(ports[0]), spec.loc_host,
901 ntohs(ports[1]), rxq_index, flow_id, rc);
902 else
903 netif_info(efx, rx_status, efx->net_dev,
904 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
905 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
906 spec.rem_host, ntohs(ports[0]), spec.loc_host,
907 ntohs(ports[1]), rxq_index, flow_id, rc);
874 908
875 return rc; 909 return rc;
876} 910}
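
[Editorial note] The rewritten efx_filter_rfs() relies on the RFS core having already validated the VLAN, IP and transport headers, because it is invoked through the ndo_rx_flow_steer hook. The wiring looks roughly like the sketch below; this is an assumption about how the driver registers the callback, not text from this patch:

static const struct net_device_ops example_efx_netdev_ops = {
	/* ... usual ndo_open / ndo_start_xmit / ... entries ... */
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};
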
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index 87698ae0bf75..a2f4a06ffa4e 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -43,13 +43,12 @@ struct efx_self_tests {
43 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; 43 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
44}; 44};
45 45
46extern void efx_loopback_rx_packet(struct efx_nic *efx, 46void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
47 const char *buf_ptr, int pkt_len); 47 int pkt_len);
48extern int efx_selftest(struct efx_nic *efx, 48int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
49 struct efx_self_tests *tests, 49 unsigned flags);
50 unsigned flags); 50void efx_selftest_async_start(struct efx_nic *efx);
51extern void efx_selftest_async_start(struct efx_nic *efx); 51void efx_selftest_async_cancel(struct efx_nic *efx);
52extern void efx_selftest_async_cancel(struct efx_nic *efx); 52void efx_selftest_async_work(struct work_struct *data);
53extern void efx_selftest_async_work(struct work_struct *data);
54 53
55#endif /* EFX_SELFTEST_H */ 54#endif /* EFX_SELFTEST_H */
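Editor's note: dropping extern from these prototypes is purely cosmetic; function declarations have external linkage by default, so symbol visibility and generated code are unchanged. For illustration (hypothetical names):

/* These two declarations are equivalent; only a 'static' qualifier would
 * change the linkage of a function declaration.
 */
extern int run_selftest_old_style(void);
int run_selftest_new_style(void);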
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 2ac91c5b5eea..282692c48e6b 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -17,10 +17,46 @@
17#include <net/ipv6.h> 17#include <net/ipv6.h>
18#include <linux/if_ether.h> 18#include <linux/if_ether.h>
19#include <linux/highmem.h> 19#include <linux/highmem.h>
20#include <linux/cache.h>
20#include "net_driver.h" 21#include "net_driver.h"
21#include "efx.h" 22#include "efx.h"
23#include "io.h"
22#include "nic.h" 24#include "nic.h"
23#include "workarounds.h" 25#include "workarounds.h"
26#include "ef10_regs.h"
27
28#ifdef EFX_USE_PIO
29
30#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
31#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
32unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
33
34#endif /* EFX_USE_PIO */
35
36static inline unsigned int
37efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
38{
39 return tx_queue->insert_count & tx_queue->ptr_mask;
40}
41
42static inline struct efx_tx_buffer *
43__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
44{
45 return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
46}
47
48static inline struct efx_tx_buffer *
49efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
50{
51 struct efx_tx_buffer *buffer =
52 __efx_tx_queue_get_insert_buffer(tx_queue);
53
54 EFX_BUG_ON_PARANOID(buffer->len);
55 EFX_BUG_ON_PARANOID(buffer->flags);
56 EFX_BUG_ON_PARANOID(buffer->unmap_len);
57
58 return buffer;
59}
24 60
25static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 61static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
26 struct efx_tx_buffer *buffer, 62 struct efx_tx_buffer *buffer,
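Editor's note: the new insert-buffer helpers gather the usual power-of-two ring indexing (insert_count & ptr_mask) and the paranoia checks that used to be repeated at each call site. A free-standing illustration of the masking idiom; RING_SIZE is an assumption, not an sfc constant:

#define RING_SIZE 512                   /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

static unsigned int ring_index(unsigned int insert_count)
{
        /* Wraps without a division; also behaves correctly when the
         * free-running counter eventually overflows.
         */
        return insert_count & RING_MASK;
}

/* Example: ring_index(513) == 1, the same as 513 % 512. */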
@@ -83,8 +119,10 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
83 */ 119 */
84 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; 120 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
85 121
86 /* Possibly one more per segment for the alignment workaround */ 122 /* Possibly one more per segment for the alignment workaround,
87 if (EFX_WORKAROUND_5391(efx)) 123 * or for option descriptors
124 */
125 if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
88 max_descs += EFX_TSO_MAX_SEGS; 126 max_descs += EFX_TSO_MAX_SEGS;
89 127
90 /* Possibly more for PCIe page boundaries within input fragments */ 128 /* Possibly more for PCIe page boundaries within input fragments */
@@ -145,6 +183,145 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
145 } 183 }
146} 184}
147 185
186#ifdef EFX_USE_PIO
187
188struct efx_short_copy_buffer {
189 int used;
190 u8 buf[L1_CACHE_BYTES];
191};
192
193/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
194 * Advances piobuf pointer. Leaves additional data in the copy buffer.
195 */
196static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
197 u8 *data, int len,
198 struct efx_short_copy_buffer *copy_buf)
199{
200 int block_len = len & ~(sizeof(copy_buf->buf) - 1);
201
202 memcpy_toio(*piobuf, data, block_len);
203 *piobuf += block_len;
204 len -= block_len;
205
206 if (len) {
207 data += block_len;
208 BUG_ON(copy_buf->used);
209 BUG_ON(len > sizeof(copy_buf->buf));
210 memcpy(copy_buf->buf, data, len);
211 copy_buf->used = len;
212 }
213}
214
215/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
216 * Advances piobuf pointer. Leaves additional data in the copy buffer.
217 */
218static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
219 u8 *data, int len,
220 struct efx_short_copy_buffer *copy_buf)
221{
222 if (copy_buf->used) {
223 /* if the copy buffer is partially full, fill it up and write */
224 int copy_to_buf =
225 min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
226
227 memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
228 copy_buf->used += copy_to_buf;
229
230 /* if we didn't fill it up then we're done for now */
231 if (copy_buf->used < sizeof(copy_buf->buf))
232 return;
233
234 memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
235 *piobuf += sizeof(copy_buf->buf);
236 data += copy_to_buf;
237 len -= copy_to_buf;
238 copy_buf->used = 0;
239 }
240
241 efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
242}
243
244static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
245 struct efx_short_copy_buffer *copy_buf)
246{
247 /* if there's anything in it, write the whole buffer, including junk */
248 if (copy_buf->used)
249 memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
250}
251
 252/* Traverse skb structure and copy fragments into PIO buffer.
253 * Advances piobuf pointer.
254 */
255static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
256 u8 __iomem **piobuf,
257 struct efx_short_copy_buffer *copy_buf)
258{
259 int i;
260
261 efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
262 copy_buf);
263
264 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
265 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
266 u8 *vaddr;
267
268 vaddr = kmap_atomic(skb_frag_page(f));
269
270 efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
271 skb_frag_size(f), copy_buf);
272 kunmap_atomic(vaddr);
273 }
274
275 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
276}
277
278static struct efx_tx_buffer *
279efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
280{
281 struct efx_tx_buffer *buffer =
282 efx_tx_queue_get_insert_buffer(tx_queue);
283 u8 __iomem *piobuf = tx_queue->piobuf;
284
285 /* Copy to PIO buffer. Ensure the writes are padded to the end
286 * of a cache line, as this is required for write-combining to be
287 * effective on at least x86.
288 */
289
290 if (skb_shinfo(skb)->nr_frags) {
291 /* The size of the copy buffer will ensure all writes
292 * are the size of a cache line.
293 */
294 struct efx_short_copy_buffer copy_buf;
295
296 copy_buf.used = 0;
297
298 efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
299 &piobuf, &copy_buf);
300 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
301 } else {
302 /* Pad the write to the size of a cache line.
 303 * We can do this because we know the skb_shared_info struct is
304 * after the source, and the destination buffer is big enough.
305 */
306 BUILD_BUG_ON(L1_CACHE_BYTES >
307 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
308 memcpy_toio(tx_queue->piobuf, skb->data,
309 ALIGN(skb->len, L1_CACHE_BYTES));
310 }
311
312 EFX_POPULATE_QWORD_5(buffer->option,
313 ESF_DZ_TX_DESC_IS_OPT, 1,
314 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
315 ESF_DZ_TX_PIO_CONT, 0,
316 ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
317 ESF_DZ_TX_PIO_BUF_ADDR,
318 tx_queue->piobuf_offset);
319 ++tx_queue->pio_packets;
320 ++tx_queue->insert_count;
321 return buffer;
322}
323#endif /* EFX_USE_PIO */
324
148/* 325/*
149 * Add a socket buffer to a TX queue 326 * Add a socket buffer to a TX queue
150 * 327 *
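Editor's note: the PIO path above never issues a partial cache-line write to the aperture; whole cache-line blocks go out through memcpy_toio immediately, and any shorter tail is parked in the small bounce buffer until it can be flushed as one full line, junk padding and all. A plain-memory model of the empty-buffer case (assumed names, 64-byte line size, ordinary memcpy standing in for memcpy_toio):

#include <string.h>

#define CACHE_LINE 64

struct short_copy_buffer {
        int used;
        unsigned char buf[CACHE_LINE];
};

/* Mirrors efx_memcpy_toio_aligned() for the case where the bounce buffer
 * starts out empty: full blocks are written directly, the tail is parked.
 */
static void copy_aligned(unsigned char **dst, const unsigned char *data,
                         int len, struct short_copy_buffer *cb)
{
        int block_len = len & ~(CACHE_LINE - 1);

        memcpy(*dst, data, block_len);
        *dst += block_len;
        len -= block_len;

        if (len) {
                memcpy(cb->buf, data + block_len, len);
                cb->used = len;
        }
}

static void flush_copy_buffer(unsigned char *dst, struct short_copy_buffer *cb)
{
        if (cb->used)           /* write the whole line, including junk */
                memcpy(dst, cb->buf, CACHE_LINE);
}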
@@ -167,7 +344,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
167 struct device *dma_dev = &efx->pci_dev->dev; 344 struct device *dma_dev = &efx->pci_dev->dev;
168 struct efx_tx_buffer *buffer; 345 struct efx_tx_buffer *buffer;
169 skb_frag_t *fragment; 346 skb_frag_t *fragment;
170 unsigned int len, unmap_len = 0, insert_ptr; 347 unsigned int len, unmap_len = 0;
171 dma_addr_t dma_addr, unmap_addr = 0; 348 dma_addr_t dma_addr, unmap_addr = 0;
172 unsigned int dma_len; 349 unsigned int dma_len;
173 unsigned short dma_flags; 350 unsigned short dma_flags;
@@ -189,6 +366,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
189 return NETDEV_TX_OK; 366 return NETDEV_TX_OK;
190 } 367 }
191 368
369 /* Consider using PIO for short packets */
370#ifdef EFX_USE_PIO
371 if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
372 efx_nic_tx_is_empty(tx_queue) &&
373 efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
374 buffer = efx_enqueue_skb_pio(tx_queue, skb);
375 dma_flags = EFX_TX_BUF_OPTION;
376 goto finish_packet;
377 }
378#endif
379
192 /* Map for DMA. Use dma_map_single rather than dma_map_page 380 /* Map for DMA. Use dma_map_single rather than dma_map_page
193 * since this is more efficient on machines with sparse 381 * since this is more efficient on machines with sparse
194 * memory. 382 * memory.
@@ -208,11 +396,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
208 396
209 /* Add to TX queue, splitting across DMA boundaries */ 397 /* Add to TX queue, splitting across DMA boundaries */
210 do { 398 do {
211 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 399 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
212 buffer = &tx_queue->buffer[insert_ptr];
213 EFX_BUG_ON_PARANOID(buffer->flags);
214 EFX_BUG_ON_PARANOID(buffer->len);
215 EFX_BUG_ON_PARANOID(buffer->unmap_len);
216 400
217 dma_len = efx_max_tx_len(efx, dma_addr); 401 dma_len = efx_max_tx_len(efx, dma_addr);
218 if (likely(dma_len >= len)) 402 if (likely(dma_len >= len))
@@ -245,6 +429,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
245 } 429 }
246 430
247 /* Transfer ownership of the skb to the final buffer */ 431 /* Transfer ownership of the skb to the final buffer */
432finish_packet:
248 buffer->skb = skb; 433 buffer->skb = skb;
249 buffer->flags = EFX_TX_BUF_SKB | dma_flags; 434 buffer->flags = EFX_TX_BUF_SKB | dma_flags;
250 435
@@ -270,8 +455,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
270 while (tx_queue->insert_count != tx_queue->write_count) { 455 while (tx_queue->insert_count != tx_queue->write_count) {
271 unsigned int pkts_compl = 0, bytes_compl = 0; 456 unsigned int pkts_compl = 0, bytes_compl = 0;
272 --tx_queue->insert_count; 457 --tx_queue->insert_count;
273 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 458 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
274 buffer = &tx_queue->buffer[insert_ptr];
275 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 459 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
276 } 460 }
277 461
@@ -628,6 +812,9 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
628 * @tcp_off: Offset of TCP header 812 * @tcp_off: Offset of TCP header
629 * @header_len: Number of bytes of header 813 * @header_len: Number of bytes of header
630 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload 814 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
815 * @header_dma_addr: Header DMA address, when using option descriptors
816 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
817 * descriptors
631 * 818 *
632 * The state used during segmentation. It is put into this data structure 819 * The state used during segmentation. It is put into this data structure
633 * just to make it easy to pass into inline functions. 820 * just to make it easy to pass into inline functions.
@@ -636,7 +823,7 @@ struct tso_state {
636 /* Output position */ 823 /* Output position */
637 unsigned out_len; 824 unsigned out_len;
638 unsigned seqnum; 825 unsigned seqnum;
639 unsigned ipv4_id; 826 u16 ipv4_id;
640 unsigned packet_space; 827 unsigned packet_space;
641 828
642 /* Input position */ 829 /* Input position */
@@ -651,6 +838,8 @@ struct tso_state {
651 unsigned int tcp_off; 838 unsigned int tcp_off;
652 unsigned header_len; 839 unsigned header_len;
653 unsigned int ip_base_len; 840 unsigned int ip_base_len;
841 dma_addr_t header_dma_addr;
842 unsigned int header_unmap_len;
654}; 843};
655 844
656 845
@@ -737,23 +926,18 @@ static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
737{ 926{
738 struct efx_tx_buffer *buffer; 927 struct efx_tx_buffer *buffer;
739 struct efx_nic *efx = tx_queue->efx; 928 struct efx_nic *efx = tx_queue->efx;
740 unsigned dma_len, insert_ptr; 929 unsigned dma_len;
741 930
742 EFX_BUG_ON_PARANOID(len <= 0); 931 EFX_BUG_ON_PARANOID(len <= 0);
743 932
744 while (1) { 933 while (1) {
745 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 934 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
746 buffer = &tx_queue->buffer[insert_ptr];
747 ++tx_queue->insert_count; 935 ++tx_queue->insert_count;
748 936
749 EFX_BUG_ON_PARANOID(tx_queue->insert_count - 937 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
750 tx_queue->read_count >= 938 tx_queue->read_count >=
751 efx->txq_entries); 939 efx->txq_entries);
752 940
753 EFX_BUG_ON_PARANOID(buffer->len);
754 EFX_BUG_ON_PARANOID(buffer->unmap_len);
755 EFX_BUG_ON_PARANOID(buffer->flags);
756
757 buffer->dma_addr = dma_addr; 941 buffer->dma_addr = dma_addr;
758 942
759 dma_len = efx_max_tx_len(efx, dma_addr); 943 dma_len = efx_max_tx_len(efx, dma_addr);
@@ -814,19 +998,27 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
814 /* Work backwards until we hit the original insert pointer value */ 998 /* Work backwards until we hit the original insert pointer value */
815 while (tx_queue->insert_count != tx_queue->write_count) { 999 while (tx_queue->insert_count != tx_queue->write_count) {
816 --tx_queue->insert_count; 1000 --tx_queue->insert_count;
817 buffer = &tx_queue->buffer[tx_queue->insert_count & 1001 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
818 tx_queue->ptr_mask];
819 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); 1002 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
820 } 1003 }
821} 1004}
822 1005
823 1006
824/* Parse the SKB header and initialise state. */ 1007/* Parse the SKB header and initialise state. */
825static void tso_start(struct tso_state *st, const struct sk_buff *skb) 1008static int tso_start(struct tso_state *st, struct efx_nic *efx,
1009 const struct sk_buff *skb)
826{ 1010{
1011 bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
1012 struct device *dma_dev = &efx->pci_dev->dev;
1013 unsigned int header_len, in_len;
1014 dma_addr_t dma_addr;
1015
827 st->ip_off = skb_network_header(skb) - skb->data; 1016 st->ip_off = skb_network_header(skb) - skb->data;
828 st->tcp_off = skb_transport_header(skb) - skb->data; 1017 st->tcp_off = skb_transport_header(skb) - skb->data;
829 st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); 1018 header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
1019 in_len = skb_headlen(skb) - header_len;
1020 st->header_len = header_len;
1021 st->in_len = in_len;
830 if (st->protocol == htons(ETH_P_IP)) { 1022 if (st->protocol == htons(ETH_P_IP)) {
831 st->ip_base_len = st->header_len - st->ip_off; 1023 st->ip_base_len = st->header_len - st->ip_off;
832 st->ipv4_id = ntohs(ip_hdr(skb)->id); 1024 st->ipv4_id = ntohs(ip_hdr(skb)->id);
@@ -840,9 +1032,34 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
840 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); 1032 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
841 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); 1033 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
842 1034
843 st->out_len = skb->len - st->header_len; 1035 st->out_len = skb->len - header_len;
844 st->unmap_len = 0; 1036
845 st->dma_flags = 0; 1037 if (!use_options) {
1038 st->header_unmap_len = 0;
1039
1040 if (likely(in_len == 0)) {
1041 st->dma_flags = 0;
1042 st->unmap_len = 0;
1043 return 0;
1044 }
1045
1046 dma_addr = dma_map_single(dma_dev, skb->data + header_len,
1047 in_len, DMA_TO_DEVICE);
1048 st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
1049 st->dma_addr = dma_addr;
1050 st->unmap_addr = dma_addr;
1051 st->unmap_len = in_len;
1052 } else {
1053 dma_addr = dma_map_single(dma_dev, skb->data,
1054 skb_headlen(skb), DMA_TO_DEVICE);
1055 st->header_dma_addr = dma_addr;
1056 st->header_unmap_len = skb_headlen(skb);
1057 st->dma_flags = 0;
1058 st->dma_addr = dma_addr + header_len;
1059 st->unmap_len = 0;
1060 }
1061
1062 return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
846} 1063}
847 1064
848static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, 1065static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
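Editor's note: tso_start() now derives header_len and in_len up front and uses them to choose between the two mapping strategies; header_len itself comes from tcp->doff, which counts 32-bit words. A worked example of that arithmetic (the packet layout is illustrative, not taken from the patch):

#include <stdio.h>

int main(void)
{
        unsigned int tcp_off = 14 + 20;         /* Ethernet + minimal IPv4 header */
        unsigned int doff = 8;                  /* 20-byte TCP header + 12 bytes of options */
        unsigned int header_len = tcp_off + (doff << 2);

        printf("header_len = %u\n", header_len);        /* prints 66 */
        return 0;
}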
@@ -860,24 +1077,6 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
860 return -ENOMEM; 1077 return -ENOMEM;
861} 1078}
862 1079
863static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
864 const struct sk_buff *skb)
865{
866 int hl = st->header_len;
867 int len = skb_headlen(skb) - hl;
868
869 st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
870 len, DMA_TO_DEVICE);
871 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
872 st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
873 st->unmap_len = len;
874 st->in_len = len;
875 st->dma_addr = st->unmap_addr;
876 return 0;
877 }
878 return -ENOMEM;
879}
880
881 1080
882/** 1081/**
883 * tso_fill_packet_with_fragment - form descriptors for the current fragment 1082 * tso_fill_packet_with_fragment - form descriptors for the current fragment
@@ -944,55 +1143,97 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
944 struct tso_state *st) 1143 struct tso_state *st)
945{ 1144{
946 struct efx_tx_buffer *buffer = 1145 struct efx_tx_buffer *buffer =
947 &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; 1146 efx_tx_queue_get_insert_buffer(tx_queue);
948 struct tcphdr *tsoh_th; 1147 bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
949 unsigned ip_length; 1148 u8 tcp_flags_clear;
950 u8 *header;
951 int rc;
952 1149
953 /* Allocate and insert a DMA-mapped header buffer. */ 1150 if (!is_last) {
954 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
955 if (!header)
956 return -ENOMEM;
957
958 tsoh_th = (struct tcphdr *)(header + st->tcp_off);
959
960 /* Copy and update the headers. */
961 memcpy(header, skb->data, st->header_len);
962
963 tsoh_th->seq = htonl(st->seqnum);
964 st->seqnum += skb_shinfo(skb)->gso_size;
965 if (st->out_len > skb_shinfo(skb)->gso_size) {
966 /* This packet will not finish the TSO burst. */
967 st->packet_space = skb_shinfo(skb)->gso_size; 1151 st->packet_space = skb_shinfo(skb)->gso_size;
968 tsoh_th->fin = 0; 1152 tcp_flags_clear = 0x09; /* mask out FIN and PSH */
969 tsoh_th->psh = 0;
970 } else { 1153 } else {
971 /* This packet will be the last in the TSO burst. */
972 st->packet_space = st->out_len; 1154 st->packet_space = st->out_len;
973 tsoh_th->fin = tcp_hdr(skb)->fin; 1155 tcp_flags_clear = 0x00;
974 tsoh_th->psh = tcp_hdr(skb)->psh;
975 } 1156 }
976 ip_length = st->ip_base_len + st->packet_space;
977 1157
978 if (st->protocol == htons(ETH_P_IP)) { 1158 if (!st->header_unmap_len) {
979 struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off); 1159 /* Allocate and insert a DMA-mapped header buffer. */
1160 struct tcphdr *tsoh_th;
1161 unsigned ip_length;
1162 u8 *header;
1163 int rc;
1164
1165 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
1166 if (!header)
1167 return -ENOMEM;
980 1168
981 tsoh_iph->tot_len = htons(ip_length); 1169 tsoh_th = (struct tcphdr *)(header + st->tcp_off);
1170
1171 /* Copy and update the headers. */
1172 memcpy(header, skb->data, st->header_len);
1173
1174 tsoh_th->seq = htonl(st->seqnum);
1175 ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
1176
1177 ip_length = st->ip_base_len + st->packet_space;
1178
1179 if (st->protocol == htons(ETH_P_IP)) {
1180 struct iphdr *tsoh_iph =
1181 (struct iphdr *)(header + st->ip_off);
1182
1183 tsoh_iph->tot_len = htons(ip_length);
1184 tsoh_iph->id = htons(st->ipv4_id);
1185 } else {
1186 struct ipv6hdr *tsoh_iph =
1187 (struct ipv6hdr *)(header + st->ip_off);
1188
1189 tsoh_iph->payload_len = htons(ip_length);
1190 }
982 1191
983 /* Linux leaves suitable gaps in the IP ID space for us to fill. */ 1192 rc = efx_tso_put_header(tx_queue, buffer, header);
984 tsoh_iph->id = htons(st->ipv4_id); 1193 if (unlikely(rc))
985 st->ipv4_id++; 1194 return rc;
986 } else { 1195 } else {
987 struct ipv6hdr *tsoh_iph = 1196 /* Send the original headers with a TSO option descriptor
988 (struct ipv6hdr *)(header + st->ip_off); 1197 * in front
1198 */
1199 u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
989 1200
990 tsoh_iph->payload_len = htons(ip_length); 1201 buffer->flags = EFX_TX_BUF_OPTION;
1202 buffer->len = 0;
1203 buffer->unmap_len = 0;
1204 EFX_POPULATE_QWORD_5(buffer->option,
1205 ESF_DZ_TX_DESC_IS_OPT, 1,
1206 ESF_DZ_TX_OPTION_TYPE,
1207 ESE_DZ_TX_OPTION_DESC_TSO,
1208 ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
1209 ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
1210 ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
1211 ++tx_queue->insert_count;
1212
1213 /* We mapped the headers in tso_start(). Unmap them
1214 * when the last segment is completed.
1215 */
1216 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
1217 buffer->dma_addr = st->header_dma_addr;
1218 buffer->len = st->header_len;
1219 if (is_last) {
1220 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
1221 buffer->unmap_len = st->header_unmap_len;
1222 /* Ensure we only unmap them once in case of a
1223 * later DMA mapping error and rollback
1224 */
1225 st->header_unmap_len = 0;
1226 } else {
1227 buffer->flags = EFX_TX_BUF_CONT;
1228 buffer->unmap_len = 0;
1229 }
1230 ++tx_queue->insert_count;
991 } 1231 }
992 1232
993 rc = efx_tso_put_header(tx_queue, buffer, header); 1233 st->seqnum += skb_shinfo(skb)->gso_size;
994 if (unlikely(rc)) 1234
995 return rc; 1235 /* Linux leaves suitable gaps in the IP ID space for us to fill. */
1236 ++st->ipv4_id;
996 1237
997 ++tx_queue->tso_packets; 1238 ++tx_queue->tso_packets;
998 1239
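Editor's note: the open-coded ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear works because byte 13 of the TCP header carries the flag bits, with FIN = 0x01 and PSH = 0x08; clearing 0x09 on every non-final segment therefore matches the old tsoh_th->fin = tsoh_th->psh = 0, while the final segment keeps its original flags. A minimal stand-alone restatement (names are illustrative):

#include <stdint.h>

#define TCP_FLAG_FIN 0x01
#define TCP_FLAG_PSH 0x08

static void clear_mid_segment_flags(uint8_t *tcp_hdr_bytes, int is_last)
{
        uint8_t clear = is_last ? 0x00 : (TCP_FLAG_FIN | TCP_FLAG_PSH);

        /* Offset 13 of the TCP header is the flags byte. */
        tcp_hdr_bytes[13] &= ~clear;
}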
@@ -1023,12 +1264,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1023 1264
1024 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 1265 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
1025 1266
1026 tso_start(&state, skb); 1267 rc = tso_start(&state, efx, skb);
1268 if (rc)
1269 goto mem_err;
1027 1270
1028 /* Assume that skb header area contains exactly the headers, and 1271 if (likely(state.in_len == 0)) {
1029 * all payload is in the frag list.
1030 */
1031 if (skb_headlen(skb) == state.header_len) {
1032 /* Grab the first payload fragment. */ 1272 /* Grab the first payload fragment. */
1033 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); 1273 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1034 frag_i = 0; 1274 frag_i = 0;
@@ -1037,9 +1277,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1037 if (rc) 1277 if (rc)
1038 goto mem_err; 1278 goto mem_err;
1039 } else { 1279 } else {
1040 rc = tso_get_head_fragment(&state, efx, skb); 1280 /* Payload starts in the header area. */
1041 if (rc)
1042 goto mem_err;
1043 frag_i = -1; 1281 frag_i = -1;
1044 } 1282 }
1045 1283
@@ -1091,6 +1329,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1091 state.unmap_len, DMA_TO_DEVICE); 1329 state.unmap_len, DMA_TO_DEVICE);
1092 } 1330 }
1093 1331
1332 /* Free the header DMA mapping, if using option descriptors */
1333 if (state.header_unmap_len)
1334 dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
1335 state.header_unmap_len, DMA_TO_DEVICE);
1336
1094 efx_enqueue_unwind(tx_queue); 1337 efx_enqueue_unwind(tx_queue);
1095 return NETDEV_TX_OK; 1338 return NETDEV_TX_OK;
1096} 1339}
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 770036bc2d87..513ed8b1ba58 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -839,7 +839,7 @@ static int meth_probe(struct platform_device *pdev)
839 dev->watchdog_timeo = timeout; 839 dev->watchdog_timeo = timeout;
840 dev->irq = MACE_ETHERNET_IRQ; 840 dev->irq = MACE_ETHERNET_IRQ;
841 dev->base_addr = (unsigned long)&mace->eth; 841 dev->base_addr = (unsigned long)&mace->eth;
842 memcpy(dev->dev_addr, o2meth_eaddr, 6); 842 memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
843 843
844 priv = netdev_priv(dev); 844 priv = netdev_priv(dev);
845 spin_lock_init(&priv->meth_lock); 845 spin_lock_init(&priv->meth_lock);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index ee18e6f7b4fe..acbbe48a519c 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1921,7 +1921,6 @@ static void sis190_remove_one(struct pci_dev *pdev)
1921 cancel_work_sync(&tp->phy_task); 1921 cancel_work_sync(&tp->phy_task);
1922 unregister_netdev(dev); 1922 unregister_netdev(dev);
1923 sis190_release_board(pdev); 1923 sis190_release_board(pdev);
1924 pci_set_drvdata(pdev, NULL);
1925} 1924}
1926 1925
1927static struct pci_driver sis190_pci_driver = { 1926static struct pci_driver sis190_pci_driver = {
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 03b256af7ed5..8c5c24a16f8a 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1535,7 +1535,6 @@ static void epic_remove_one(struct pci_dev *pdev)
1535 pci_release_regions(pdev); 1535 pci_release_regions(pdev);
1536 free_netdev(dev); 1536 free_netdev(dev);
1537 pci_disable_device(pdev); 1537 pci_disable_device(pdev);
1538 pci_set_drvdata(pdev, NULL);
1539 /* pci_power_off(pdev, -1); */ 1538 /* pci_power_off(pdev, -1); */
1540} 1539}
1541 1540
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 5fdbc2686eb3..01f8459c3213 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2502,7 +2502,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2502 SMSC_TRACE(pdata, probe, 2502 SMSC_TRACE(pdata, probe,
2503 "MAC Address is specified by configuration"); 2503 "MAC Address is specified by configuration");
2504 } else if (is_valid_ether_addr(pdata->config.mac)) { 2504 } else if (is_valid_ether_addr(pdata->config.mac)) {
2505 memcpy(dev->dev_addr, pdata->config.mac, 6); 2505 memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN);
2506 SMSC_TRACE(pdata, probe, 2506 SMSC_TRACE(pdata, probe,
2507 "MAC Address specified by platform data"); 2507 "MAC Address specified by platform data");
2508 } else { 2508 } else {
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 5f9e79f7f2df..e55e3365a306 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1707,8 +1707,6 @@ static void smsc9420_remove(struct pci_dev *pdev)
1707 if (!dev) 1707 if (!dev)
1708 return; 1708 return;
1709 1709
1710 pci_set_drvdata(pdev, NULL);
1711
1712 pd = netdev_priv(dev); 1710 pd = netdev_priv(dev);
1713 unregister_netdev(dev); 1711 unregister_netdev(dev);
1714 1712
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7eb8babed2cb..fc94f202a43e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -451,14 +451,14 @@ struct mac_device_info {
451struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr); 451struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
452struct mac_device_info *dwmac100_setup(void __iomem *ioaddr); 452struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
453 453
454extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], 454void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
455 unsigned int high, unsigned int low); 455 unsigned int high, unsigned int low);
456extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, 456void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
457 unsigned int high, unsigned int low); 457 unsigned int high, unsigned int low);
458 458
459extern void stmmac_set_mac(void __iomem *ioaddr, bool enable); 459void stmmac_set_mac(void __iomem *ioaddr, bool enable);
460 460
461extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); 461void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
462extern const struct stmmac_ring_mode_ops ring_mode_ops; 462extern const struct stmmac_ring_mode_ops ring_mode_ops;
463extern const struct stmmac_chain_mode_ops chain_mode_ops; 463extern const struct stmmac_chain_mode_ops chain_mode_ops;
464 464
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 8e5662ce488b..def266da55db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -104,14 +104,13 @@
104#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ 104#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
105#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ 105#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
106 106
107extern void dwmac_enable_dma_transmission(void __iomem *ioaddr); 107void dwmac_enable_dma_transmission(void __iomem *ioaddr);
108extern void dwmac_enable_dma_irq(void __iomem *ioaddr); 108void dwmac_enable_dma_irq(void __iomem *ioaddr);
109extern void dwmac_disable_dma_irq(void __iomem *ioaddr); 109void dwmac_disable_dma_irq(void __iomem *ioaddr);
110extern void dwmac_dma_start_tx(void __iomem *ioaddr); 110void dwmac_dma_start_tx(void __iomem *ioaddr);
111extern void dwmac_dma_stop_tx(void __iomem *ioaddr); 111void dwmac_dma_stop_tx(void __iomem *ioaddr);
112extern void dwmac_dma_start_rx(void __iomem *ioaddr); 112void dwmac_dma_start_rx(void __iomem *ioaddr);
113extern void dwmac_dma_stop_rx(void __iomem *ioaddr); 113void dwmac_dma_stop_rx(void __iomem *ioaddr);
114extern int dwmac_dma_interrupt(void __iomem *ioaddr, 114int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
115 struct stmmac_extra_stats *x);
116 115
117#endif /* __DWMAC_DMA_H__ */ 116#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 48ec001566b5..8607488cbcfc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -128,8 +128,8 @@ struct stmmac_counters {
128 unsigned int mmc_rx_icmp_err_octets; 128 unsigned int mmc_rx_icmp_err_octets;
129}; 129};
130 130
131extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); 131void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
132extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); 132void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
133extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); 133void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
134 134
135#endif /* __MMC_H__ */ 135#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f16a9bdf45bb..22f89ffdfd95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -110,14 +110,14 @@ struct stmmac_priv {
110 110
111extern int phyaddr; 111extern int phyaddr;
112 112
113extern int stmmac_mdio_unregister(struct net_device *ndev); 113int stmmac_mdio_unregister(struct net_device *ndev);
114extern int stmmac_mdio_register(struct net_device *ndev); 114int stmmac_mdio_register(struct net_device *ndev);
115extern void stmmac_set_ethtool_ops(struct net_device *netdev); 115void stmmac_set_ethtool_ops(struct net_device *netdev);
116extern const struct stmmac_desc_ops enh_desc_ops; 116extern const struct stmmac_desc_ops enh_desc_ops;
117extern const struct stmmac_desc_ops ndesc_ops; 117extern const struct stmmac_desc_ops ndesc_ops;
118extern const struct stmmac_hwtimestamp stmmac_ptp; 118extern const struct stmmac_hwtimestamp stmmac_ptp;
119extern int stmmac_ptp_register(struct stmmac_priv *priv); 119int stmmac_ptp_register(struct stmmac_priv *priv);
120extern void stmmac_ptp_unregister(struct stmmac_priv *priv); 120void stmmac_ptp_unregister(struct stmmac_priv *priv);
121int stmmac_freeze(struct net_device *ndev); 121int stmmac_freeze(struct net_device *ndev);
122int stmmac_restore(struct net_device *ndev); 122int stmmac_restore(struct net_device *ndev);
123int stmmac_resume(struct net_device *ndev); 123int stmmac_resume(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 023b7c29cb2f..644d80ece067 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -138,7 +138,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
138 138
139 stmmac_dvr_remove(ndev); 139 stmmac_dvr_remove(ndev);
140 140
141 pci_set_drvdata(pdev, NULL);
142 pci_iounmap(pdev, priv->ioaddr); 141 pci_iounmap(pdev, priv->ioaddr);
143 pci_release_regions(pdev); 142 pci_release_regions(pdev);
144 pci_disable_device(pdev); 143 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 759441b29e53..b4d50d74ba18 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3354,7 +3354,7 @@ use_random_mac_addr:
3354#if defined(CONFIG_SPARC) 3354#if defined(CONFIG_SPARC)
3355 addr = of_get_property(cp->of_node, "local-mac-address", NULL); 3355 addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3356 if (addr != NULL) { 3356 if (addr != NULL) {
3357 memcpy(dev_addr, addr, 6); 3357 memcpy(dev_addr, addr, ETH_ALEN);
3358 goto done; 3358 goto done;
3359 } 3359 }
3360#endif 3360#endif
@@ -5168,7 +5168,6 @@ err_out_free_netdev:
5168 5168
5169err_out_disable_pdev: 5169err_out_disable_pdev:
5170 pci_disable_device(pdev); 5170 pci_disable_device(pdev);
5171 pci_set_drvdata(pdev, NULL);
5172 return -ENODEV; 5171 return -ENODEV;
5173} 5172}
5174 5173
@@ -5206,7 +5205,6 @@ static void cas_remove_one(struct pci_dev *pdev)
5206 free_netdev(dev); 5205 free_netdev(dev);
5207 pci_release_regions(pdev); 5206 pci_release_regions(pdev);
5208 pci_disable_device(pdev); 5207 pci_disable_device(pdev);
5209 pci_set_drvdata(pdev, NULL);
5210} 5208}
5211 5209
5212#ifdef CONFIG_PM 5210#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index e62df2b81302..a235bd9fd980 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2779,7 +2779,7 @@ static int gem_get_device_address(struct gem *gp)
2779 return -1; 2779 return -1;
2780#endif 2780#endif
2781 } 2781 }
2782 memcpy(dev->dev_addr, addr, 6); 2782 memcpy(dev->dev_addr, addr, ETH_ALEN);
2783#else 2783#else
2784 get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); 2784 get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
2785#endif 2785#endif
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index e37b587b3860..0dbf46f08ed5 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2675,10 +2675,10 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2675 2675
2676 addr = of_get_property(dp, "local-mac-address", &len); 2676 addr = of_get_property(dp, "local-mac-address", &len);
2677 2677
2678 if (qfe_slot != -1 && addr && len == 6) 2678 if (qfe_slot != -1 && addr && len == ETH_ALEN)
2679 memcpy(dev->dev_addr, addr, 6); 2679 memcpy(dev->dev_addr, addr, ETH_ALEN);
2680 else 2680 else
2681 memcpy(dev->dev_addr, idprom->id_ethaddr, 6); 2681 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
2682 } 2682 }
2683 2683
2684 hp = netdev_priv(dev); 2684 hp = netdev_priv(dev);
@@ -3024,9 +3024,9 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
3024 (addr = of_get_property(dp, "local-mac-address", &len)) 3024 (addr = of_get_property(dp, "local-mac-address", &len))
3025 != NULL && 3025 != NULL &&
3026 len == 6) { 3026 len == 6) {
3027 memcpy(dev->dev_addr, addr, 6); 3027 memcpy(dev->dev_addr, addr, ETH_ALEN);
3028 } else { 3028 } else {
3029 memcpy(dev->dev_addr, idprom->id_ethaddr, 6); 3029 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
3030 } 3030 }
3031#else 3031#else
3032 get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]); 3032 get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
@@ -3170,8 +3170,6 @@ static void happy_meal_pci_remove(struct pci_dev *pdev)
3170 pci_release_regions(hp->happy_dev); 3170 pci_release_regions(hp->happy_dev);
3171 3171
3172 free_netdev(net_dev); 3172 free_netdev(net_dev);
3173
3174 pci_set_drvdata(pdev, NULL);
3175} 3173}
3176 3174
3177static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = { 3175static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index b072f4dba033..5695ae2411de 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -843,7 +843,7 @@ static int qec_ether_init(struct platform_device *op)
843 if (!dev) 843 if (!dev)
844 return -ENOMEM; 844 return -ENOMEM;
845 845
846 memcpy(dev->dev_addr, idprom->id_ethaddr, 6); 846 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
847 847
848 qe = netdev_priv(dev); 848 qe = netdev_priv(dev);
849 849
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index de71b1ec4625..53150c25a96b 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -49,11 +49,19 @@ config TI_DAVINCI_CPDMA
49 To compile this driver as a module, choose M here: the module 49 To compile this driver as a module, choose M here: the module
50 will be called davinci_cpdma. This is recommended. 50 will be called davinci_cpdma. This is recommended.
51 51
52config TI_CPSW_PHY_SEL
53 boolean "TI CPSW Switch Phy sel Support"
54 depends on TI_CPSW
55 ---help---
56 This driver supports configuring of the phy mode connected to
57 the CPSW.
58
52config TI_CPSW 59config TI_CPSW
53 tristate "TI CPSW Switch Support" 60 tristate "TI CPSW Switch Support"
54 depends on ARM && (ARCH_DAVINCI || SOC_AM33XX) 61 depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
55 select TI_DAVINCI_CPDMA 62 select TI_DAVINCI_CPDMA
56 select TI_DAVINCI_MDIO 63 select TI_DAVINCI_MDIO
64 select TI_CPSW_PHY_SEL
57 ---help--- 65 ---help---
58 This driver supports TI's CPSW Ethernet Switch. 66 This driver supports TI's CPSW Ethernet Switch.
59 67
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index c65148e8aa1d..9cfaab8152be 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_CPMAC) += cpmac.o
7obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o 7obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
8obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o 8obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
9obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o 9obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
10obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
10obj-$(CONFIG_TI_CPSW) += ti_cpsw.o 11obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
11ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o 12ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
new file mode 100644
index 000000000000..148da9ae8366
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -0,0 +1,161 @@
1/* Texas Instruments Ethernet Switch Driver
2 *
3 * Copyright (C) 2013 Texas Instruments
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
10 * kind, whether express or implied; without even the implied warranty
11 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/platform_device.h>
16#include <linux/module.h>
17#include <linux/netdevice.h>
18#include <linux/phy.h>
19#include <linux/of.h>
20#include <linux/of_device.h>
21
22#include "cpsw.h"
23
24/* AM33xx SoC specific definitions for the CONTROL port */
25#define AM33XX_GMII_SEL_MODE_MII 0
26#define AM33XX_GMII_SEL_MODE_RMII 1
27#define AM33XX_GMII_SEL_MODE_RGMII 2
28
29#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
30#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
31
32struct cpsw_phy_sel_priv {
33 struct device *dev;
34 u32 __iomem *gmii_sel;
35 bool rmii_clock_external;
36 void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv,
37 phy_interface_t phy_mode, int slave);
38};
39
40
41static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
42 phy_interface_t phy_mode, int slave)
43{
44 u32 reg;
45 u32 mask;
46 u32 mode = 0;
47
48 reg = readl(priv->gmii_sel);
49
50 switch (phy_mode) {
51 case PHY_INTERFACE_MODE_RMII:
52 mode = AM33XX_GMII_SEL_MODE_RMII;
53 break;
54
55 case PHY_INTERFACE_MODE_RGMII:
56 case PHY_INTERFACE_MODE_RGMII_ID:
57 case PHY_INTERFACE_MODE_RGMII_RXID:
58 case PHY_INTERFACE_MODE_RGMII_TXID:
59 mode = AM33XX_GMII_SEL_MODE_RGMII;
60 break;
61
62 case PHY_INTERFACE_MODE_MII:
63 default:
64 mode = AM33XX_GMII_SEL_MODE_MII;
65 break;
66 };
67
68 mask = 0x3 << (slave * 2) | BIT(slave + 6);
69 mode <<= slave * 2;
70
71 if (priv->rmii_clock_external) {
72 if (slave == 0)
73 mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
74 else
75 mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
76 }
77
78 reg &= ~mask;
79 reg |= mode;
80
81 writel(reg, priv->gmii_sel);
82}
83
84static struct platform_driver cpsw_phy_sel_driver;
85static int match(struct device *dev, void *data)
86{
87 struct device_node *node = (struct device_node *)data;
88 return dev->of_node == node &&
89 dev->driver == &cpsw_phy_sel_driver.driver;
90}
91
92void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
93{
94 struct device_node *node;
95 struct cpsw_phy_sel_priv *priv;
96
97 node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
98 if (!node) {
99 dev_err(dev, "Phy mode driver DT not found\n");
100 return;
101 }
102
103 dev = bus_find_device(&platform_bus_type, NULL, node, match);
104 priv = dev_get_drvdata(dev);
105
106 priv->cpsw_phy_sel(priv, phy_mode, slave);
107}
108EXPORT_SYMBOL_GPL(cpsw_phy_sel);
109
110static const struct of_device_id cpsw_phy_sel_id_table[] = {
111 {
112 .compatible = "ti,am3352-cpsw-phy-sel",
113 .data = &cpsw_gmii_sel_am3352,
114 },
115 {}
116};
117MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
118
119static int cpsw_phy_sel_probe(struct platform_device *pdev)
120{
121 struct resource *res;
122 const struct of_device_id *of_id;
123 struct cpsw_phy_sel_priv *priv;
124
125 of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node);
126 if (!of_id)
127 return -EINVAL;
128
129 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
130 if (!priv) {
131 dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n");
132 return -ENOMEM;
133 }
134
135 priv->cpsw_phy_sel = of_id->data;
136
137 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
138 priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res);
139 if (IS_ERR(priv->gmii_sel))
140 return PTR_ERR(priv->gmii_sel);
141
142 if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
143 priv->rmii_clock_external = true;
144
145 dev_set_drvdata(&pdev->dev, priv);
146
147 return 0;
148}
149
150static struct platform_driver cpsw_phy_sel_driver = {
151 .probe = cpsw_phy_sel_probe,
152 .driver = {
153 .name = "cpsw-phy-sel",
154 .owner = THIS_MODULE,
155 .of_match_table = cpsw_phy_sel_id_table,
156 },
157};
158
159module_platform_driver(cpsw_phy_sel_driver);
160MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
161MODULE_LICENSE("GPL v2");
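Editor's note: cpsw_gmii_sel_am3352() packs each slave's interface mode into a 2-bit field and its RMII clock source into a single bit; for slave N the mode occupies bits 2N+1:2N and the clock bit is bit N+6, which is what mask = 0x3 << (slave * 2) | BIT(slave + 6) expresses. A small worked example of the read-modify-write, as a userspace harness with assumed constants:

#include <stdint.h>
#include <stdio.h>

static uint32_t gmii_sel_update(uint32_t reg, int slave, uint32_t mode,
                                int rmii_clk_external)
{
        uint32_t mask = (0x3u << (slave * 2)) | (1u << (slave + 6));
        uint32_t val = mode << (slave * 2);

        if (rmii_clk_external)
                val |= 1u << (slave + 6);

        return (reg & ~mask) | val;     /* only this slave's fields change */
}

int main(void)
{
        /* Slave 1, RGMII (mode 2), internal clock: bits 3:2 become 10b. */
        printf("0x%08x\n", (unsigned)gmii_sel_update(0, 1, 2, 0));      /* 0x00000008 */
        return 0;
}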
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index cc3ce557e4aa..90d41d26ec6d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -367,8 +367,6 @@ struct cpsw_priv {
367 spinlock_t lock; 367 spinlock_t lock;
368 struct platform_device *pdev; 368 struct platform_device *pdev;
369 struct net_device *ndev; 369 struct net_device *ndev;
370 struct resource *cpsw_res;
371 struct resource *cpsw_wr_res;
372 struct napi_struct napi; 370 struct napi_struct napi;
373 struct device *dev; 371 struct device *dev;
374 struct cpsw_platform_data data; 372 struct cpsw_platform_data data;
@@ -1016,6 +1014,10 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1016 dev_info(priv->dev, "phy found : id is : 0x%x\n", 1014 dev_info(priv->dev, "phy found : id is : 0x%x\n",
1017 slave->phy->phy_id); 1015 slave->phy->phy_id);
1018 phy_start(slave->phy); 1016 phy_start(slave->phy);
1017
1018 /* Configure GMII_SEL register */
1019 cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
1020 slave->slave_num);
1019 } 1021 }
1020} 1022}
1021 1023
@@ -1705,62 +1707,55 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1705 1707
1706 if (of_property_read_u32(node, "active_slave", &prop)) { 1708 if (of_property_read_u32(node, "active_slave", &prop)) {
1707 pr_err("Missing active_slave property in the DT.\n"); 1709 pr_err("Missing active_slave property in the DT.\n");
1708 ret = -EINVAL; 1710 return -EINVAL;
1709 goto error_ret;
1710 } 1711 }
1711 data->active_slave = prop; 1712 data->active_slave = prop;
1712 1713
1713 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { 1714 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
1714 pr_err("Missing cpts_clock_mult property in the DT.\n"); 1715 pr_err("Missing cpts_clock_mult property in the DT.\n");
1715 ret = -EINVAL; 1716 return -EINVAL;
1716 goto error_ret;
1717 } 1717 }
1718 data->cpts_clock_mult = prop; 1718 data->cpts_clock_mult = prop;
1719 1719
1720 if (of_property_read_u32(node, "cpts_clock_shift", &prop)) { 1720 if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
1721 pr_err("Missing cpts_clock_shift property in the DT.\n"); 1721 pr_err("Missing cpts_clock_shift property in the DT.\n");
1722 ret = -EINVAL; 1722 return -EINVAL;
1723 goto error_ret;
1724 } 1723 }
1725 data->cpts_clock_shift = prop; 1724 data->cpts_clock_shift = prop;
1726 1725
1727 data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data), 1726 data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
1728 GFP_KERNEL); 1727 * sizeof(struct cpsw_slave_data),
1728 GFP_KERNEL);
1729 if (!data->slave_data) 1729 if (!data->slave_data)
1730 return -EINVAL; 1730 return -ENOMEM;
1731 1731
1732 if (of_property_read_u32(node, "cpdma_channels", &prop)) { 1732 if (of_property_read_u32(node, "cpdma_channels", &prop)) {
1733 pr_err("Missing cpdma_channels property in the DT.\n"); 1733 pr_err("Missing cpdma_channels property in the DT.\n");
1734 ret = -EINVAL; 1734 return -EINVAL;
1735 goto error_ret;
1736 } 1735 }
1737 data->channels = prop; 1736 data->channels = prop;
1738 1737
1739 if (of_property_read_u32(node, "ale_entries", &prop)) { 1738 if (of_property_read_u32(node, "ale_entries", &prop)) {
1740 pr_err("Missing ale_entries property in the DT.\n"); 1739 pr_err("Missing ale_entries property in the DT.\n");
1741 ret = -EINVAL; 1740 return -EINVAL;
1742 goto error_ret;
1743 } 1741 }
1744 data->ale_entries = prop; 1742 data->ale_entries = prop;
1745 1743
1746 if (of_property_read_u32(node, "bd_ram_size", &prop)) { 1744 if (of_property_read_u32(node, "bd_ram_size", &prop)) {
1747 pr_err("Missing bd_ram_size property in the DT.\n"); 1745 pr_err("Missing bd_ram_size property in the DT.\n");
1748 ret = -EINVAL; 1746 return -EINVAL;
1749 goto error_ret;
1750 } 1747 }
1751 data->bd_ram_size = prop; 1748 data->bd_ram_size = prop;
1752 1749
1753 if (of_property_read_u32(node, "rx_descs", &prop)) { 1750 if (of_property_read_u32(node, "rx_descs", &prop)) {
1754 pr_err("Missing rx_descs property in the DT.\n"); 1751 pr_err("Missing rx_descs property in the DT.\n");
1755 ret = -EINVAL; 1752 return -EINVAL;
1756 goto error_ret;
1757 } 1753 }
1758 data->rx_descs = prop; 1754 data->rx_descs = prop;
1759 1755
1760 if (of_property_read_u32(node, "mac_control", &prop)) { 1756 if (of_property_read_u32(node, "mac_control", &prop)) {
1761 pr_err("Missing mac_control property in the DT.\n"); 1757 pr_err("Missing mac_control property in the DT.\n");
1762 ret = -EINVAL; 1758 return -EINVAL;
1763 goto error_ret;
1764 } 1759 }
1765 data->mac_control = prop; 1760 data->mac_control = prop;
1766 1761
@@ -1791,8 +1786,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1791 parp = of_get_property(slave_node, "phy_id", &lenp); 1786 parp = of_get_property(slave_node, "phy_id", &lenp);
1792 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 1787 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
1793 pr_err("Missing slave[%d] phy_id property\n", i); 1788 pr_err("Missing slave[%d] phy_id property\n", i);
1794 ret = -EINVAL; 1789 return -EINVAL;
1795 goto error_ret;
1796 } 1790 }
1797 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 1791 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
1798 phyid = be32_to_cpup(parp+1); 1792 phyid = be32_to_cpup(parp+1);
@@ -1822,10 +1816,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1822 } 1816 }
1823 1817
1824 return 0; 1818 return 0;
1825
1826error_ret:
1827 kfree(data->slave_data);
1828 return ret;
1829} 1819}
1830 1820
1831static int cpsw_probe_dual_emac(struct platform_device *pdev, 1821static int cpsw_probe_dual_emac(struct platform_device *pdev,
@@ -1867,7 +1857,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1867 priv_sl2->coal_intvl = 0; 1857 priv_sl2->coal_intvl = 0;
1868 priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; 1858 priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
1869 1859
1870 priv_sl2->cpsw_res = priv->cpsw_res;
1871 priv_sl2->regs = priv->regs; 1860 priv_sl2->regs = priv->regs;
1872 priv_sl2->host_port = priv->host_port; 1861 priv_sl2->host_port = priv->host_port;
1873 priv_sl2->host_port_regs = priv->host_port_regs; 1862 priv_sl2->host_port_regs = priv->host_port_regs;
@@ -1911,8 +1900,8 @@ static int cpsw_probe(struct platform_device *pdev)
1911 struct cpsw_priv *priv; 1900 struct cpsw_priv *priv;
1912 struct cpdma_params dma_params; 1901 struct cpdma_params dma_params;
1913 struct cpsw_ale_params ale_params; 1902 struct cpsw_ale_params ale_params;
1914 void __iomem *ss_regs, *wr_regs; 1903 void __iomem *ss_regs;
1915 struct resource *res; 1904 struct resource *res, *ss_res;
1916 u32 slave_offset, sliver_offset, slave_size; 1905 u32 slave_offset, sliver_offset, slave_size;
1917 int ret = 0, i, k = 0; 1906 int ret = 0, i, k = 0;
1918 1907
@@ -1948,7 +1937,7 @@ static int cpsw_probe(struct platform_device *pdev)
1948 if (cpsw_probe_dt(&priv->data, pdev)) { 1937 if (cpsw_probe_dt(&priv->data, pdev)) {
1949 pr_err("cpsw: platform data missing\n"); 1938 pr_err("cpsw: platform data missing\n");
1950 ret = -ENODEV; 1939 ret = -ENODEV;
1951 goto clean_ndev_ret; 1940 goto clean_runtime_disable_ret;
1952 } 1941 }
1953 data = &priv->data; 1942 data = &priv->data;
1954 1943
@@ -1962,11 +1951,12 @@ static int cpsw_probe(struct platform_device *pdev)
1962 1951
1963 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); 1952 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
1964 1953
1965 priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves, 1954 priv->slaves = devm_kzalloc(&pdev->dev,
1966 GFP_KERNEL); 1955 sizeof(struct cpsw_slave) * data->slaves,
1956 GFP_KERNEL);
1967 if (!priv->slaves) { 1957 if (!priv->slaves) {
1968 ret = -EBUSY; 1958 ret = -ENOMEM;
1969 goto clean_ndev_ret; 1959 goto clean_runtime_disable_ret;
1970 } 1960 }
1971 for (i = 0; i < data->slaves; i++) 1961 for (i = 0; i < data->slaves; i++)
1972 priv->slaves[i].slave_num = i; 1962 priv->slaves[i].slave_num = i;
@@ -1974,55 +1964,31 @@ static int cpsw_probe(struct platform_device *pdev)
1974 priv->slaves[0].ndev = ndev; 1964 priv->slaves[0].ndev = ndev;
1975 priv->emac_port = 0; 1965 priv->emac_port = 0;
1976 1966
1977 priv->clk = clk_get(&pdev->dev, "fck"); 1967 priv->clk = devm_clk_get(&pdev->dev, "fck");
1978 if (IS_ERR(priv->clk)) { 1968 if (IS_ERR(priv->clk)) {
1979 dev_err(&pdev->dev, "fck is not found\n"); 1969 dev_err(priv->dev, "fck is not found\n");
1980 ret = -ENODEV; 1970 ret = -ENODEV;
1981 goto clean_slave_ret; 1971 goto clean_runtime_disable_ret;
1982 } 1972 }
1983 priv->coal_intvl = 0; 1973 priv->coal_intvl = 0;
1984 priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000; 1974 priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
1985 1975
1986 priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1976 ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1987 if (!priv->cpsw_res) { 1977 ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
1988 dev_err(priv->dev, "error getting i/o resource\n"); 1978 if (IS_ERR(ss_regs)) {
1989 ret = -ENOENT; 1979 ret = PTR_ERR(ss_regs);
1990 goto clean_clk_ret; 1980 goto clean_runtime_disable_ret;
1991 }
1992 if (!request_mem_region(priv->cpsw_res->start,
1993 resource_size(priv->cpsw_res), ndev->name)) {
1994 dev_err(priv->dev, "failed request i/o region\n");
1995 ret = -ENXIO;
1996 goto clean_clk_ret;
1997 }
1998 ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
1999 if (!ss_regs) {
2000 dev_err(priv->dev, "unable to map i/o region\n");
2001 goto clean_cpsw_iores_ret;
2002 } 1981 }
2003 priv->regs = ss_regs; 1982 priv->regs = ss_regs;
2004 priv->version = __raw_readl(&priv->regs->id_ver); 1983 priv->version = __raw_readl(&priv->regs->id_ver);
2005 priv->host_port = HOST_PORT_NUM; 1984 priv->host_port = HOST_PORT_NUM;
2006 1985
2007 priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1986 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2008 if (!priv->cpsw_wr_res) { 1987 priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
2009 dev_err(priv->dev, "error getting i/o resource\n"); 1988 if (IS_ERR(priv->wr_regs)) {
2010 ret = -ENOENT; 1989 ret = PTR_ERR(priv->wr_regs);
2011 goto clean_iomap_ret; 1990 goto clean_runtime_disable_ret;
2012 }
2013 if (!request_mem_region(priv->cpsw_wr_res->start,
2014 resource_size(priv->cpsw_wr_res), ndev->name)) {
2015 dev_err(priv->dev, "failed request i/o region\n");
2016 ret = -ENXIO;
2017 goto clean_iomap_ret;
2018 } 1991 }
2019 wr_regs = ioremap(priv->cpsw_wr_res->start,
2020 resource_size(priv->cpsw_wr_res));
2021 if (!wr_regs) {
2022 dev_err(priv->dev, "unable to map i/o region\n");
2023 goto clean_cpsw_wr_iores_ret;
2024 }
2025 priv->wr_regs = wr_regs;
2026 1992
2027 memset(&dma_params, 0, sizeof(dma_params)); 1993 memset(&dma_params, 0, sizeof(dma_params));
2028 memset(&ale_params, 0, sizeof(ale_params)); 1994 memset(&ale_params, 0, sizeof(ale_params));
@@ -2053,12 +2019,12 @@ static int cpsw_probe(struct platform_device *pdev)
2053 slave_size = CPSW2_SLAVE_SIZE; 2019 slave_size = CPSW2_SLAVE_SIZE;
2054 sliver_offset = CPSW2_SLIVER_OFFSET; 2020 sliver_offset = CPSW2_SLIVER_OFFSET;
2055 dma_params.desc_mem_phys = 2021 dma_params.desc_mem_phys =
2056 (u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET; 2022 (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
2057 break; 2023 break;
2058 default: 2024 default:
2059 dev_err(priv->dev, "unknown version 0x%08x\n", priv->version); 2025 dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
2060 ret = -ENODEV; 2026 ret = -ENODEV;
2061 goto clean_cpsw_wr_iores_ret; 2027 goto clean_runtime_disable_ret;
2062 } 2028 }
2063 for (i = 0; i < priv->data.slaves; i++) { 2029 for (i = 0; i < priv->data.slaves; i++) {
2064 struct cpsw_slave *slave = &priv->slaves[i]; 2030 struct cpsw_slave *slave = &priv->slaves[i];
@@ -2086,7 +2052,7 @@ static int cpsw_probe(struct platform_device *pdev)
2086 if (!priv->dma) { 2052 if (!priv->dma) {
2087 dev_err(priv->dev, "error initializing dma\n"); 2053 dev_err(priv->dev, "error initializing dma\n");
2088 ret = -ENOMEM; 2054 ret = -ENOMEM;
2089 goto clean_wr_iomap_ret; 2055 goto clean_runtime_disable_ret;
2090 } 2056 }
2091 2057
2092 priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0), 2058 priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -2121,8 +2087,8 @@ static int cpsw_probe(struct platform_device *pdev)
2121 2087
2122 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 2088 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
2123 for (i = res->start; i <= res->end; i++) { 2089 for (i = res->start; i <= res->end; i++) {
2124 if (request_irq(i, cpsw_interrupt, 0, 2090 if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
2125 dev_name(&pdev->dev), priv)) { 2091 dev_name(priv->dev), priv)) {
2126 dev_err(priv->dev, "error attaching irq\n"); 2092 dev_err(priv->dev, "error attaching irq\n");
2127 goto clean_ale_ret; 2093 goto clean_ale_ret;
2128 } 2094 }
@@ -2144,7 +2110,7 @@ static int cpsw_probe(struct platform_device *pdev)
2144 if (ret) { 2110 if (ret) {
2145 dev_err(priv->dev, "error registering net device\n"); 2111 dev_err(priv->dev, "error registering net device\n");
2146 ret = -ENODEV; 2112 ret = -ENODEV;
2147 goto clean_irq_ret; 2113 goto clean_ale_ret;
2148 } 2114 }
2149 2115
2150 if (cpts_register(&pdev->dev, priv->cpts, 2116 if (cpts_register(&pdev->dev, priv->cpts,
@@ -2152,44 +2118,27 @@ static int cpsw_probe(struct platform_device *pdev)
2152 dev_err(priv->dev, "error registering cpts device\n"); 2118 dev_err(priv->dev, "error registering cpts device\n");
2153 2119
2154 cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n", 2120 cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
2155 priv->cpsw_res->start, ndev->irq); 2121 ss_res->start, ndev->irq);
2156 2122
2157 if (priv->data.dual_emac) { 2123 if (priv->data.dual_emac) {
2158 ret = cpsw_probe_dual_emac(pdev, priv); 2124 ret = cpsw_probe_dual_emac(pdev, priv);
2159 if (ret) { 2125 if (ret) {
2160 cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); 2126 cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
2161 goto clean_irq_ret; 2127 goto clean_ale_ret;
2162 } 2128 }
2163 } 2129 }
2164 2130
2165 return 0; 2131 return 0;
2166 2132
2167clean_irq_ret:
2168 for (i = 0; i < priv->num_irqs; i++)
2169 free_irq(priv->irqs_table[i], priv);
2170clean_ale_ret: 2133clean_ale_ret:
2171 cpsw_ale_destroy(priv->ale); 2134 cpsw_ale_destroy(priv->ale);
2172clean_dma_ret: 2135clean_dma_ret:
2173 cpdma_chan_destroy(priv->txch); 2136 cpdma_chan_destroy(priv->txch);
2174 cpdma_chan_destroy(priv->rxch); 2137 cpdma_chan_destroy(priv->rxch);
2175 cpdma_ctlr_destroy(priv->dma); 2138 cpdma_ctlr_destroy(priv->dma);
2176clean_wr_iomap_ret: 2139clean_runtime_disable_ret:
2177 iounmap(priv->wr_regs);
2178clean_cpsw_wr_iores_ret:
2179 release_mem_region(priv->cpsw_wr_res->start,
2180 resource_size(priv->cpsw_wr_res));
2181clean_iomap_ret:
2182 iounmap(priv->regs);
2183clean_cpsw_iores_ret:
2184 release_mem_region(priv->cpsw_res->start,
2185 resource_size(priv->cpsw_res));
2186clean_clk_ret:
2187 clk_put(priv->clk);
2188clean_slave_ret:
2189 pm_runtime_disable(&pdev->dev); 2140 pm_runtime_disable(&pdev->dev);
2190 kfree(priv->slaves);
2191clean_ndev_ret: 2141clean_ndev_ret:
2192 kfree(priv->data.slave_data);
2193 free_netdev(priv->ndev); 2142 free_netdev(priv->ndev);
2194 return ret; 2143 return ret;
2195} 2144}
@@ -2198,30 +2147,18 @@ static int cpsw_remove(struct platform_device *pdev)
2198{ 2147{
2199 struct net_device *ndev = platform_get_drvdata(pdev); 2148 struct net_device *ndev = platform_get_drvdata(pdev);
2200 struct cpsw_priv *priv = netdev_priv(ndev); 2149 struct cpsw_priv *priv = netdev_priv(ndev);
2201 int i;
2202 2150
2203 if (priv->data.dual_emac) 2151 if (priv->data.dual_emac)
2204 unregister_netdev(cpsw_get_slave_ndev(priv, 1)); 2152 unregister_netdev(cpsw_get_slave_ndev(priv, 1));
2205 unregister_netdev(ndev); 2153 unregister_netdev(ndev);
2206 2154
2207 cpts_unregister(priv->cpts); 2155 cpts_unregister(priv->cpts);
2208 for (i = 0; i < priv->num_irqs; i++)
2209 free_irq(priv->irqs_table[i], priv);
2210 2156
2211 cpsw_ale_destroy(priv->ale); 2157 cpsw_ale_destroy(priv->ale);
2212 cpdma_chan_destroy(priv->txch); 2158 cpdma_chan_destroy(priv->txch);
2213 cpdma_chan_destroy(priv->rxch); 2159 cpdma_chan_destroy(priv->rxch);
2214 cpdma_ctlr_destroy(priv->dma); 2160 cpdma_ctlr_destroy(priv->dma);
2215 iounmap(priv->regs);
2216 release_mem_region(priv->cpsw_res->start,
2217 resource_size(priv->cpsw_res));
2218 iounmap(priv->wr_regs);
2219 release_mem_region(priv->cpsw_wr_res->start,
2220 resource_size(priv->cpsw_wr_res));
2221 pm_runtime_disable(&pdev->dev); 2161 pm_runtime_disable(&pdev->dev);
2222 clk_put(priv->clk);
2223 kfree(priv->slaves);
2224 kfree(priv->data.slave_data);
2225 if (priv->data.dual_emac) 2162 if (priv->data.dual_emac)
2226 free_netdev(cpsw_get_slave_ndev(priv, 1)); 2163 free_netdev(cpsw_get_slave_ndev(priv, 1));
2227 free_netdev(ndev); 2164 free_netdev(ndev);
@@ -2277,7 +2214,7 @@ static struct platform_driver cpsw_driver = {
2277 .name = "cpsw", 2214 .name = "cpsw",
2278 .owner = THIS_MODULE, 2215 .owner = THIS_MODULE,
2279 .pm = &cpsw_pm_ops, 2216 .pm = &cpsw_pm_ops,
2280 .of_match_table = of_match_ptr(cpsw_of_mtable), 2217 .of_match_table = cpsw_of_mtable,
2281 }, 2218 },
2282 .probe = cpsw_probe, 2219 .probe = cpsw_probe,
2283 .remove = cpsw_remove, 2220 .remove = cpsw_remove,
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index eb3e101ec048..574f49da693f 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -39,4 +39,6 @@ struct cpsw_platform_data {
39 bool dual_emac; /* Enable Dual EMAC mode */ 39 bool dual_emac; /* Enable Dual EMAC mode */
40}; 40};
41 41
42void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
43
42#endif /* __CPSW_H__ */ 44#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index fe993cdd7e23..1a581ef7eee8 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -127,8 +127,8 @@ struct cpts {
127}; 127};
128 128
129#ifdef CONFIG_TI_CPTS 129#ifdef CONFIG_TI_CPTS
130extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); 130void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
131extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb); 131void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
132#else 132#else
133static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb) 133static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
134{ 134{
@@ -138,8 +138,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
138} 138}
139#endif 139#endif
140 140
141extern int cpts_register(struct device *dev, struct cpts *cpts, 141int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift);
142 u32 mult, u32 shift); 142void cpts_unregister(struct cpts *cpts);
143extern void cpts_unregister(struct cpts *cpts);
144 143
145#endif 144#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 6a32ef9d63ae..41ba974bf37c 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1852,7 +1852,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1852 } 1852 }
1853 1853
1854 /* MAC addr and PHY mask , RMII enable info from platform_data */ 1854 /* MAC addr and PHY mask , RMII enable info from platform_data */
1855 memcpy(priv->mac_addr, pdata->mac_addr, 6); 1855 memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN);
1856 priv->phy_id = pdata->phy_id; 1856 priv->phy_id = pdata->phy_id;
1857 priv->rmii_en = pdata->rmii_en; 1857 priv->rmii_en = pdata->rmii_en;
1858 priv->version = pdata->version; 1858 priv->version = pdata->version;
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 13e6fff8ca23..628b736e5ae7 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2230,7 +2230,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
2230 nz_addr |= mac[i]; 2230 nz_addr |= mac[i];
2231 2231
2232 if (nz_addr) { 2232 if (nz_addr) {
2233 memcpy(dev->dev_addr, mac, 6); 2233 memcpy(dev->dev_addr, mac, ETH_ALEN);
2234 dev->addr_len = 6; 2234 dev->addr_len = 6;
2235 } else { 2235 } else {
2236 eth_hw_addr_random(dev); 2236 eth_hw_addr_random(dev);
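Aside: several hunks in this series (davinci_emac, tilegx, skfp, plip, catc, xilinx_emaclite) replace the bare constant 6 with ETH_ALEN when handling MAC addresses. A minimal sketch of the idiom, with made-up names:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	static void example_copy_mac(struct net_device *dev, const u8 *mac)
	{
		/* ETH_ALEN (6, from <linux/if_ether.h>) documents intent
		 * better than a magic number and keeps checkpatch quiet.
		 */
		memcpy(dev->dev_addr, mac, ETH_ALEN);
		dev->addr_len = ETH_ALEN;
	}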
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 309abb472aa2..8505196be9f5 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -359,27 +359,26 @@ static inline void *port_priv(struct gelic_port *port)
359} 359}
360 360
361#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC 361#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
362extern void udbg_shutdown_ps3gelic(void); 362void udbg_shutdown_ps3gelic(void);
363#else 363#else
364static inline void udbg_shutdown_ps3gelic(void) {} 364static inline void udbg_shutdown_ps3gelic(void) {}
365#endif 365#endif
366 366
367extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask); 367int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
368/* shared netdev ops */ 368/* shared netdev ops */
369extern void gelic_card_up(struct gelic_card *card); 369void gelic_card_up(struct gelic_card *card);
370extern void gelic_card_down(struct gelic_card *card); 370void gelic_card_down(struct gelic_card *card);
371extern int gelic_net_open(struct net_device *netdev); 371int gelic_net_open(struct net_device *netdev);
372extern int gelic_net_stop(struct net_device *netdev); 372int gelic_net_stop(struct net_device *netdev);
373extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); 373int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
374extern void gelic_net_set_multi(struct net_device *netdev); 374void gelic_net_set_multi(struct net_device *netdev);
375extern void gelic_net_tx_timeout(struct net_device *netdev); 375void gelic_net_tx_timeout(struct net_device *netdev);
376extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu); 376int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
377extern int gelic_net_setup_netdev(struct net_device *netdev, 377int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
378 struct gelic_card *card);
379 378
380/* shared ethtool ops */ 379/* shared ethtool ops */
381extern void gelic_net_get_drvinfo(struct net_device *netdev, 380void gelic_net_get_drvinfo(struct net_device *netdev,
382 struct ethtool_drvinfo *info); 381 struct ethtool_drvinfo *info);
383extern void gelic_net_poll_controller(struct net_device *netdev); 382void gelic_net_poll_controller(struct net_device *netdev);
384 383
385#endif /* _GELIC_NET_H */ 384#endif /* _GELIC_NET_H */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
index f7e51b7d7049..11f443d8e4ea 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
@@ -320,7 +320,7 @@ struct gelic_eurus_cmd {
320#define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0) 320#define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0)
321#define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1) 321#define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1)
322 322
323extern int gelic_wl_driver_probe(struct gelic_card *card); 323int gelic_wl_driver_probe(struct gelic_card *card);
324extern int gelic_wl_driver_remove(struct gelic_card *card); 324int gelic_wl_driver_remove(struct gelic_card *card);
325extern void gelic_wl_interrupt(struct net_device *netdev, u64 status); 325void gelic_wl_interrupt(struct net_device *netdev, u64 status);
326#endif /* _GELIC_WIRELESS_H */ 326#endif /* _GELIC_WIRELESS_H */
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
index 4ba2135474d1..9b6af0845a11 100644
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ b/drivers/net/ethernet/toshiba/spider_net.h
@@ -29,8 +29,8 @@
29 29
30#include <linux/sungem_phy.h> 30#include <linux/sungem_phy.h>
31 31
32extern int spider_net_stop(struct net_device *netdev); 32int spider_net_stop(struct net_device *netdev);
33extern int spider_net_open(struct net_device *netdev); 33int spider_net_open(struct net_device *netdev);
34 34
35extern const struct ethtool_ops spider_net_ethtool_ops; 35extern const struct ethtool_ops spider_net_ethtool_ops;
36 36
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 4c619ea5189f..74234a51c851 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -31,7 +31,7 @@
31#define DRIVER_NAME "xilinx_emaclite" 31#define DRIVER_NAME "xilinx_emaclite"
32 32
33/* Register offsets for the EmacLite Core */ 33/* Register offsets for the EmacLite Core */
34#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */ 34#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
35#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */ 35#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
36#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */ 36#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
37#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */ 37#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
@@ -63,13 +63,13 @@
63#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */ 63#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
64 64
65/* Global Interrupt Enable Register (GIER) Bit Masks */ 65/* Global Interrupt Enable Register (GIER) Bit Masks */
66#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */ 66#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
67 67
68/* Transmit Status Register (TSR) Bit Masks */ 68/* Transmit Status Register (TSR) Bit Masks */
69#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */ 69#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */
70#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */ 70#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */
71#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */ 71#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
72#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit 72#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
73 * only. This is not documented 73 * only. This is not documented
74 * in the HW spec */ 74 * in the HW spec */
75 75
@@ -77,21 +77,21 @@
77#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK) 77#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
78 78
79/* Receive Status Register (RSR) */ 79/* Receive Status Register (RSR) */
80#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */ 80#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */
81#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */ 81#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */
82 82
83/* Transmit Packet Length Register (TPLR) */ 83/* Transmit Packet Length Register (TPLR) */
84#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */ 84#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */
85 85
86/* Receive Packet Length Register (RPLR) */ 86/* Receive Packet Length Register (RPLR) */
87#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */ 87#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */
88 88
89#define XEL_HEADER_OFFSET 12 /* Offset to length field */ 89#define XEL_HEADER_OFFSET 12 /* Offset to length field */
90#define XEL_HEADER_SHIFT 16 /* Shift value for length */ 90#define XEL_HEADER_SHIFT 16 /* Shift value for length */
91 91
92/* General Ethernet Definitions */ 92/* General Ethernet Definitions */
93#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */ 93#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
94#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */ 94#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
95 95
96 96
97 97
@@ -1075,14 +1075,9 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
1075 * This function un maps the IO region of the Emaclite device and frees the net 1075 * This function un maps the IO region of the Emaclite device and frees the net
1076 * device. 1076 * device.
1077 */ 1077 */
1078static void xemaclite_remove_ndev(struct net_device *ndev, 1078static void xemaclite_remove_ndev(struct net_device *ndev)
1079 struct platform_device *pdev)
1080{ 1079{
1081 if (ndev) { 1080 if (ndev) {
1082 struct net_local *lp = netdev_priv(ndev);
1083
1084 if (lp->base_addr)
1085 devm_iounmap(&pdev->dev, lp->base_addr);
1086 free_netdev(ndev); 1081 free_netdev(ndev);
1087 } 1082 }
1088} 1083}
@@ -1177,7 +1172,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1177 1172
1178 if (mac_address) 1173 if (mac_address)
1179 /* Set the MAC address. */ 1174 /* Set the MAC address. */
1180 memcpy(ndev->dev_addr, mac_address, 6); 1175 memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
1181 else 1176 else
1182 dev_warn(dev, "No MAC address found\n"); 1177 dev_warn(dev, "No MAC address found\n");
1183 1178
@@ -1214,7 +1209,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1214 return 0; 1209 return 0;
1215 1210
1216error: 1211error:
1217 xemaclite_remove_ndev(ndev, ofdev); 1212 xemaclite_remove_ndev(ndev);
1218 return rc; 1213 return rc;
1219} 1214}
1220 1215
@@ -1248,7 +1243,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
1248 of_node_put(lp->phy_node); 1243 of_node_put(lp->phy_node);
1249 lp->phy_node = NULL; 1244 lp->phy_node = NULL;
1250 1245
1251 xemaclite_remove_ndev(ndev, of_dev); 1246 xemaclite_remove_ndev(ndev);
1252 1247
1253 return 0; 1248 return 0;
1254} 1249}
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index a20ed1a98099..f83993590174 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -453,7 +453,7 @@ static void directed_beacon(struct s_smc *smc)
453 */ 453 */
454 * (char *) a = (char) ((long)DBEACON_INFO<<24L) ; 454 * (char *) a = (char) ((long)DBEACON_INFO<<24L) ;
455 a[1] = 0 ; 455 a[1] = 0 ;
456 memcpy((char *)a+1,(char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr,6) ; 456 memcpy((char *)a+1, (char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr, ETH_ALEN);
457 457
458 CHECK_NPP() ; 458 CHECK_NPP() ;
459 /* set memory address reg for writes */ 459 /* set memory address reg for writes */
diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h
index 3ca308b28214..bd1166bf8f61 100644
--- a/drivers/net/fddi/skfp/h/smc.h
+++ b/drivers/net/fddi/skfp/h/smc.h
@@ -469,20 +469,20 @@ struct s_smc {
469 469
470extern const struct fddi_addr fddi_broadcast; 470extern const struct fddi_addr fddi_broadcast;
471 471
472extern void all_selection_criteria(struct s_smc *smc); 472void all_selection_criteria(struct s_smc *smc);
473extern void card_stop(struct s_smc *smc); 473void card_stop(struct s_smc *smc);
474extern void init_board(struct s_smc *smc, u_char *mac_addr); 474void init_board(struct s_smc *smc, u_char *mac_addr);
475extern int init_fplus(struct s_smc *smc); 475int init_fplus(struct s_smc *smc);
476extern void init_plc(struct s_smc *smc); 476void init_plc(struct s_smc *smc);
477extern int init_smt(struct s_smc *smc, u_char * mac_addr); 477int init_smt(struct s_smc *smc, u_char *mac_addr);
478extern void mac1_irq(struct s_smc *smc, u_short stu, u_short stl); 478void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
479extern void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l); 479void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
480extern void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l); 480void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
481extern int pcm_status_twisted(struct s_smc *smc); 481int pcm_status_twisted(struct s_smc *smc);
482extern void plc1_irq(struct s_smc *smc); 482void plc1_irq(struct s_smc *smc);
483extern void plc2_irq(struct s_smc *smc); 483void plc2_irq(struct s_smc *smc);
484extern void read_address(struct s_smc *smc, u_char * mac_addr); 484void read_address(struct s_smc *smc, u_char *mac_addr);
485extern void timer_irq(struct s_smc *smc); 485void timer_irq(struct s_smc *smc);
486 486
487#endif /* _SCMECM_ */ 487#endif /* _SCMECM_ */
488 488
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index f5d7305a5784..713d303a06a9 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -436,7 +436,7 @@ static int skfp_driver_init(struct net_device *dev)
436 } 436 }
437 read_address(smc, NULL); 437 read_address(smc, NULL);
438 pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a); 438 pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
439 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6); 439 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
440 440
441 smt_reset_defaults(smc, 0); 441 smt_reset_defaults(smc, 0);
442 442
@@ -503,7 +503,7 @@ static int skfp_open(struct net_device *dev)
503 * address. 503 * address.
504 */ 504 */
505 read_address(smc, NULL); 505 read_address(smc, NULL);
506 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6); 506 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
507 507
508 init_smt(smc, NULL); 508 init_smt(smc, NULL);
509 smt_online(smc, 1); 509 smt_online(smc, 1);
@@ -1213,7 +1213,7 @@ static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
1213 if ((unsigned short) frame[1 + 10] != 0) 1213 if ((unsigned short) frame[1 + 10] != 0)
1214 return; 1214 return;
1215 SRBit = frame[1 + 6] & 0x01; 1215 SRBit = frame[1 + 6] & 0x01;
1216 memcpy(&frame[1 + 6], hw_addr, 6); 1216 memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
1217 frame[8] |= SRBit; 1217 frame[8] |= SRBit;
1218} // CheckSourceAddress 1218} // CheckSourceAddress
1219 1219
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index a974727dd9a2..636b65c66d49 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -445,7 +445,7 @@ static int ser12_open(struct net_device *dev)
445 outb(0, FCR(dev->base_addr)); /* disable FIFOs */ 445 outb(0, FCR(dev->base_addr)); /* disable FIFOs */
446 outb(0x0d, MCR(dev->base_addr)); 446 outb(0x0d, MCR(dev->base_addr));
447 outb(0, IER(dev->base_addr)); 447 outb(0, IER(dev->base_addr));
448 if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED, 448 if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
449 "baycom_ser_fdx", dev)) { 449 "baycom_ser_fdx", dev)) {
450 release_region(dev->base_addr, SER12_EXTENT); 450 release_region(dev->base_addr, SER12_EXTENT);
451 return -EBUSY; 451 return -EBUSY;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index e349d867449b..f9a8976195ba 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -490,7 +490,7 @@ static int ser12_open(struct net_device *dev)
490 outb(0, FCR(dev->base_addr)); /* disable FIFOs */ 490 outb(0, FCR(dev->base_addr)); /* disable FIFOs */
491 outb(0x0d, MCR(dev->base_addr)); 491 outb(0x0d, MCR(dev->base_addr));
492 outb(0, IER(dev->base_addr)); 492 outb(0, IER(dev->base_addr));
493 if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED, 493 if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
494 "baycom_ser12", dev)) { 494 "baycom_ser12", dev)) {
495 release_region(dev->base_addr, SER12_EXTENT); 495 release_region(dev->base_addr, SER12_EXTENT);
496 return -EBUSY; 496 return -EBUSY;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index bc1d52170389..4bc6ee8e7987 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1734,7 +1734,7 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1734 if (!Ivec[hwcfg.irq].used && hwcfg.irq) 1734 if (!Ivec[hwcfg.irq].used && hwcfg.irq)
1735 { 1735 {
1736 if (request_irq(hwcfg.irq, scc_isr, 1736 if (request_irq(hwcfg.irq, scc_isr,
1737 IRQF_DISABLED, "AX.25 SCC", 1737 0, "AX.25 SCC",
1738 (void *)(long) hwcfg.irq)) 1738 (void *)(long) hwcfg.irq))
1739 printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq); 1739 printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
1740 else 1740 else
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 5af1c3e5032a..1971411574db 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -888,7 +888,7 @@ static int yam_open(struct net_device *dev)
888 goto out_release_base; 888 goto out_release_base;
889 } 889 }
890 outb(0, IER(dev->base_addr)); 890 outb(0, IER(dev->base_addr));
891 if (request_irq(dev->irq, yam_interrupt, IRQF_DISABLED | IRQF_SHARED, dev->name, dev)) { 891 if (request_irq(dev->irq, yam_interrupt, IRQF_SHARED, dev->name, dev)) {
892 printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq); 892 printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
893 ret = -EBUSY; 893 ret = -EBUSY;
894 goto out_release_base; 894 goto out_release_base;
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index c74f384c87d5..303c4bd26e17 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -411,12 +411,12 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
411 411
412#else 412#else
413 413
414 if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) { 414 if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
415 dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n"); 415 dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
416 return -EBUSY; 416 return -EBUSY;
417 } 417 }
418 418
419 if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) { 419 if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
420 dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n"); 420 dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
421 free_irq(port->irq, dev); 421 free_irq(port->irq, dev);
422 return -EBUSY; 422 return -EBUSY;
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 31bcb98ef356..768dfe9a9315 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1352,7 +1352,7 @@ toshoboe_net_open (struct net_device *dev)
1352 return 0; 1352 return 0;
1353 1353
1354 rc = request_irq (self->io.irq, toshoboe_interrupt, 1354 rc = request_irq (self->io.irq, toshoboe_interrupt,
1355 IRQF_SHARED | IRQF_DISABLED, dev->name, self); 1355 IRQF_SHARED, dev->name, self);
1356 if (rc) 1356 if (rc)
1357 return rc; 1357 return rc;
1358 1358
@@ -1559,7 +1559,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1559 self->io.fir_base = self->base; 1559 self->io.fir_base = self->base;
1560 self->io.fir_ext = OBOE_IO_EXTENT; 1560 self->io.fir_ext = OBOE_IO_EXTENT;
1561 self->io.irq = pci_dev->irq; 1561 self->io.irq = pci_dev->irq;
1562 self->io.irqflags = IRQF_SHARED | IRQF_DISABLED; 1562 self->io.irqflags = IRQF_SHARED;
1563 1563
1564 self->speed = self->io.speed = 9600; 1564 self->speed = self->io.speed = 9600;
1565 self->async = 0; 1565 self->async = 0;
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4455425f1c77..ff45cd0d60e8 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -804,7 +804,7 @@ static int sh_irda_probe(struct platform_device *pdev)
804 goto err_mem_4; 804 goto err_mem_4;
805 805
806 platform_set_drvdata(pdev, ndev); 806 platform_set_drvdata(pdev, ndev);
807 err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self); 807 err = request_irq(irq, sh_irda_irq, 0, "sh_irda", self);
808 if (err) { 808 if (err) {
809 dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n"); 809 dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
810 goto err_mem_4; 810 goto err_mem_4;
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 89682b49900f..8d9ae5a086d5 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -761,7 +761,7 @@ static int sh_sir_probe(struct platform_device *pdev)
761 goto err_mem_4; 761 goto err_mem_4;
762 762
763 platform_set_drvdata(pdev, ndev); 763 platform_set_drvdata(pdev, ndev);
764 err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self); 764 err = request_irq(irq, sh_sir_irq, 0, "sh_sir", self);
765 if (err) { 765 if (err) {
766 dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n"); 766 dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
767 goto err_mem_4; 767 goto err_mem_4;
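Aside: the hamradio and IrDA hunks above drop IRQF_DISABLED, which had long been a no-op (handlers already run with local interrupts disabled) and was being removed tree-wide. A sketch of what a request_irq() call looks like afterwards, in a hypothetical driver:

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	static irqreturn_t example_interrupt(int irq, void *dev_id)
	{
		/* ... service the device ... */
		return IRQ_HANDLED;
	}

	static int example_open(struct net_device *dev)
	{
		/* IRQF_SHARED (or plain 0) is all that is still needed. */
		if (request_irq(dev->irq, example_interrupt, IRQF_SHARED,
				dev->name, dev))
			return -EBUSY;
		return 0;
	}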
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 6d5b1e2b1289..f50b9c1c0639 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -102,28 +102,29 @@ struct sir_driver {
102 102
103/* exported */ 103/* exported */
104 104
105extern int irda_register_dongle(struct dongle_driver *new); 105int irda_register_dongle(struct dongle_driver *new);
106extern int irda_unregister_dongle(struct dongle_driver *drv); 106int irda_unregister_dongle(struct dongle_driver *drv);
107 107
108extern struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name); 108struct sir_dev *sirdev_get_instance(const struct sir_driver *drv,
109extern int sirdev_put_instance(struct sir_dev *self); 109 const char *name);
110int sirdev_put_instance(struct sir_dev *self);
110 111
111extern int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type); 112int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
112extern void sirdev_write_complete(struct sir_dev *dev); 113void sirdev_write_complete(struct sir_dev *dev);
113extern int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count); 114int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
114 115
115/* low level helpers for SIR device/dongle setup */ 116/* low level helpers for SIR device/dongle setup */
116extern int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len); 117int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
117extern int sirdev_raw_read(struct sir_dev *dev, char *buf, int len); 118int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
118extern int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts); 119int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
119 120
120/* not exported */ 121/* not exported */
121 122
122extern int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type); 123int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
123extern int sirdev_put_dongle(struct sir_dev *self); 124int sirdev_put_dongle(struct sir_dev *self);
124 125
125extern void sirdev_enable_rx(struct sir_dev *dev); 126void sirdev_enable_rx(struct sir_dev *dev);
126extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param); 127int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
127 128
128/* inline helpers */ 129/* inline helpers */
129 130
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9bf46bd19b87..cc9845ec91c1 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -828,22 +828,21 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
828 eth_hw_addr_inherit(dev, lowerdev); 828 eth_hw_addr_inherit(dev, lowerdev);
829 } 829 }
830 830
831 port->count += 1;
832 err = register_netdevice(dev);
833 if (err < 0)
834 goto destroy_port;
835
831 err = netdev_upper_dev_link(lowerdev, dev); 836 err = netdev_upper_dev_link(lowerdev, dev);
832 if (err) 837 if (err)
833 goto destroy_port; 838 goto destroy_port;
834 839
835 port->count += 1;
836 err = register_netdevice(dev);
837 if (err < 0)
838 goto upper_dev_unlink;
839 840
840 list_add_tail_rcu(&vlan->list, &port->vlans); 841 list_add_tail_rcu(&vlan->list, &port->vlans);
841 netif_stacked_transfer_operstate(lowerdev, dev); 842 netif_stacked_transfer_operstate(lowerdev, dev);
842 843
843 return 0; 844 return 0;
844 845
845upper_dev_unlink:
846 netdev_upper_dev_unlink(lowerdev, dev);
847destroy_port: 846destroy_port:
848 port->count -= 1; 847 port->count -= 1;
849 if (!port->count) 848 if (!port->count)
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index ac22283aaf23..bc71947b1ec3 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -100,6 +100,45 @@ static void at803x_get_wol(struct phy_device *phydev,
100 wol->wolopts |= WAKE_MAGIC; 100 wol->wolopts |= WAKE_MAGIC;
101} 101}
102 102
103static int at803x_suspend(struct phy_device *phydev)
104{
105 int value;
106 int wol_enabled;
107
108 mutex_lock(&phydev->lock);
109
110 value = phy_read(phydev, AT803X_INTR_ENABLE);
111 wol_enabled = value & AT803X_WOL_ENABLE;
112
113 value = phy_read(phydev, MII_BMCR);
114
115 if (wol_enabled)
116 value |= BMCR_ISOLATE;
117 else
118 value |= BMCR_PDOWN;
119
120 phy_write(phydev, MII_BMCR, value);
121
122 mutex_unlock(&phydev->lock);
123
124 return 0;
125}
126
127static int at803x_resume(struct phy_device *phydev)
128{
129 int value;
130
131 mutex_lock(&phydev->lock);
132
133 value = phy_read(phydev, MII_BMCR);
134 value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
135 phy_write(phydev, MII_BMCR, value);
136
137 mutex_unlock(&phydev->lock);
138
139 return 0;
140}
141
103static int at803x_config_init(struct phy_device *phydev) 142static int at803x_config_init(struct phy_device *phydev)
104{ 143{
105 int val; 144 int val;
@@ -161,10 +200,12 @@ static struct phy_driver at803x_driver[] = {
161 .config_init = at803x_config_init, 200 .config_init = at803x_config_init,
162 .set_wol = at803x_set_wol, 201 .set_wol = at803x_set_wol,
163 .get_wol = at803x_get_wol, 202 .get_wol = at803x_get_wol,
203 .suspend = at803x_suspend,
204 .resume = at803x_resume,
164 .features = PHY_GBIT_FEATURES, 205 .features = PHY_GBIT_FEATURES,
165 .flags = PHY_HAS_INTERRUPT, 206 .flags = PHY_HAS_INTERRUPT,
166 .config_aneg = &genphy_config_aneg, 207 .config_aneg = genphy_config_aneg,
167 .read_status = &genphy_read_status, 208 .read_status = genphy_read_status,
168 .driver = { 209 .driver = {
169 .owner = THIS_MODULE, 210 .owner = THIS_MODULE,
170 }, 211 },
@@ -176,10 +217,12 @@ static struct phy_driver at803x_driver[] = {
176 .config_init = at803x_config_init, 217 .config_init = at803x_config_init,
177 .set_wol = at803x_set_wol, 218 .set_wol = at803x_set_wol,
178 .get_wol = at803x_get_wol, 219 .get_wol = at803x_get_wol,
220 .suspend = at803x_suspend,
221 .resume = at803x_resume,
179 .features = PHY_GBIT_FEATURES, 222 .features = PHY_GBIT_FEATURES,
180 .flags = PHY_HAS_INTERRUPT, 223 .flags = PHY_HAS_INTERRUPT,
181 .config_aneg = &genphy_config_aneg, 224 .config_aneg = genphy_config_aneg,
182 .read_status = &genphy_read_status, 225 .read_status = genphy_read_status,
183 .driver = { 226 .driver = {
184 .owner = THIS_MODULE, 227 .owner = THIS_MODULE,
185 }, 228 },
@@ -191,10 +234,12 @@ static struct phy_driver at803x_driver[] = {
191 .config_init = at803x_config_init, 234 .config_init = at803x_config_init,
192 .set_wol = at803x_set_wol, 235 .set_wol = at803x_set_wol,
193 .get_wol = at803x_get_wol, 236 .get_wol = at803x_get_wol,
237 .suspend = at803x_suspend,
238 .resume = at803x_resume,
194 .features = PHY_GBIT_FEATURES, 239 .features = PHY_GBIT_FEATURES,
195 .flags = PHY_HAS_INTERRUPT, 240 .flags = PHY_HAS_INTERRUPT,
196 .config_aneg = &genphy_config_aneg, 241 .config_aneg = genphy_config_aneg,
197 .read_status = &genphy_read_status, 242 .read_status = genphy_read_status,
198 .driver = { 243 .driver = {
199 .owner = THIS_MODULE, 244 .owner = THIS_MODULE,
200 }, 245 },
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2e91477362d4..2e3c778ea9bf 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -34,9 +34,9 @@
34#include <linux/marvell_phy.h> 34#include <linux/marvell_phy.h>
35#include <linux/of.h> 35#include <linux/of.h>
36 36
37#include <asm/io.h> 37#include <linux/io.h>
38#include <asm/irq.h> 38#include <asm/irq.h>
39#include <asm/uaccess.h> 39#include <linux/uaccess.h>
40 40
41#define MII_MARVELL_PHY_PAGE 22 41#define MII_MARVELL_PHY_PAGE 22
42 42
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c31aad0004cb..3ae28f420868 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -287,6 +287,8 @@ static struct phy_driver ksphy_driver[] = {
287 .read_status = genphy_read_status, 287 .read_status = genphy_read_status,
288 .ack_interrupt = kszphy_ack_interrupt, 288 .ack_interrupt = kszphy_ack_interrupt,
289 .config_intr = ks8737_config_intr, 289 .config_intr = ks8737_config_intr,
290 .suspend = genphy_suspend,
291 .resume = genphy_resume,
290 .driver = { .owner = THIS_MODULE,}, 292 .driver = { .owner = THIS_MODULE,},
291}, { 293}, {
292 .phy_id = PHY_ID_KSZ8021, 294 .phy_id = PHY_ID_KSZ8021,
@@ -300,6 +302,8 @@ static struct phy_driver ksphy_driver[] = {
300 .read_status = genphy_read_status, 302 .read_status = genphy_read_status,
301 .ack_interrupt = kszphy_ack_interrupt, 303 .ack_interrupt = kszphy_ack_interrupt,
302 .config_intr = kszphy_config_intr, 304 .config_intr = kszphy_config_intr,
305 .suspend = genphy_suspend,
306 .resume = genphy_resume,
303 .driver = { .owner = THIS_MODULE,}, 307 .driver = { .owner = THIS_MODULE,},
304}, { 308}, {
305 .phy_id = PHY_ID_KSZ8031, 309 .phy_id = PHY_ID_KSZ8031,
@@ -313,6 +317,8 @@ static struct phy_driver ksphy_driver[] = {
313 .read_status = genphy_read_status, 317 .read_status = genphy_read_status,
314 .ack_interrupt = kszphy_ack_interrupt, 318 .ack_interrupt = kszphy_ack_interrupt,
315 .config_intr = kszphy_config_intr, 319 .config_intr = kszphy_config_intr,
320 .suspend = genphy_suspend,
321 .resume = genphy_resume,
316 .driver = { .owner = THIS_MODULE,}, 322 .driver = { .owner = THIS_MODULE,},
317}, { 323}, {
318 .phy_id = PHY_ID_KSZ8041, 324 .phy_id = PHY_ID_KSZ8041,
@@ -326,6 +332,8 @@ static struct phy_driver ksphy_driver[] = {
326 .read_status = genphy_read_status, 332 .read_status = genphy_read_status,
327 .ack_interrupt = kszphy_ack_interrupt, 333 .ack_interrupt = kszphy_ack_interrupt,
328 .config_intr = kszphy_config_intr, 334 .config_intr = kszphy_config_intr,
335 .suspend = genphy_suspend,
336 .resume = genphy_resume,
329 .driver = { .owner = THIS_MODULE,}, 337 .driver = { .owner = THIS_MODULE,},
330}, { 338}, {
331 .phy_id = PHY_ID_KSZ8051, 339 .phy_id = PHY_ID_KSZ8051,
@@ -339,6 +347,8 @@ static struct phy_driver ksphy_driver[] = {
339 .read_status = genphy_read_status, 347 .read_status = genphy_read_status,
340 .ack_interrupt = kszphy_ack_interrupt, 348 .ack_interrupt = kszphy_ack_interrupt,
341 .config_intr = kszphy_config_intr, 349 .config_intr = kszphy_config_intr,
350 .suspend = genphy_suspend,
351 .resume = genphy_resume,
342 .driver = { .owner = THIS_MODULE,}, 352 .driver = { .owner = THIS_MODULE,},
343}, { 353}, {
344 .phy_id = PHY_ID_KSZ8001, 354 .phy_id = PHY_ID_KSZ8001,
@@ -351,6 +361,8 @@ static struct phy_driver ksphy_driver[] = {
351 .read_status = genphy_read_status, 361 .read_status = genphy_read_status,
352 .ack_interrupt = kszphy_ack_interrupt, 362 .ack_interrupt = kszphy_ack_interrupt,
353 .config_intr = kszphy_config_intr, 363 .config_intr = kszphy_config_intr,
364 .suspend = genphy_suspend,
365 .resume = genphy_resume,
354 .driver = { .owner = THIS_MODULE,}, 366 .driver = { .owner = THIS_MODULE,},
355}, { 367}, {
356 .phy_id = PHY_ID_KSZ8081, 368 .phy_id = PHY_ID_KSZ8081,
@@ -363,6 +375,8 @@ static struct phy_driver ksphy_driver[] = {
363 .read_status = genphy_read_status, 375 .read_status = genphy_read_status,
364 .ack_interrupt = kszphy_ack_interrupt, 376 .ack_interrupt = kszphy_ack_interrupt,
365 .config_intr = kszphy_config_intr, 377 .config_intr = kszphy_config_intr,
378 .suspend = genphy_suspend,
379 .resume = genphy_resume,
366 .driver = { .owner = THIS_MODULE,}, 380 .driver = { .owner = THIS_MODULE,},
367}, { 381}, {
368 .phy_id = PHY_ID_KSZ8061, 382 .phy_id = PHY_ID_KSZ8061,
@@ -375,6 +389,8 @@ static struct phy_driver ksphy_driver[] = {
375 .read_status = genphy_read_status, 389 .read_status = genphy_read_status,
376 .ack_interrupt = kszphy_ack_interrupt, 390 .ack_interrupt = kszphy_ack_interrupt,
377 .config_intr = kszphy_config_intr, 391 .config_intr = kszphy_config_intr,
392 .suspend = genphy_suspend,
393 .resume = genphy_resume,
378 .driver = { .owner = THIS_MODULE,}, 394 .driver = { .owner = THIS_MODULE,},
379}, { 395}, {
380 .phy_id = PHY_ID_KSZ9021, 396 .phy_id = PHY_ID_KSZ9021,
@@ -387,6 +403,8 @@ static struct phy_driver ksphy_driver[] = {
387 .read_status = genphy_read_status, 403 .read_status = genphy_read_status,
388 .ack_interrupt = kszphy_ack_interrupt, 404 .ack_interrupt = kszphy_ack_interrupt,
389 .config_intr = ksz9021_config_intr, 405 .config_intr = ksz9021_config_intr,
406 .suspend = genphy_suspend,
407 .resume = genphy_resume,
390 .driver = { .owner = THIS_MODULE, }, 408 .driver = { .owner = THIS_MODULE, },
391}, { 409}, {
392 .phy_id = PHY_ID_KSZ9031, 410 .phy_id = PHY_ID_KSZ9031,
@@ -400,6 +418,8 @@ static struct phy_driver ksphy_driver[] = {
400 .read_status = genphy_read_status, 418 .read_status = genphy_read_status,
401 .ack_interrupt = kszphy_ack_interrupt, 419 .ack_interrupt = kszphy_ack_interrupt,
402 .config_intr = ksz9021_config_intr, 420 .config_intr = ksz9021_config_intr,
421 .suspend = genphy_suspend,
422 .resume = genphy_resume,
403 .driver = { .owner = THIS_MODULE, }, 423 .driver = { .owner = THIS_MODULE, },
404}, { 424}, {
405 .phy_id = PHY_ID_KSZ8873MLL, 425 .phy_id = PHY_ID_KSZ8873MLL,
@@ -410,6 +430,8 @@ static struct phy_driver ksphy_driver[] = {
410 .config_init = kszphy_config_init, 430 .config_init = kszphy_config_init,
411 .config_aneg = ksz8873mll_config_aneg, 431 .config_aneg = ksz8873mll_config_aneg,
412 .read_status = ksz8873mll_read_status, 432 .read_status = ksz8873mll_read_status,
433 .suspend = genphy_suspend,
434 .resume = genphy_resume,
413 .driver = { .owner = THIS_MODULE, }, 435 .driver = { .owner = THIS_MODULE, },
414}, { 436}, {
415 .phy_id = PHY_ID_KSZ886X, 437 .phy_id = PHY_ID_KSZ886X,
@@ -420,6 +442,8 @@ static struct phy_driver ksphy_driver[] = {
420 .config_init = kszphy_config_init, 442 .config_init = kszphy_config_init,
421 .config_aneg = genphy_config_aneg, 443 .config_aneg = genphy_config_aneg,
422 .read_status = genphy_read_status, 444 .read_status = genphy_read_status,
445 .suspend = genphy_suspend,
446 .resume = genphy_resume,
423 .driver = { .owner = THIS_MODULE, }, 447 .driver = { .owner = THIS_MODULE, },
424} }; 448} };
425 449
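Aside: the micrel hunks wire the generic BMCR-based power-management helpers into each phy_driver entry. A sketch of the same wiring for a hypothetical PHY; the ID, mask and name below are made up for illustration:

	#include <linux/module.h>
	#include <linux/phy.h>

	static struct phy_driver example_phy_driver = {
		.phy_id		= 0x00123450,	/* made-up ID */
		.phy_id_mask	= 0x00fffff0,
		.name		= "Example 10/100 PHY",
		.features	= PHY_BASIC_FEATURES,
		.config_aneg	= genphy_config_aneg,
		.read_status	= genphy_read_status,
		.suspend	= genphy_suspend,	/* sets BMCR_PDOWN */
		.resume		= genphy_resume,	/* clears BMCR_PDOWN */
		.driver		= { .owner = THIS_MODULE, },
	};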
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 1f7bef90b467..7b4ff35c8bf7 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1002,7 +1002,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
1002 /* Any address will do - we take the first */ 1002 /* Any address will do - we take the first */
1003 const struct in_ifaddr *ifa = in_dev->ifa_list; 1003 const struct in_ifaddr *ifa = in_dev->ifa_list;
1004 if (ifa) { 1004 if (ifa) {
1005 memcpy(eth->h_source, dev->dev_addr, 6); 1005 memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
1006 memset(eth->h_dest, 0xfc, 2); 1006 memset(eth->h_dest, 0xfc, 2);
1007 memcpy(eth->h_dest+2, &ifa->ifa_address, 4); 1007 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1008 } 1008 }
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 8d5cac2d8e33..df507e6dbb9c 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -640,10 +640,10 @@ static void catc_set_multicast_list(struct net_device *netdev)
640{ 640{
641 struct catc *catc = netdev_priv(netdev); 641 struct catc *catc = netdev_priv(netdev);
642 struct netdev_hw_addr *ha; 642 struct netdev_hw_addr *ha;
643 u8 broadcast[6]; 643 u8 broadcast[ETH_ALEN];
644 u8 rx = RxEnable | RxPolarity | RxMultiCast; 644 u8 rx = RxEnable | RxPolarity | RxMultiCast;
645 645
646 memset(broadcast, 0xff, 6); 646 memset(broadcast, 0xff, ETH_ALEN);
647 memset(catc->multicast, 0, 64); 647 memset(catc->multicast, 0, 64);
648 648
649 catc_multicast(broadcast, catc->multicast); 649 catc_multicast(broadcast, catc->multicast);
@@ -778,7 +778,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
778 struct usb_device *usbdev = interface_to_usbdev(intf); 778 struct usb_device *usbdev = interface_to_usbdev(intf);
779 struct net_device *netdev; 779 struct net_device *netdev;
780 struct catc *catc; 780 struct catc *catc;
781 u8 broadcast[6]; 781 u8 broadcast[ETH_ALEN];
782 int i, pktsz; 782 int i, pktsz;
783 783
784 if (usb_set_interface(usbdev, 784 if (usb_set_interface(usbdev,
@@ -882,7 +882,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
882 882
883 dev_dbg(dev, "Filling the multicast list.\n"); 883 dev_dbg(dev, "Filling the multicast list.\n");
884 884
885 memset(broadcast, 0xff, 6); 885 memset(broadcast, 0xff, ETH_ALEN);
886 catc_multicast(broadcast, catc->multicast); 886 catc_multicast(broadcast, catc->multicast);
887 catc_multicast(netdev->dev_addr, catc->multicast); 887 catc_multicast(netdev->dev_addr, catc->multicast);
888 catc_write_mem(catc, 0xfa80, catc->multicast, 64); 888 catc_write_mem(catc, 0xfa80, catc->multicast, 64);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 7d78669000d7..6358d420e185 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -328,7 +328,7 @@ MODULE_DEVICE_TABLE(usb, usbpn_ids);
328 328
329static struct usb_driver usbpn_driver; 329static struct usb_driver usbpn_driver;
330 330
331int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) 331static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
332{ 332{
333 static const char ifname[] = "usbpn%d"; 333 static const char ifname[] = "usbpn%d";
334 const struct usb_cdc_union_desc *union_header = NULL; 334 const struct usb_cdc_union_desc *union_header = NULL;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 818ce90185b5..e0a4a2b08e45 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -143,16 +143,22 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
143 .ndo_validate_addr = eth_validate_addr, 143 .ndo_validate_addr = eth_validate_addr,
144}; 144};
145 145
146/* using a counter to merge subdriver requests with our own into a combined state */ 146/* using a counter to merge subdriver requests with our own into a
147 * combined state
148 */
147static int qmi_wwan_manage_power(struct usbnet *dev, int on) 149static int qmi_wwan_manage_power(struct usbnet *dev, int on)
148{ 150{
149 struct qmi_wwan_state *info = (void *)&dev->data; 151 struct qmi_wwan_state *info = (void *)&dev->data;
150 int rv = 0; 152 int rv = 0;
151 153
152 dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on); 154 dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__,
155 atomic_read(&info->pmcount), on);
153 156
154 if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) { 157 if ((on && atomic_add_return(1, &info->pmcount) == 1) ||
155 /* need autopm_get/put here to ensure the usbcore sees the new value */ 158 (!on && atomic_dec_and_test(&info->pmcount))) {
159 /* need autopm_get/put here to ensure the usbcore sees
160 * the new value
161 */
156 rv = usb_autopm_get_interface(dev->intf); 162 rv = usb_autopm_get_interface(dev->intf);
157 if (rv < 0) 163 if (rv < 0)
158 goto err; 164 goto err;
@@ -199,7 +205,8 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
199 atomic_set(&info->pmcount, 0); 205 atomic_set(&info->pmcount, 0);
200 206
201 /* register subdriver */ 207 /* register subdriver */
202 subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power); 208 subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc,
209 4096, &qmi_wwan_cdc_wdm_manage_power);
203 if (IS_ERR(subdriver)) { 210 if (IS_ERR(subdriver)) {
204 dev_err(&info->control->dev, "subdriver registration failed\n"); 211 dev_err(&info->control->dev, "subdriver registration failed\n");
205 rv = PTR_ERR(subdriver); 212 rv = PTR_ERR(subdriver);
@@ -228,7 +235,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
228 struct usb_driver *driver = driver_of(intf); 235 struct usb_driver *driver = driver_of(intf);
229 struct qmi_wwan_state *info = (void *)&dev->data; 236 struct qmi_wwan_state *info = (void *)&dev->data;
230 237
231 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); 238 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) <
239 sizeof(struct qmi_wwan_state)));
232 240
233 /* set up initial state */ 241 /* set up initial state */
234 info->control = intf; 242 info->control = intf;
@@ -250,7 +258,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
250 goto err; 258 goto err;
251 } 259 }
252 if (h->bLength != sizeof(struct usb_cdc_header_desc)) { 260 if (h->bLength != sizeof(struct usb_cdc_header_desc)) {
253 dev_dbg(&intf->dev, "CDC header len %u\n", h->bLength); 261 dev_dbg(&intf->dev, "CDC header len %u\n",
262 h->bLength);
254 goto err; 263 goto err;
255 } 264 }
256 break; 265 break;
@@ -260,7 +269,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
260 goto err; 269 goto err;
261 } 270 }
262 if (h->bLength != sizeof(struct usb_cdc_union_desc)) { 271 if (h->bLength != sizeof(struct usb_cdc_union_desc)) {
263 dev_dbg(&intf->dev, "CDC union len %u\n", h->bLength); 272 dev_dbg(&intf->dev, "CDC union len %u\n",
273 h->bLength);
264 goto err; 274 goto err;
265 } 275 }
266 cdc_union = (struct usb_cdc_union_desc *)buf; 276 cdc_union = (struct usb_cdc_union_desc *)buf;
@@ -271,15 +281,15 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
271 goto err; 281 goto err;
272 } 282 }
273 if (h->bLength != sizeof(struct usb_cdc_ether_desc)) { 283 if (h->bLength != sizeof(struct usb_cdc_ether_desc)) {
274 dev_dbg(&intf->dev, "CDC ether len %u\n", h->bLength); 284 dev_dbg(&intf->dev, "CDC ether len %u\n",
285 h->bLength);
275 goto err; 286 goto err;
276 } 287 }
277 cdc_ether = (struct usb_cdc_ether_desc *)buf; 288 cdc_ether = (struct usb_cdc_ether_desc *)buf;
278 break; 289 break;
279 } 290 }
280 291
281 /* 292 /* Remember which CDC functional descriptors we've seen. Works
282 * Remember which CDC functional descriptors we've seen. Works
283 * for all types we care about, of which USB_CDC_ETHERNET_TYPE 293 * for all types we care about, of which USB_CDC_ETHERNET_TYPE
284 * (0x0f) is the highest numbered 294 * (0x0f) is the highest numbered
285 */ 295 */
@@ -293,10 +303,14 @@ next_desc:
293 303
294 /* Use separate control and data interfaces if we found a CDC Union */ 304 /* Use separate control and data interfaces if we found a CDC Union */
295 if (cdc_union) { 305 if (cdc_union) {
296 info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); 306 info->data = usb_ifnum_to_if(dev->udev,
297 if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) { 307 cdc_union->bSlaveInterface0);
298 dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n", 308 if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 ||
299 cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0); 309 !info->data) {
310 dev_err(&intf->dev,
311 "bogus CDC Union: master=%u, slave=%u\n",
312 cdc_union->bMasterInterface0,
313 cdc_union->bSlaveInterface0);
300 goto err; 314 goto err;
301 } 315 }
302 } 316 }
@@ -374,8 +388,7 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
374 struct qmi_wwan_state *info = (void *)&dev->data; 388 struct qmi_wwan_state *info = (void *)&dev->data;
375 int ret; 389 int ret;
376 390
377 /* 391 /* Both usbnet_suspend() and subdriver->suspend() MUST return 0
378 * Both usbnet_suspend() and subdriver->suspend() MUST return 0
379 * in system sleep context, otherwise, the resume callback has 392 * in system sleep context, otherwise, the resume callback has
380 * to recover device from previous suspend failure. 393 * to recover device from previous suspend failure.
381 */ 394 */
@@ -383,7 +396,8 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
383 if (ret < 0) 396 if (ret < 0)
384 goto err; 397 goto err;
385 398
386 if (intf == info->control && info->subdriver && info->subdriver->suspend) 399 if (intf == info->control && info->subdriver &&
400 info->subdriver->suspend)
387 ret = info->subdriver->suspend(intf, message); 401 ret = info->subdriver->suspend(intf, message);
388 if (ret < 0) 402 if (ret < 0)
389 usbnet_resume(intf); 403 usbnet_resume(intf);
@@ -396,7 +410,8 @@ static int qmi_wwan_resume(struct usb_interface *intf)
396 struct usbnet *dev = usb_get_intfdata(intf); 410 struct usbnet *dev = usb_get_intfdata(intf);
397 struct qmi_wwan_state *info = (void *)&dev->data; 411 struct qmi_wwan_state *info = (void *)&dev->data;
398 int ret = 0; 412 int ret = 0;
399 bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume); 413 bool callsub = (intf == info->control && info->subdriver &&
414 info->subdriver->resume);
400 415
401 if (callsub) 416 if (callsub)
402 ret = info->subdriver->resume(intf); 417 ret = info->subdriver->resume(intf);
@@ -714,6 +729,7 @@ static const struct usb_device_id products[] = {
714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 729 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 730 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 731 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
732 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
717 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 733 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
718 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 734 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
719 735
@@ -777,7 +793,8 @@ static const struct usb_device_id products[] = {
777}; 793};
778MODULE_DEVICE_TABLE(usb, products); 794MODULE_DEVICE_TABLE(usb, products);
779 795
780static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod) 796static int qmi_wwan_probe(struct usb_interface *intf,
797 const struct usb_device_id *prod)
781{ 798{
782 struct usb_device_id *id = (struct usb_device_id *)prod; 799 struct usb_device_id *id = (struct usb_device_id *)prod;
783 800
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index eee1f19ef1e9..b2d034791e15 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -188,6 +188,11 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
188 return tot; 188 return tot;
189} 189}
190 190
191/* fake multicast ability */
192static void veth_set_multicast_list(struct net_device *dev)
193{
194}
195
191static int veth_open(struct net_device *dev) 196static int veth_open(struct net_device *dev)
192{ 197{
193 struct veth_priv *priv = netdev_priv(dev); 198 struct veth_priv *priv = netdev_priv(dev);
@@ -250,6 +255,7 @@ static const struct net_device_ops veth_netdev_ops = {
250 .ndo_start_xmit = veth_xmit, 255 .ndo_start_xmit = veth_xmit,
251 .ndo_change_mtu = veth_change_mtu, 256 .ndo_change_mtu = veth_change_mtu,
252 .ndo_get_stats64 = veth_get_stats64, 257 .ndo_get_stats64 = veth_get_stats64,
258 .ndo_set_rx_mode = veth_set_multicast_list,
253 .ndo_set_mac_address = eth_mac_addr, 259 .ndo_set_mac_address = eth_mac_addr,
254}; 260};
255 261
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index a03f358fd58b..12040a35d95d 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -410,9 +410,9 @@ int
410vmxnet3_create_queues(struct vmxnet3_adapter *adapter, 410vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
411 u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size); 411 u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
412 412
413extern void vmxnet3_set_ethtool_ops(struct net_device *netdev); 413void vmxnet3_set_ethtool_ops(struct net_device *netdev);
414 414
415extern struct rtnl_link_stats64 * 415struct rtnl_link_stats64 *
416vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); 416vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
417 417
418extern char vmxnet3_driver_name[]; 418extern char vmxnet3_driver_name[];
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2ef5b6219f3f..da8479479d01 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2087,7 +2087,7 @@ static void vxlan_setup(struct net_device *dev)
2087 vxlan->age_timer.function = vxlan_cleanup; 2087 vxlan->age_timer.function = vxlan_cleanup;
2088 vxlan->age_timer.data = (unsigned long) vxlan; 2088 vxlan->age_timer.data = (unsigned long) vxlan;
2089 2089
2090 inet_get_local_port_range(&low, &high); 2090 inet_get_local_port_range(dev_net(dev), &low, &high);
2091 vxlan->port_min = low; 2091 vxlan->port_min = low;
2092 vxlan->port_max = high; 2092 vxlan->port_max = high;
2093 vxlan->dst_port = htons(vxlan_port); 2093 vxlan->dst_port = htons(vxlan_port);
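Aside: the vxlan hunk reflects inet_get_local_port_range() taking a struct net argument, so callers pass the namespace explicitly; vxlan derives it from the device. A minimal sketch under that assumption, with an illustrative wrapper name:

	#include <linux/netdevice.h>
	#include <net/ip.h>

	static void example_port_range(struct net_device *dev,
				       int *low, int *high)
	{
		/* The ephemeral port range is per network namespace,
		 * hence dev_net(dev).
		 */
		inet_get_local_port_range(dev_net(dev), low, high);
	}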
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 3d80e4267de8..3d741663fd67 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -220,7 +220,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
220 /* We want a fast IRQ for this device. Actually we'd like an even faster 220 /* We want a fast IRQ for this device. Actually we'd like an even faster
221 IRQ ;) - This is one driver RtLinux is made for */ 221 IRQ ;) - This is one driver RtLinux is made for */
222 222
223 if (request_irq(irq, z8530_interrupt, IRQF_DISABLED, 223 if (request_irq(irq, z8530_interrupt, 0,
224 "Hostess SV11", sv) < 0) { 224 "Hostess SV11", sv) < 0) {
225 pr_warn("IRQ %d already in use\n", irq); 225 pr_warn("IRQ %d already in use\n", irq);
226 goto err_irq; 226 goto err_irq;
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 4f7748478984..27860b4f5908 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -266,7 +266,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
266 /* We want a fast IRQ for this device. Actually we'd like an even faster 266 /* We want a fast IRQ for this device. Actually we'd like an even faster
267 IRQ ;) - This is one driver RtLinux is made for */ 267 IRQ ;) - This is one driver RtLinux is made for */
268 268
269 if (request_irq(irq, z8530_interrupt, IRQF_DISABLED, 269 if (request_irq(irq, z8530_interrupt, 0,
270 "SeaLevel", dev) < 0) { 270 "SeaLevel", dev) < 0) {
271 pr_warn("IRQ %d already in use\n", irq); 271 pr_warn("IRQ %d already in use\n", irq);
272 goto err_request_irq; 272 goto err_request_irq;
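
Note on the two z8530-based WAN drivers above (hostess_sv11 and sealevel): IRQF_DISABLED has been a no-op for a long time, since interrupt handlers already run with the local IRQ line disabled, so passing 0 is behaviourally identical; flags such as IRQF_SHARED are unaffected. The resulting request pattern, reduced to a sketch with placeholder names:

#include <linux/interrupt.h>

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	/* acknowledge and service the device here */
	return IRQ_HANDLED;
}

static int demo_setup_irq(int irq, void *priv)
{
	/* 0 replaces the obsolete IRQF_DISABLED; nothing else changes */
	return request_irq(irq, demo_interrupt, 0, "demo", priv);
}
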
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
index 8f0fc2e57e2b..f57ee67836ae 100644
--- a/drivers/net/wan/x25_asy.h
+++ b/drivers/net/wan/x25_asy.h
@@ -41,6 +41,6 @@ struct x25_asy {
41 41
42#define X25_ASY_MAGIC 0x5303 42#define X25_ASY_MAGIC 0x5303
43 43
44extern int x25_asy_init(struct net_device *dev); 44int x25_asy_init(struct net_device *dev);
45 45
46#endif /* _LINUX_X25_ASY.H */ 46#endif /* _LINUX_X25_ASY.H */
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index f29d554fc07d..2416a9d60bd6 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -395,20 +395,19 @@ struct z8530_dev
395extern u8 z8530_dead_port[]; 395extern u8 z8530_dead_port[];
396extern u8 z8530_hdlc_kilostream_85230[]; 396extern u8 z8530_hdlc_kilostream_85230[];
397extern u8 z8530_hdlc_kilostream[]; 397extern u8 z8530_hdlc_kilostream[];
398extern irqreturn_t z8530_interrupt(int, void *); 398irqreturn_t z8530_interrupt(int, void *);
399extern void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io); 399void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
400extern int z8530_init(struct z8530_dev *); 400int z8530_init(struct z8530_dev *);
401extern int z8530_shutdown(struct z8530_dev *); 401int z8530_shutdown(struct z8530_dev *);
402extern int z8530_sync_open(struct net_device *, struct z8530_channel *); 402int z8530_sync_open(struct net_device *, struct z8530_channel *);
403extern int z8530_sync_close(struct net_device *, struct z8530_channel *); 403int z8530_sync_close(struct net_device *, struct z8530_channel *);
404extern int z8530_sync_dma_open(struct net_device *, struct z8530_channel *); 404int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
405extern int z8530_sync_dma_close(struct net_device *, struct z8530_channel *); 405int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
406extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *); 406int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
407extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); 407int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
408extern int z8530_channel_load(struct z8530_channel *, u8 *); 408int z8530_channel_load(struct z8530_channel *, u8 *);
409extern netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, 409netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
410 struct sk_buff *skb); 410void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
411extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
412 411
413 412
414/* 413/*
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 9f1e947f3557..649ecad6844c 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -256,21 +256,20 @@ void i2400mu_init(struct i2400mu *i2400mu)
256 i2400mu->rx_size_auto_shrink = 1; 256 i2400mu->rx_size_auto_shrink = 1;
257} 257}
258 258
259extern int i2400mu_notification_setup(struct i2400mu *); 259int i2400mu_notification_setup(struct i2400mu *);
260extern void i2400mu_notification_release(struct i2400mu *); 260void i2400mu_notification_release(struct i2400mu *);
261 261
262extern int i2400mu_rx_setup(struct i2400mu *); 262int i2400mu_rx_setup(struct i2400mu *);
263extern void i2400mu_rx_release(struct i2400mu *); 263void i2400mu_rx_release(struct i2400mu *);
264extern void i2400mu_rx_kick(struct i2400mu *); 264void i2400mu_rx_kick(struct i2400mu *);
265 265
266extern int i2400mu_tx_setup(struct i2400mu *); 266int i2400mu_tx_setup(struct i2400mu *);
267extern void i2400mu_tx_release(struct i2400mu *); 267void i2400mu_tx_release(struct i2400mu *);
268extern void i2400mu_bus_tx_kick(struct i2400m *); 268void i2400mu_bus_tx_kick(struct i2400m *);
269 269
270extern ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *, 270ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
271 const struct i2400m_bootrom_header *, 271 const struct i2400m_bootrom_header *, size_t,
272 size_t, int); 272 int);
273extern ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *, 273ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
274 struct i2400m_bootrom_header *, 274 struct i2400m_bootrom_header *, size_t);
275 size_t);
276#endif /* #ifndef __I2400M_USB_H__ */ 275#endif /* #ifndef __I2400M_USB_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 79c6505b5c20..5a34e72bab9a 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -710,18 +710,18 @@ enum i2400m_bri {
710 I2400M_BRI_MAC_REINIT = 1 << 3, 710 I2400M_BRI_MAC_REINIT = 1 << 3,
711}; 711};
712 712
713extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *); 713void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
714extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri); 714int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
715extern int i2400m_read_mac_addr(struct i2400m *); 715int i2400m_read_mac_addr(struct i2400m *);
716extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri); 716int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
717extern int i2400m_is_boot_barker(struct i2400m *, const void *, size_t); 717int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
718static inline 718static inline
719int i2400m_is_d2h_barker(const void *buf) 719int i2400m_is_d2h_barker(const void *buf)
720{ 720{
721 const __le32 *barker = buf; 721 const __le32 *barker = buf;
722 return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER; 722 return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
723} 723}
724extern void i2400m_unknown_barker(struct i2400m *, const void *, size_t); 724void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
725 725
726/* Make/grok boot-rom header commands */ 726/* Make/grok boot-rom header commands */
727 727
@@ -789,32 +789,31 @@ unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
789/* 789/*
790 * Driver / device setup and internal functions 790 * Driver / device setup and internal functions
791 */ 791 */
792extern void i2400m_init(struct i2400m *); 792void i2400m_init(struct i2400m *);
793extern int i2400m_reset(struct i2400m *, enum i2400m_reset_type); 793int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
794extern void i2400m_netdev_setup(struct net_device *net_dev); 794void i2400m_netdev_setup(struct net_device *net_dev);
795extern int i2400m_sysfs_setup(struct device_driver *); 795int i2400m_sysfs_setup(struct device_driver *);
796extern void i2400m_sysfs_release(struct device_driver *); 796void i2400m_sysfs_release(struct device_driver *);
797extern int i2400m_tx_setup(struct i2400m *); 797int i2400m_tx_setup(struct i2400m *);
798extern void i2400m_wake_tx_work(struct work_struct *); 798void i2400m_wake_tx_work(struct work_struct *);
799extern void i2400m_tx_release(struct i2400m *); 799void i2400m_tx_release(struct i2400m *);
800 800
801extern int i2400m_rx_setup(struct i2400m *); 801int i2400m_rx_setup(struct i2400m *);
802extern void i2400m_rx_release(struct i2400m *); 802void i2400m_rx_release(struct i2400m *);
803 803
804extern void i2400m_fw_cache(struct i2400m *); 804void i2400m_fw_cache(struct i2400m *);
805extern void i2400m_fw_uncache(struct i2400m *); 805void i2400m_fw_uncache(struct i2400m *);
806 806
807extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, 807void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, const void *,
808 const void *, int); 808 int);
809extern void i2400m_net_erx(struct i2400m *, struct sk_buff *, 809void i2400m_net_erx(struct i2400m *, struct sk_buff *, enum i2400m_cs);
810 enum i2400m_cs); 810void i2400m_net_wake_stop(struct i2400m *);
811extern void i2400m_net_wake_stop(struct i2400m *);
812enum i2400m_pt; 811enum i2400m_pt;
813extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt); 812int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
814 813
815#ifdef CONFIG_DEBUG_FS 814#ifdef CONFIG_DEBUG_FS
816extern int i2400m_debugfs_add(struct i2400m *); 815int i2400m_debugfs_add(struct i2400m *);
817extern void i2400m_debugfs_rm(struct i2400m *); 816void i2400m_debugfs_rm(struct i2400m *);
818#else 817#else
819static inline int i2400m_debugfs_add(struct i2400m *i2400m) 818static inline int i2400m_debugfs_add(struct i2400m *i2400m)
820{ 819{
@@ -824,8 +823,8 @@ static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
824#endif 823#endif
825 824
826/* Initialize/shutdown the device */ 825/* Initialize/shutdown the device */
827extern int i2400m_dev_initialize(struct i2400m *); 826int i2400m_dev_initialize(struct i2400m *);
828extern void i2400m_dev_shutdown(struct i2400m *); 827void i2400m_dev_shutdown(struct i2400m *);
829 828
830extern struct attribute_group i2400m_dev_attr_group; 829extern struct attribute_group i2400m_dev_attr_group;
831 830
@@ -873,21 +872,21 @@ void i2400m_put(struct i2400m *i2400m)
873 dev_put(i2400m->wimax_dev.net_dev); 872 dev_put(i2400m->wimax_dev.net_dev);
874} 873}
875 874
876extern int i2400m_dev_reset_handle(struct i2400m *, const char *); 875int i2400m_dev_reset_handle(struct i2400m *, const char *);
877extern int i2400m_pre_reset(struct i2400m *); 876int i2400m_pre_reset(struct i2400m *);
878extern int i2400m_post_reset(struct i2400m *); 877int i2400m_post_reset(struct i2400m *);
879extern void i2400m_error_recovery(struct i2400m *); 878void i2400m_error_recovery(struct i2400m *);
880 879
881/* 880/*
882 * _setup()/_release() are called by the probe/disconnect functions of 881 * _setup()/_release() are called by the probe/disconnect functions of
883 * the bus-specific drivers. 882 * the bus-specific drivers.
884 */ 883 */
885extern int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags); 884int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
886extern void i2400m_release(struct i2400m *); 885void i2400m_release(struct i2400m *);
887 886
888extern int i2400m_rx(struct i2400m *, struct sk_buff *); 887int i2400m_rx(struct i2400m *, struct sk_buff *);
889extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *); 888struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
890extern void i2400m_tx_msg_sent(struct i2400m *); 889void i2400m_tx_msg_sent(struct i2400m *);
891 890
892 891
893/* 892/*
@@ -900,20 +899,19 @@ struct device *i2400m_dev(struct i2400m *i2400m)
900 return i2400m->wimax_dev.net_dev->dev.parent; 899 return i2400m->wimax_dev.net_dev->dev.parent;
901} 900}
902 901
903extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, 902int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, char *, size_t);
904 char *, size_t); 903int i2400m_msg_size_check(struct i2400m *, const struct i2400m_l3l4_hdr *,
905extern int i2400m_msg_size_check(struct i2400m *, 904 size_t);
906 const struct i2400m_l3l4_hdr *, size_t); 905struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
907extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t); 906void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
908extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int); 907void i2400m_report_hook(struct i2400m *, const struct i2400m_l3l4_hdr *,
909extern void i2400m_report_hook(struct i2400m *, 908 size_t);
910 const struct i2400m_l3l4_hdr *, size_t); 909void i2400m_report_hook_work(struct work_struct *);
911extern void i2400m_report_hook_work(struct work_struct *); 910int i2400m_cmd_enter_powersave(struct i2400m *);
912extern int i2400m_cmd_enter_powersave(struct i2400m *); 911int i2400m_cmd_exit_idle(struct i2400m *);
913extern int i2400m_cmd_exit_idle(struct i2400m *); 912struct sk_buff *i2400m_get_device_info(struct i2400m *);
914extern struct sk_buff *i2400m_get_device_info(struct i2400m *); 913int i2400m_firmware_check(struct i2400m *);
915extern int i2400m_firmware_check(struct i2400m *); 914int i2400m_set_idle_timeout(struct i2400m *, unsigned);
916extern int i2400m_set_idle_timeout(struct i2400m *, unsigned);
917 915
918static inline 916static inline
919struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep) 917struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
@@ -921,10 +919,9 @@ struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
921 return &iface->cur_altsetting->endpoint[ep].desc; 919 return &iface->cur_altsetting->endpoint[ep].desc;
922} 920}
923 921
924extern int i2400m_op_rfkill_sw_toggle(struct wimax_dev *, 922int i2400m_op_rfkill_sw_toggle(struct wimax_dev *, enum wimax_rf_state);
925 enum wimax_rf_state); 923void i2400m_report_tlv_rf_switches_status(struct i2400m *,
926extern void i2400m_report_tlv_rf_switches_status( 924 const struct i2400m_tlv_rf_switches_status *);
927 struct i2400m *, const struct i2400m_tlv_rf_switches_status *);
928 925
929/* 926/*
930 * Helpers for firmware backwards compatibility 927 * Helpers for firmware backwards compatibility
@@ -968,8 +965,8 @@ void __i2400m_msleep(unsigned ms)
968 965
969 966
970/* module initialization helpers */ 967/* module initialization helpers */
971extern int i2400m_barker_db_init(const char *); 968int i2400m_barker_db_init(const char *);
972extern void i2400m_barker_db_exit(void); 969void i2400m_barker_db_exit(void);
973 970
974 971
975 972
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f9a24e599dee..cfce83e1f273 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1924,7 +1924,6 @@ static int adm8211_probe(struct pci_dev *pdev,
1924 pci_iounmap(pdev, priv->map); 1924 pci_iounmap(pdev, priv->map);
1925 1925
1926 err_free_dev: 1926 err_free_dev:
1927 pci_set_drvdata(pdev, NULL);
1928 ieee80211_free_hw(dev); 1927 ieee80211_free_hw(dev);
1929 1928
1930 err_free_reg: 1929 err_free_reg:
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 7fe19648f10e..edf4b57c4aaa 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5570,7 +5570,6 @@ static void airo_pci_remove(struct pci_dev *pdev)
5570 airo_print_info(dev->name, "Unregistering..."); 5570 airo_print_info(dev->name, "Unregistering...");
5571 stop_airo_card(dev, 1); 5571 stop_airo_card(dev, 1);
5572 pci_disable_device(pdev); 5572 pci_disable_device(pdev);
5573 pci_set_drvdata(pdev, NULL);
5574} 5573}
5575 5574
5576static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state) 5575static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
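
Note on the adm8211 and airo hunks above: the explicit pci_set_drvdata(pdev, NULL) calls are removed because the driver core clears drvdata itself once probe fails or the device is unbound, so resetting it in the driver is redundant. A sketch of the resulting remove path for a hypothetical PCI network driver:

#include <linux/pci.h>
#include <linux/netdevice.h>

static void demo_pci_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	unregister_netdev(dev);
	free_netdev(dev);
	pci_disable_device(pdev);
	/* no pci_set_drvdata(pdev, NULL): the driver core clears it */
}
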
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 1abf1d421173..ba81d6292eeb 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -32,5 +32,6 @@ source "drivers/net/wireless/ath/ath6kl/Kconfig"
32source "drivers/net/wireless/ath/ar5523/Kconfig" 32source "drivers/net/wireless/ath/ar5523/Kconfig"
33source "drivers/net/wireless/ath/wil6210/Kconfig" 33source "drivers/net/wireless/ath/wil6210/Kconfig"
34source "drivers/net/wireless/ath/ath10k/Kconfig" 34source "drivers/net/wireless/ath/ath10k/Kconfig"
35source "drivers/net/wireless/ath/wcn36xx/Kconfig"
35 36
36endif 37endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index fb05cfd19361..363b05653c7e 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ATH6KL) += ath6kl/
5obj-$(CONFIG_AR5523) += ar5523/ 5obj-$(CONFIG_AR5523) += ar5523/
6obj-$(CONFIG_WIL6210) += wil6210/ 6obj-$(CONFIG_WIL6210) += wil6210/
7obj-$(CONFIG_ATH10K) += ath10k/ 7obj-$(CONFIG_ATH10K) += ath10k/
8obj-$(CONFIG_WCN36XX) += wcn36xx/
8 9
9obj-$(CONFIG_ATH_COMMON) += ath.o 10obj-$(CONFIG_ATH_COMMON) += ath.o
10 11
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 17d7fece35d2..280fc3d53a36 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1762,6 +1762,7 @@ static struct usb_device_id ar5523_id_table[] = {
1762 AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */ 1762 AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */
1763 AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */ 1763 AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */
1764 AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */ 1764 AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
1765 AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
1765 AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */ 1766 AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
1766 AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */ 1767 AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
1767 AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108 1768 AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 744da6d1c405..a1f099628850 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -22,7 +22,8 @@
22 22
23void ath10k_bmi_start(struct ath10k *ar) 23void ath10k_bmi_start(struct ath10k *ar)
24{ 24{
25 ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n"); 25 ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
26
26 ar->bmi.done_sent = false; 27 ar->bmi.done_sent = false;
27} 28}
28 29
@@ -32,8 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
32 u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done); 33 u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
33 int ret; 34 int ret;
34 35
36 ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
37
35 if (ar->bmi.done_sent) { 38 if (ar->bmi.done_sent) {
36 ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__); 39 ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
37 return 0; 40 return 0;
38 } 41 }
39 42
@@ -46,7 +49,6 @@ int ath10k_bmi_done(struct ath10k *ar)
46 return ret; 49 return ret;
47 } 50 }
48 51
49 ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
50 return 0; 52 return 0;
51} 53}
52 54
@@ -59,6 +61,8 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
59 u32 resplen = sizeof(resp.get_target_info); 61 u32 resplen = sizeof(resp.get_target_info);
60 int ret; 62 int ret;
61 63
64 ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
65
62 if (ar->bmi.done_sent) { 66 if (ar->bmi.done_sent) {
63 ath10k_warn("BMI Get Target Info Command disallowed\n"); 67 ath10k_warn("BMI Get Target Info Command disallowed\n");
64 return -EBUSY; 68 return -EBUSY;
@@ -80,6 +84,7 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
80 84
81 target_info->version = __le32_to_cpu(resp.get_target_info.version); 85 target_info->version = __le32_to_cpu(resp.get_target_info.version);
82 target_info->type = __le32_to_cpu(resp.get_target_info.type); 86 target_info->type = __le32_to_cpu(resp.get_target_info.type);
87
83 return 0; 88 return 0;
84} 89}
85 90
@@ -92,15 +97,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
92 u32 rxlen; 97 u32 rxlen;
93 int ret; 98 int ret;
94 99
100 ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
101 address, length);
102
95 if (ar->bmi.done_sent) { 103 if (ar->bmi.done_sent) {
96 ath10k_warn("command disallowed\n"); 104 ath10k_warn("command disallowed\n");
97 return -EBUSY; 105 return -EBUSY;
98 } 106 }
99 107
100 ath10k_dbg(ATH10K_DBG_CORE,
101 "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
102 __func__, ar, address, length);
103
104 while (length) { 108 while (length) {
105 rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE); 109 rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
106 110
@@ -133,15 +137,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
133 u32 txlen; 137 u32 txlen;
134 int ret; 138 int ret;
135 139
140 ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
141 address, length);
142
136 if (ar->bmi.done_sent) { 143 if (ar->bmi.done_sent) {
137 ath10k_warn("command disallowed\n"); 144 ath10k_warn("command disallowed\n");
138 return -EBUSY; 145 return -EBUSY;
139 } 146 }
140 147
141 ath10k_dbg(ATH10K_DBG_CORE,
142 "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
143 __func__, ar, address, length);
144
145 while (length) { 148 while (length) {
146 txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen); 149 txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
147 150
@@ -180,15 +183,14 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
180 u32 resplen = sizeof(resp.execute); 183 u32 resplen = sizeof(resp.execute);
181 int ret; 184 int ret;
182 185
186 ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
187 address, *param);
188
183 if (ar->bmi.done_sent) { 189 if (ar->bmi.done_sent) {
184 ath10k_warn("command disallowed\n"); 190 ath10k_warn("command disallowed\n");
185 return -EBUSY; 191 return -EBUSY;
186 } 192 }
187 193
188 ath10k_dbg(ATH10K_DBG_CORE,
189 "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
190 __func__, ar, address, *param);
191
192 cmd.id = __cpu_to_le32(BMI_EXECUTE); 194 cmd.id = __cpu_to_le32(BMI_EXECUTE);
193 cmd.execute.addr = __cpu_to_le32(address); 195 cmd.execute.addr = __cpu_to_le32(address);
194 cmd.execute.param = __cpu_to_le32(*param); 196 cmd.execute.param = __cpu_to_le32(*param);
@@ -216,6 +218,9 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
216 u32 txlen; 218 u32 txlen;
217 int ret; 219 int ret;
218 220
221 ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
222 buffer, length);
223
219 if (ar->bmi.done_sent) { 224 if (ar->bmi.done_sent) {
220 ath10k_warn("command disallowed\n"); 225 ath10k_warn("command disallowed\n");
221 return -EBUSY; 226 return -EBUSY;
@@ -250,6 +255,9 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
250 u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start); 255 u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
251 int ret; 256 int ret;
252 257
258 ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
259 address);
260
253 if (ar->bmi.done_sent) { 261 if (ar->bmi.done_sent) {
254 ath10k_warn("command disallowed\n"); 262 ath10k_warn("command disallowed\n");
255 return -EBUSY; 263 return -EBUSY;
@@ -275,6 +283,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
275 u32 trailer_len = length - head_len; 283 u32 trailer_len = length - head_len;
276 int ret; 284 int ret;
277 285
286 ath10k_dbg(ATH10K_DBG_BMI,
287 "bmi fast download address 0x%x buffer 0x%p length %d\n",
288 address, buffer, length);
289
278 ret = ath10k_bmi_lz_stream_start(ar, address); 290 ret = ath10k_bmi_lz_stream_start(ar, address);
279 if (ret) 291 if (ret)
280 return ret; 292 return ret;
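
Note on the bmi.c changes above: every trace moves from ATH10K_DBG_CORE to a dedicated ATH10K_DBG_BMI level and is emitted at the top of each call, so BMI traffic can be enabled on its own through the driver's debug mask. The underlying idea is a bitmask-gated debug macro; a generic sketch with hypothetical names, not the ath10k implementation:

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/module.h>

enum demo_debug_mask {
	DEMO_DBG_CORE = BIT(0),
	DEMO_DBG_BMI  = BIT(1),
};

static unsigned int demo_debug_mask;
module_param_named(debug_mask, demo_debug_mask, uint, 0644);

/* print only when the caller's category is enabled in the mask */
#define demo_dbg(mask, fmt, ...)					\
	do {								\
		if (demo_debug_mask & (mask))				\
			pr_debug(fmt, ##__VA_ARGS__);			\
	} while (0)

static void demo_bmi_start(void)
{
	demo_dbg(DEMO_DBG_BMI, "bmi start\n");
}
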
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f8b969f518f8..834e29ea236c 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -76,36 +76,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
76 u32 ce_ctrl_addr, 76 u32 ce_ctrl_addr,
77 unsigned int n) 77 unsigned int n)
78{ 78{
79 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 79 ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
80 void __iomem *indicator_addr;
81
82 if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
83 ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
84 return;
85 }
86
87 /* workaround for QCA988x_1.0 HW CE */
88 indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
89
90 if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
91 iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
92 } else {
93 unsigned long irq_flags;
94 local_irq_save(irq_flags);
95 iowrite32(1, indicator_addr);
96
97 /*
98 * PCIE write waits for ACK in IPQ8K, there is no
99 * need to read back value.
100 */
101 (void)ioread32(indicator_addr);
102 (void)ioread32(indicator_addr); /* conservative */
103
104 ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
105
106 iowrite32(0, indicator_addr);
107 local_irq_restore(irq_flags);
108 }
109} 80}
110 81
111static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, 82static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
@@ -285,7 +256,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
285 * ath10k_ce_sendlist_send. 256 * ath10k_ce_sendlist_send.
286 * The caller takes responsibility for any needed locking. 257 * The caller takes responsibility for any needed locking.
287 */ 258 */
288static int ath10k_ce_send_nolock(struct ce_state *ce_state, 259static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
289 void *per_transfer_context, 260 void *per_transfer_context,
290 u32 buffer, 261 u32 buffer,
291 unsigned int nbytes, 262 unsigned int nbytes,
@@ -293,7 +264,7 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
293 unsigned int flags) 264 unsigned int flags)
294{ 265{
295 struct ath10k *ar = ce_state->ar; 266 struct ath10k *ar = ce_state->ar;
296 struct ce_ring_state *src_ring = ce_state->src_ring; 267 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
297 struct ce_desc *desc, *sdesc; 268 struct ce_desc *desc, *sdesc;
298 unsigned int nentries_mask = src_ring->nentries_mask; 269 unsigned int nentries_mask = src_ring->nentries_mask;
299 unsigned int sw_index = src_ring->sw_index; 270 unsigned int sw_index = src_ring->sw_index;
@@ -306,7 +277,9 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
306 ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n", 277 ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
307 __func__, nbytes, ce_state->src_sz_max); 278 __func__, nbytes, ce_state->src_sz_max);
308 279
309 ath10k_pci_wake(ar); 280 ret = ath10k_pci_wake(ar);
281 if (ret)
282 return ret;
310 283
311 if (unlikely(CE_RING_DELTA(nentries_mask, 284 if (unlikely(CE_RING_DELTA(nentries_mask,
312 write_index, sw_index - 1) <= 0)) { 285 write_index, sw_index - 1) <= 0)) {
@@ -346,7 +319,7 @@ exit:
346 return ret; 319 return ret;
347} 320}
348 321
349int ath10k_ce_send(struct ce_state *ce_state, 322int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
350 void *per_transfer_context, 323 void *per_transfer_context,
351 u32 buffer, 324 u32 buffer,
352 unsigned int nbytes, 325 unsigned int nbytes,
@@ -365,33 +338,19 @@ int ath10k_ce_send(struct ce_state *ce_state,
365 return ret; 338 return ret;
366} 339}
367 340
368void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer, 341int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
369 unsigned int nbytes, u32 flags)
370{
371 unsigned int num_items = sendlist->num_items;
372 struct ce_sendlist_item *item;
373
374 item = &sendlist->item[num_items];
375 item->data = buffer;
376 item->u.nbytes = nbytes;
377 item->flags = flags;
378 sendlist->num_items++;
379}
380
381int ath10k_ce_sendlist_send(struct ce_state *ce_state,
382 void *per_transfer_context, 342 void *per_transfer_context,
383 struct ce_sendlist *sendlist, 343 unsigned int transfer_id,
384 unsigned int transfer_id) 344 u32 paddr, unsigned int nbytes,
345 u32 flags)
385{ 346{
386 struct ce_ring_state *src_ring = ce_state->src_ring; 347 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
387 struct ce_sendlist_item *item;
388 struct ath10k *ar = ce_state->ar; 348 struct ath10k *ar = ce_state->ar;
389 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 349 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
390 unsigned int nentries_mask = src_ring->nentries_mask; 350 unsigned int nentries_mask = src_ring->nentries_mask;
391 unsigned int num_items = sendlist->num_items;
392 unsigned int sw_index; 351 unsigned int sw_index;
393 unsigned int write_index; 352 unsigned int write_index;
394 int i, delta, ret = -ENOMEM; 353 int delta, ret = -ENOMEM;
395 354
396 spin_lock_bh(&ar_pci->ce_lock); 355 spin_lock_bh(&ar_pci->ce_lock);
397 356
@@ -400,30 +359,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
400 359
401 delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); 360 delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
402 361
403 if (delta >= num_items) { 362 if (delta >= 1) {
404 /*
405 * Handle all but the last item uniformly.
406 */
407 for (i = 0; i < num_items - 1; i++) {
408 item = &sendlist->item[i];
409 ret = ath10k_ce_send_nolock(ce_state,
410 CE_SENDLIST_ITEM_CTXT,
411 (u32) item->data,
412 item->u.nbytes, transfer_id,
413 item->flags |
414 CE_SEND_FLAG_GATHER);
415 if (ret)
416 ath10k_warn("CE send failed for item: %d\n", i);
417 }
418 /*
419 * Provide valid context pointer for final item.
420 */
421 item = &sendlist->item[i];
422 ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, 363 ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
423 (u32) item->data, item->u.nbytes, 364 paddr, nbytes,
424 transfer_id, item->flags); 365 transfer_id, flags);
425 if (ret) 366 if (ret)
426 ath10k_warn("CE send failed for last item: %d\n", i); 367 ath10k_warn("CE send failed: %d\n", ret);
427 } 368 }
428 369
429 spin_unlock_bh(&ar_pci->ce_lock); 370 spin_unlock_bh(&ar_pci->ce_lock);
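
Note on the hunk above: the multi-item ce_sendlist machinery is gone; callers only ever queued a single buffer, so ath10k_ce_sendlist_send() now takes the DMA address, length and flags directly and performs one send under ce_lock. An illustrative call with the new signature; demo_queue_tx and its arguments are placeholders, and the sketch assumes ath10k's ce.h is available:

#include <linux/skbuff.h>
#include "ce.h"

static int demo_queue_tx(struct ath10k_ce_pipe *pipe, struct sk_buff *skb,
			 unsigned int transfer_id, u32 paddr)
{
	/* one mapped buffer per call; no gather list any more */
	return ath10k_ce_sendlist_send(pipe, skb, transfer_id,
				       paddr, skb->len, 0);
}
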
@@ -431,11 +372,11 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
431 return ret; 372 return ret;
432} 373}
433 374
434int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state, 375int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
435 void *per_recv_context, 376 void *per_recv_context,
436 u32 buffer) 377 u32 buffer)
437{ 378{
438 struct ce_ring_state *dest_ring = ce_state->dest_ring; 379 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
439 u32 ctrl_addr = ce_state->ctrl_addr; 380 u32 ctrl_addr = ce_state->ctrl_addr;
440 struct ath10k *ar = ce_state->ar; 381 struct ath10k *ar = ce_state->ar;
441 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 382 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -448,7 +389,9 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
448 write_index = dest_ring->write_index; 389 write_index = dest_ring->write_index;
449 sw_index = dest_ring->sw_index; 390 sw_index = dest_ring->sw_index;
450 391
451 ath10k_pci_wake(ar); 392 ret = ath10k_pci_wake(ar);
393 if (ret)
394 goto out;
452 395
453 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) { 396 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
454 struct ce_desc *base = dest_ring->base_addr_owner_space; 397 struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -470,6 +413,8 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
470 ret = -EIO; 413 ret = -EIO;
471 } 414 }
472 ath10k_pci_sleep(ar); 415 ath10k_pci_sleep(ar);
416
417out:
473 spin_unlock_bh(&ar_pci->ce_lock); 418 spin_unlock_bh(&ar_pci->ce_lock);
474 419
475 return ret; 420 return ret;
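
Note on the pattern introduced throughout ce.c, visible in the hunk above: the return value of ath10k_pci_wake() is now checked before any register access, and paths that already hold ce_lock bail out through an out: label so the lock is always released. The shape of that pattern, reduced to a sketch in which every demo_* name is a placeholder for the corresponding ath10k helper:

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_ar;

struct demo_pipe {
	spinlock_t lock;
	struct demo_ar *ar;
};

int demo_pci_wake(struct demo_ar *ar);
void demo_pci_sleep(struct demo_ar *ar);
void demo_write_ring_index(struct demo_pipe *pipe, u32 paddr);

/* wake the target before touching registers; an early failure must
 * still drop the lock taken at the top of the function */
static int demo_ring_post(struct demo_pipe *pipe, u32 paddr)
{
	int ret;

	spin_lock_bh(&pipe->lock);

	ret = demo_pci_wake(pipe->ar);
	if (ret)
		goto out;

	demo_write_ring_index(pipe, paddr);	/* safe: target is awake */
	demo_pci_sleep(pipe->ar);
out:
	spin_unlock_bh(&pipe->lock);
	return ret;
}
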
@@ -479,14 +424,14 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
479 * Guts of ath10k_ce_completed_recv_next. 424 * Guts of ath10k_ce_completed_recv_next.
480 * The caller takes responsibility for any necessary locking. 425 * The caller takes responsibility for any necessary locking.
481 */ 426 */
482static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state, 427static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
483 void **per_transfer_contextp, 428 void **per_transfer_contextp,
484 u32 *bufferp, 429 u32 *bufferp,
485 unsigned int *nbytesp, 430 unsigned int *nbytesp,
486 unsigned int *transfer_idp, 431 unsigned int *transfer_idp,
487 unsigned int *flagsp) 432 unsigned int *flagsp)
488{ 433{
489 struct ce_ring_state *dest_ring = ce_state->dest_ring; 434 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
490 unsigned int nentries_mask = dest_ring->nentries_mask; 435 unsigned int nentries_mask = dest_ring->nentries_mask;
491 unsigned int sw_index = dest_ring->sw_index; 436 unsigned int sw_index = dest_ring->sw_index;
492 437
@@ -535,7 +480,7 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
535 return 0; 480 return 0;
536} 481}
537 482
538int ath10k_ce_completed_recv_next(struct ce_state *ce_state, 483int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
539 void **per_transfer_contextp, 484 void **per_transfer_contextp,
540 u32 *bufferp, 485 u32 *bufferp,
541 unsigned int *nbytesp, 486 unsigned int *nbytesp,
@@ -556,11 +501,11 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
556 return ret; 501 return ret;
557} 502}
558 503
559int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, 504int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
560 void **per_transfer_contextp, 505 void **per_transfer_contextp,
561 u32 *bufferp) 506 u32 *bufferp)
562{ 507{
563 struct ce_ring_state *dest_ring; 508 struct ath10k_ce_ring *dest_ring;
564 unsigned int nentries_mask; 509 unsigned int nentries_mask;
565 unsigned int sw_index; 510 unsigned int sw_index;
566 unsigned int write_index; 511 unsigned int write_index;
@@ -612,19 +557,20 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
612 * Guts of ath10k_ce_completed_send_next. 557 * Guts of ath10k_ce_completed_send_next.
613 * The caller takes responsibility for any necessary locking. 558 * The caller takes responsibility for any necessary locking.
614 */ 559 */
615static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state, 560static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
616 void **per_transfer_contextp, 561 void **per_transfer_contextp,
617 u32 *bufferp, 562 u32 *bufferp,
618 unsigned int *nbytesp, 563 unsigned int *nbytesp,
619 unsigned int *transfer_idp) 564 unsigned int *transfer_idp)
620{ 565{
621 struct ce_ring_state *src_ring = ce_state->src_ring; 566 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
622 u32 ctrl_addr = ce_state->ctrl_addr; 567 u32 ctrl_addr = ce_state->ctrl_addr;
623 struct ath10k *ar = ce_state->ar; 568 struct ath10k *ar = ce_state->ar;
624 unsigned int nentries_mask = src_ring->nentries_mask; 569 unsigned int nentries_mask = src_ring->nentries_mask;
625 unsigned int sw_index = src_ring->sw_index; 570 unsigned int sw_index = src_ring->sw_index;
571 struct ce_desc *sdesc, *sbase;
626 unsigned int read_index; 572 unsigned int read_index;
627 int ret = -EIO; 573 int ret;
628 574
629 if (src_ring->hw_index == sw_index) { 575 if (src_ring->hw_index == sw_index) {
630 /* 576 /*
@@ -634,48 +580,54 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
634 * the SW has really caught up to the HW, or if the cached 580 * the SW has really caught up to the HW, or if the cached
635 * value of the HW index has become stale. 581 * value of the HW index has become stale.
636 */ 582 */
637 ath10k_pci_wake(ar); 583
584 ret = ath10k_pci_wake(ar);
585 if (ret)
586 return ret;
587
638 src_ring->hw_index = 588 src_ring->hw_index =
639 ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); 589 ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
640 src_ring->hw_index &= nentries_mask; 590 src_ring->hw_index &= nentries_mask;
591
641 ath10k_pci_sleep(ar); 592 ath10k_pci_sleep(ar);
642 } 593 }
594
643 read_index = src_ring->hw_index; 595 read_index = src_ring->hw_index;
644 596
645 if ((read_index != sw_index) && (read_index != 0xffffffff)) { 597 if ((read_index == sw_index) || (read_index == 0xffffffff))
646 struct ce_desc *sbase = src_ring->shadow_base; 598 return -EIO;
647 struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
648 599
649 /* Return data from completed source descriptor */ 600 sbase = src_ring->shadow_base;
650 *bufferp = __le32_to_cpu(sdesc->addr); 601 sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
651 *nbytesp = __le16_to_cpu(sdesc->nbytes);
652 *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
653 CE_DESC_FLAGS_META_DATA);
654 602
655 if (per_transfer_contextp) 603 /* Return data from completed source descriptor */
656 *per_transfer_contextp = 604 *bufferp = __le32_to_cpu(sdesc->addr);
657 src_ring->per_transfer_context[sw_index]; 605 *nbytesp = __le16_to_cpu(sdesc->nbytes);
606 *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
607 CE_DESC_FLAGS_META_DATA);
658 608
659 /* sanity */ 609 if (per_transfer_contextp)
660 src_ring->per_transfer_context[sw_index] = NULL; 610 *per_transfer_contextp =
611 src_ring->per_transfer_context[sw_index];
661 612
662 /* Update sw_index */ 613 /* sanity */
663 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); 614 src_ring->per_transfer_context[sw_index] = NULL;
664 src_ring->sw_index = sw_index;
665 ret = 0;
666 }
667 615
668 return ret; 616 /* Update sw_index */
617 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
618 src_ring->sw_index = sw_index;
619
620 return 0;
669} 621}
670 622
671/* NB: Modeled after ath10k_ce_completed_send_next */ 623/* NB: Modeled after ath10k_ce_completed_send_next */
672int ath10k_ce_cancel_send_next(struct ce_state *ce_state, 624int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
673 void **per_transfer_contextp, 625 void **per_transfer_contextp,
674 u32 *bufferp, 626 u32 *bufferp,
675 unsigned int *nbytesp, 627 unsigned int *nbytesp,
676 unsigned int *transfer_idp) 628 unsigned int *transfer_idp)
677{ 629{
678 struct ce_ring_state *src_ring; 630 struct ath10k_ce_ring *src_ring;
679 unsigned int nentries_mask; 631 unsigned int nentries_mask;
680 unsigned int sw_index; 632 unsigned int sw_index;
681 unsigned int write_index; 633 unsigned int write_index;
@@ -727,7 +679,7 @@ int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
727 return ret; 679 return ret;
728} 680}
729 681
730int ath10k_ce_completed_send_next(struct ce_state *ce_state, 682int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
731 void **per_transfer_contextp, 683 void **per_transfer_contextp,
732 u32 *bufferp, 684 u32 *bufferp,
733 unsigned int *nbytesp, 685 unsigned int *nbytesp,
@@ -756,53 +708,29 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
756void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) 708void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
757{ 709{
758 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 710 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
759 struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id]; 711 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
760 u32 ctrl_addr = ce_state->ctrl_addr; 712 u32 ctrl_addr = ce_state->ctrl_addr;
761 void *transfer_context; 713 int ret;
762 u32 buf; 714
763 unsigned int nbytes; 715 ret = ath10k_pci_wake(ar);
764 unsigned int id; 716 if (ret)
765 unsigned int flags; 717 return;
766 718
767 ath10k_pci_wake(ar);
768 spin_lock_bh(&ar_pci->ce_lock); 719 spin_lock_bh(&ar_pci->ce_lock);
769 720
770 /* Clear the copy-complete interrupts that will be handled here. */ 721 /* Clear the copy-complete interrupts that will be handled here. */
771 ath10k_ce_engine_int_status_clear(ar, ctrl_addr, 722 ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
772 HOST_IS_COPY_COMPLETE_MASK); 723 HOST_IS_COPY_COMPLETE_MASK);
773 724
774 if (ce_state->recv_cb) { 725 spin_unlock_bh(&ar_pci->ce_lock);
775 /*
776 * Pop completed recv buffers and call the registered
777 * recv callback for each
778 */
779 while (ath10k_ce_completed_recv_next_nolock(ce_state,
780 &transfer_context,
781 &buf, &nbytes,
782 &id, &flags) == 0) {
783 spin_unlock_bh(&ar_pci->ce_lock);
784 ce_state->recv_cb(ce_state, transfer_context, buf,
785 nbytes, id, flags);
786 spin_lock_bh(&ar_pci->ce_lock);
787 }
788 }
789 726
790 if (ce_state->send_cb) { 727 if (ce_state->recv_cb)
791 /* 728 ce_state->recv_cb(ce_state);
792 * Pop completed send buffers and call the registered 729
793 * send callback for each 730 if (ce_state->send_cb)
794 */ 731 ce_state->send_cb(ce_state);
795 while (ath10k_ce_completed_send_next_nolock(ce_state, 732
796 &transfer_context, 733 spin_lock_bh(&ar_pci->ce_lock);
797 &buf,
798 &nbytes,
799 &id) == 0) {
800 spin_unlock_bh(&ar_pci->ce_lock);
801 ce_state->send_cb(ce_state, transfer_context,
802 buf, nbytes, id);
803 spin_lock_bh(&ar_pci->ce_lock);
804 }
805 }
806 734
807 /* 735 /*
808 * Misc CE interrupts are not being handled, but still need 736 * Misc CE interrupts are not being handled, but still need
@@ -823,10 +751,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
823void ath10k_ce_per_engine_service_any(struct ath10k *ar) 751void ath10k_ce_per_engine_service_any(struct ath10k *ar)
824{ 752{
825 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 753 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
826 int ce_id; 754 int ce_id, ret;
827 u32 intr_summary; 755 u32 intr_summary;
828 756
829 ath10k_pci_wake(ar); 757 ret = ath10k_pci_wake(ar);
758 if (ret)
759 return;
760
830 intr_summary = CE_INTERRUPT_SUMMARY(ar); 761 intr_summary = CE_INTERRUPT_SUMMARY(ar);
831 762
832 for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) { 763 for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
@@ -849,13 +780,16 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
849 * 780 *
850 * Called with ce_lock held. 781 * Called with ce_lock held.
851 */ 782 */
852static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state, 783static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
853 int disable_copy_compl_intr) 784 int disable_copy_compl_intr)
854{ 785{
855 u32 ctrl_addr = ce_state->ctrl_addr; 786 u32 ctrl_addr = ce_state->ctrl_addr;
856 struct ath10k *ar = ce_state->ar; 787 struct ath10k *ar = ce_state->ar;
788 int ret;
857 789
858 ath10k_pci_wake(ar); 790 ret = ath10k_pci_wake(ar);
791 if (ret)
792 return;
859 793
860 if ((!disable_copy_compl_intr) && 794 if ((!disable_copy_compl_intr) &&
861 (ce_state->send_cb || ce_state->recv_cb)) 795 (ce_state->send_cb || ce_state->recv_cb))
@@ -871,11 +805,14 @@ static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
871void ath10k_ce_disable_interrupts(struct ath10k *ar) 805void ath10k_ce_disable_interrupts(struct ath10k *ar)
872{ 806{
873 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 807 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
874 int ce_id; 808 int ce_id, ret;
809
810 ret = ath10k_pci_wake(ar);
811 if (ret)
812 return;
875 813
876 ath10k_pci_wake(ar);
877 for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) { 814 for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
878 struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id]; 815 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
879 u32 ctrl_addr = ce_state->ctrl_addr; 816 u32 ctrl_addr = ce_state->ctrl_addr;
880 817
881 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); 818 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
@@ -883,12 +820,8 @@ void ath10k_ce_disable_interrupts(struct ath10k *ar)
883 ath10k_pci_sleep(ar); 820 ath10k_pci_sleep(ar);
884} 821}
885 822
886void ath10k_ce_send_cb_register(struct ce_state *ce_state, 823void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
887 void (*send_cb) (struct ce_state *ce_state, 824 void (*send_cb)(struct ath10k_ce_pipe *),
888 void *transfer_context,
889 u32 buffer,
890 unsigned int nbytes,
891 unsigned int transfer_id),
892 int disable_interrupts) 825 int disable_interrupts)
893{ 826{
894 struct ath10k *ar = ce_state->ar; 827 struct ath10k *ar = ce_state->ar;
@@ -900,13 +833,8 @@ void ath10k_ce_send_cb_register(struct ce_state *ce_state,
900 spin_unlock_bh(&ar_pci->ce_lock); 833 spin_unlock_bh(&ar_pci->ce_lock);
901} 834}
902 835
903void ath10k_ce_recv_cb_register(struct ce_state *ce_state, 836void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
904 void (*recv_cb) (struct ce_state *ce_state, 837 void (*recv_cb)(struct ath10k_ce_pipe *))
905 void *transfer_context,
906 u32 buffer,
907 unsigned int nbytes,
908 unsigned int transfer_id,
909 unsigned int flags))
910{ 838{
911 struct ath10k *ar = ce_state->ar; 839 struct ath10k *ar = ce_state->ar;
912 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 840 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -919,11 +847,11 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
919 847
920static int ath10k_ce_init_src_ring(struct ath10k *ar, 848static int ath10k_ce_init_src_ring(struct ath10k *ar,
921 unsigned int ce_id, 849 unsigned int ce_id,
922 struct ce_state *ce_state, 850 struct ath10k_ce_pipe *ce_state,
923 const struct ce_attr *attr) 851 const struct ce_attr *attr)
924{ 852{
925 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 853 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
926 struct ce_ring_state *src_ring; 854 struct ath10k_ce_ring *src_ring;
927 unsigned int nentries = attr->src_nentries; 855 unsigned int nentries = attr->src_nentries;
928 unsigned int ce_nbytes; 856 unsigned int ce_nbytes;
929 u32 ctrl_addr = ath10k_ce_base_address(ce_id); 857 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -937,19 +865,18 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
937 return 0; 865 return 0;
938 } 866 }
939 867
940 ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *)); 868 ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
941 ptr = kzalloc(ce_nbytes, GFP_KERNEL); 869 ptr = kzalloc(ce_nbytes, GFP_KERNEL);
942 if (ptr == NULL) 870 if (ptr == NULL)
943 return -ENOMEM; 871 return -ENOMEM;
944 872
945 ce_state->src_ring = (struct ce_ring_state *)ptr; 873 ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
946 src_ring = ce_state->src_ring; 874 src_ring = ce_state->src_ring;
947 875
948 ptr += sizeof(struct ce_ring_state); 876 ptr += sizeof(struct ath10k_ce_ring);
949 src_ring->nentries = nentries; 877 src_ring->nentries = nentries;
950 src_ring->nentries_mask = nentries - 1; 878 src_ring->nentries_mask = nentries - 1;
951 879
952 ath10k_pci_wake(ar);
953 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); 880 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
954 src_ring->sw_index &= src_ring->nentries_mask; 881 src_ring->sw_index &= src_ring->nentries_mask;
955 src_ring->hw_index = src_ring->sw_index; 882 src_ring->hw_index = src_ring->sw_index;
@@ -957,7 +884,6 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
957 src_ring->write_index = 884 src_ring->write_index =
958 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); 885 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
959 src_ring->write_index &= src_ring->nentries_mask; 886 src_ring->write_index &= src_ring->nentries_mask;
960 ath10k_pci_sleep(ar);
961 887
962 src_ring->per_transfer_context = (void **)ptr; 888 src_ring->per_transfer_context = (void **)ptr;
963 889
@@ -970,6 +896,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
970 (nentries * sizeof(struct ce_desc) + 896 (nentries * sizeof(struct ce_desc) +
971 CE_DESC_RING_ALIGN), 897 CE_DESC_RING_ALIGN),
972 &base_addr); 898 &base_addr);
899 if (!src_ring->base_addr_owner_space_unaligned) {
900 kfree(ce_state->src_ring);
901 ce_state->src_ring = NULL;
902 return -ENOMEM;
903 }
904
973 src_ring->base_addr_ce_space_unaligned = base_addr; 905 src_ring->base_addr_ce_space_unaligned = base_addr;
974 906
975 src_ring->base_addr_owner_space = PTR_ALIGN( 907 src_ring->base_addr_owner_space = PTR_ALIGN(
@@ -986,12 +918,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
986 src_ring->shadow_base_unaligned = 918 src_ring->shadow_base_unaligned =
987 kmalloc((nentries * sizeof(struct ce_desc) + 919 kmalloc((nentries * sizeof(struct ce_desc) +
988 CE_DESC_RING_ALIGN), GFP_KERNEL); 920 CE_DESC_RING_ALIGN), GFP_KERNEL);
921 if (!src_ring->shadow_base_unaligned) {
922 pci_free_consistent(ar_pci->pdev,
923 (nentries * sizeof(struct ce_desc) +
924 CE_DESC_RING_ALIGN),
925 src_ring->base_addr_owner_space,
926 src_ring->base_addr_ce_space);
927 kfree(ce_state->src_ring);
928 ce_state->src_ring = NULL;
929 return -ENOMEM;
930 }
989 931
990 src_ring->shadow_base = PTR_ALIGN( 932 src_ring->shadow_base = PTR_ALIGN(
991 src_ring->shadow_base_unaligned, 933 src_ring->shadow_base_unaligned,
992 CE_DESC_RING_ALIGN); 934 CE_DESC_RING_ALIGN);
993 935
994 ath10k_pci_wake(ar);
995 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 936 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
996 src_ring->base_addr_ce_space); 937 src_ring->base_addr_ce_space);
997 ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries); 938 ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
@@ -999,18 +940,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
999 ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0); 940 ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
1000 ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0); 941 ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
1001 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries); 942 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
1002 ath10k_pci_sleep(ar); 943
944 ath10k_dbg(ATH10K_DBG_BOOT,
945 "boot ce src ring id %d entries %d base_addr %p\n",
946 ce_id, nentries, src_ring->base_addr_owner_space);
1003 947
1004 return 0; 948 return 0;
1005} 949}
1006 950
1007static int ath10k_ce_init_dest_ring(struct ath10k *ar, 951static int ath10k_ce_init_dest_ring(struct ath10k *ar,
1008 unsigned int ce_id, 952 unsigned int ce_id,
1009 struct ce_state *ce_state, 953 struct ath10k_ce_pipe *ce_state,
1010 const struct ce_attr *attr) 954 const struct ce_attr *attr)
1011{ 955{
1012 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 956 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1013 struct ce_ring_state *dest_ring; 957 struct ath10k_ce_ring *dest_ring;
1014 unsigned int nentries = attr->dest_nentries; 958 unsigned int nentries = attr->dest_nentries;
1015 unsigned int ce_nbytes; 959 unsigned int ce_nbytes;
1016 u32 ctrl_addr = ath10k_ce_base_address(ce_id); 960 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -1024,25 +968,23 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
1024 return 0; 968 return 0;
1025 } 969 }
1026 970
1027 ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *)); 971 ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
1028 ptr = kzalloc(ce_nbytes, GFP_KERNEL); 972 ptr = kzalloc(ce_nbytes, GFP_KERNEL);
1029 if (ptr == NULL) 973 if (ptr == NULL)
1030 return -ENOMEM; 974 return -ENOMEM;
1031 975
1032 ce_state->dest_ring = (struct ce_ring_state *)ptr; 976 ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
1033 dest_ring = ce_state->dest_ring; 977 dest_ring = ce_state->dest_ring;
1034 978
1035 ptr += sizeof(struct ce_ring_state); 979 ptr += sizeof(struct ath10k_ce_ring);
1036 dest_ring->nentries = nentries; 980 dest_ring->nentries = nentries;
1037 dest_ring->nentries_mask = nentries - 1; 981 dest_ring->nentries_mask = nentries - 1;
1038 982
1039 ath10k_pci_wake(ar);
1040 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); 983 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
1041 dest_ring->sw_index &= dest_ring->nentries_mask; 984 dest_ring->sw_index &= dest_ring->nentries_mask;
1042 dest_ring->write_index = 985 dest_ring->write_index =
1043 ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); 986 ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
1044 dest_ring->write_index &= dest_ring->nentries_mask; 987 dest_ring->write_index &= dest_ring->nentries_mask;
1045 ath10k_pci_sleep(ar);
1046 988
1047 dest_ring->per_transfer_context = (void **)ptr; 989 dest_ring->per_transfer_context = (void **)ptr;
1048 990
@@ -1055,6 +997,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
1055 (nentries * sizeof(struct ce_desc) + 997 (nentries * sizeof(struct ce_desc) +
1056 CE_DESC_RING_ALIGN), 998 CE_DESC_RING_ALIGN),
1057 &base_addr); 999 &base_addr);
1000 if (!dest_ring->base_addr_owner_space_unaligned) {
1001 kfree(ce_state->dest_ring);
1002 ce_state->dest_ring = NULL;
1003 return -ENOMEM;
1004 }
1005
1058 dest_ring->base_addr_ce_space_unaligned = base_addr; 1006 dest_ring->base_addr_ce_space_unaligned = base_addr;
1059 1007
1060 /* 1008 /*
@@ -1071,44 +1019,35 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
1071 dest_ring->base_addr_ce_space_unaligned, 1019 dest_ring->base_addr_ce_space_unaligned,
1072 CE_DESC_RING_ALIGN); 1020 CE_DESC_RING_ALIGN);
1073 1021
1074 ath10k_pci_wake(ar);
1075 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 1022 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
1076 dest_ring->base_addr_ce_space); 1023 dest_ring->base_addr_ce_space);
1077 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries); 1024 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
1078 ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); 1025 ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
1079 ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0); 1026 ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
1080 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); 1027 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
1081 ath10k_pci_sleep(ar); 1028
1029 ath10k_dbg(ATH10K_DBG_BOOT,
1030 "boot ce dest ring id %d entries %d base_addr %p\n",
1031 ce_id, nentries, dest_ring->base_addr_owner_space);
1082 1032
1083 return 0; 1033 return 0;
1084} 1034}
1085 1035
1086static struct ce_state *ath10k_ce_init_state(struct ath10k *ar, 1036static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
1087 unsigned int ce_id, 1037 unsigned int ce_id,
1088 const struct ce_attr *attr) 1038 const struct ce_attr *attr)
1089{ 1039{
1090 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1040 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1091 struct ce_state *ce_state = NULL; 1041 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
1092 u32 ctrl_addr = ath10k_ce_base_address(ce_id); 1042 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
1093 1043
1094 spin_lock_bh(&ar_pci->ce_lock); 1044 spin_lock_bh(&ar_pci->ce_lock);
1095 1045
1096 if (!ar_pci->ce_id_to_state[ce_id]) { 1046 ce_state->ar = ar;
1097 ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC); 1047 ce_state->id = ce_id;
1098 if (ce_state == NULL) { 1048 ce_state->ctrl_addr = ctrl_addr;
1099 spin_unlock_bh(&ar_pci->ce_lock); 1049 ce_state->attr_flags = attr->flags;
1100 return NULL; 1050 ce_state->src_sz_max = attr->src_sz_max;
1101 }
1102
1103 ar_pci->ce_id_to_state[ce_id] = ce_state;
1104 ce_state->ar = ar;
1105 ce_state->id = ce_id;
1106 ce_state->ctrl_addr = ctrl_addr;
1107 ce_state->state = CE_RUNNING;
1108 /* Save attribute flags */
1109 ce_state->attr_flags = attr->flags;
1110 ce_state->src_sz_max = attr->src_sz_max;
1111 }
1112 1051
1113 spin_unlock_bh(&ar_pci->ce_lock); 1052 spin_unlock_bh(&ar_pci->ce_lock);
1114 1053
@@ -1122,12 +1061,17 @@ static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
1122 * initialization. It may be that only one side or the other is 1061 * initialization. It may be that only one side or the other is
1123 * initialized by software/firmware. 1062 * initialized by software/firmware.
1124 */ 1063 */
1125struct ce_state *ath10k_ce_init(struct ath10k *ar, 1064struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
1126 unsigned int ce_id, 1065 unsigned int ce_id,
1127 const struct ce_attr *attr) 1066 const struct ce_attr *attr)
1128{ 1067{
1129 struct ce_state *ce_state; 1068 struct ath10k_ce_pipe *ce_state;
1130 u32 ctrl_addr = ath10k_ce_base_address(ce_id); 1069 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
1070 int ret;
1071
1072 ret = ath10k_pci_wake(ar);
1073 if (ret)
1074 return NULL;
1131 1075
1132 ce_state = ath10k_ce_init_state(ar, ce_id, attr); 1076 ce_state = ath10k_ce_init_state(ar, ce_id, attr);
1133 if (!ce_state) { 1077 if (!ce_state) {
@@ -1136,40 +1080,38 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
1136 } 1080 }
1137 1081
1138 if (attr->src_nentries) { 1082 if (attr->src_nentries) {
1139 if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) { 1083 ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
1140 ath10k_err("Failed to initialize CE src ring for ID: %d\n", 1084 if (ret) {
1141 ce_id); 1085 ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
1086 ce_id, ret);
1142 ath10k_ce_deinit(ce_state); 1087 ath10k_ce_deinit(ce_state);
1143 return NULL; 1088 return NULL;
1144 } 1089 }
1145 } 1090 }
1146 1091
1147 if (attr->dest_nentries) { 1092 if (attr->dest_nentries) {
1148 if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) { 1093 ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
1149 ath10k_err("Failed to initialize CE dest ring for ID: %d\n", 1094 if (ret) {
1150 ce_id); 1095 ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
1096 ce_id, ret);
1151 ath10k_ce_deinit(ce_state); 1097 ath10k_ce_deinit(ce_state);
1152 return NULL; 1098 return NULL;
1153 } 1099 }
1154 } 1100 }
1155 1101
1156 /* Enable CE error interrupts */ 1102 /* Enable CE error interrupts */
1157 ath10k_pci_wake(ar);
1158 ath10k_ce_error_intr_enable(ar, ctrl_addr); 1103 ath10k_ce_error_intr_enable(ar, ctrl_addr);
1104
1159 ath10k_pci_sleep(ar); 1105 ath10k_pci_sleep(ar);
1160 1106
1161 return ce_state; 1107 return ce_state;
1162} 1108}
1163 1109
1164void ath10k_ce_deinit(struct ce_state *ce_state) 1110void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
1165{ 1111{
1166 unsigned int ce_id = ce_state->id;
1167 struct ath10k *ar = ce_state->ar; 1112 struct ath10k *ar = ce_state->ar;
1168 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1113 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1169 1114
1170 ce_state->state = CE_UNUSED;
1171 ar_pci->ce_id_to_state[ce_id] = NULL;
1172
1173 if (ce_state->src_ring) { 1115 if (ce_state->src_ring) {
1174 kfree(ce_state->src_ring->shadow_base_unaligned); 1116 kfree(ce_state->src_ring->shadow_base_unaligned);
1175 pci_free_consistent(ar_pci->pdev, 1117 pci_free_consistent(ar_pci->pdev,
@@ -1190,5 +1132,7 @@ void ath10k_ce_deinit(struct ce_state *ce_state)
1190 ce_state->dest_ring->base_addr_ce_space); 1132 ce_state->dest_ring->base_addr_ce_space);
1191 kfree(ce_state->dest_ring); 1133 kfree(ce_state->dest_ring);
1192 } 1134 }
1193 kfree(ce_state); 1135
1136 ce_state->src_ring = NULL;
1137 ce_state->dest_ring = NULL;
1194} 1138}
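
The deinit path above no longer kfree()s the pipe state because ath10k_ce_init_state() now returns a slot from the preallocated ar_pci->ce_states[] array rather than a kzalloc()'d object; only the rings are released and the slot stays reusable. A minimal userspace sketch of that ownership change, with my_* names as illustrative stand-ins rather than ath10k definitions:

/* Sketch only: contrasts the allocation model removed above with the
 * preallocated-slot model introduced by this hunk. */
#include <stdlib.h>

#define MY_NUM_PIPES 8

struct my_pipe {
	unsigned int id;
	void *src_ring;
	void *dest_ring;
};

struct my_dev {
	struct my_pipe pipes[MY_NUM_PIPES];	/* preallocated, like ce_states[] */
};

/* New model: "init" just fills in the slot, "deinit" only releases rings. */
static struct my_pipe *my_pipe_init(struct my_dev *dev, unsigned int id)
{
	struct my_pipe *p = &dev->pipes[id];

	p->id = id;
	return p;		/* never NULL, no allocation to fail */
}

static void my_pipe_deinit(struct my_pipe *p)
{
	free(p->src_ring);
	free(p->dest_ring);
	p->src_ring = NULL;	/* slot stays valid for a later re-init */
	p->dest_ring = NULL;
}
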
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index c17f07c026f4..aec802868341 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -27,7 +27,6 @@
27 27
28/* Descriptor rings must be aligned to this boundary */ 28/* Descriptor rings must be aligned to this boundary */
29#define CE_DESC_RING_ALIGN 8 29#define CE_DESC_RING_ALIGN 8
30#define CE_SENDLIST_ITEMS_MAX 12
31#define CE_SEND_FLAG_GATHER 0x00010000 30#define CE_SEND_FLAG_GATHER 0x00010000
32 31
33/* 32/*
@@ -36,16 +35,9 @@
36 * how to use copy engines. 35 * how to use copy engines.
37 */ 36 */
38 37
39struct ce_state; 38struct ath10k_ce_pipe;
40 39
41 40
42/* Copy Engine operational state */
43enum ce_op_state {
44 CE_UNUSED,
45 CE_PAUSED,
46 CE_RUNNING,
47};
48
49#define CE_DESC_FLAGS_GATHER (1 << 0) 41#define CE_DESC_FLAGS_GATHER (1 << 0)
50#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) 42#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
51#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC 43#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
@@ -57,8 +49,7 @@ struct ce_desc {
57 __le16 flags; /* %CE_DESC_FLAGS_ */ 49 __le16 flags; /* %CE_DESC_FLAGS_ */
58}; 50};
59 51
60/* Copy Engine Ring internal state */ 52struct ath10k_ce_ring {
61struct ce_ring_state {
62 /* Number of entries in this ring; must be power of 2 */ 53 /* Number of entries in this ring; must be power of 2 */
63 unsigned int nentries; 54 unsigned int nentries;
64 unsigned int nentries_mask; 55 unsigned int nentries_mask;
@@ -116,49 +107,20 @@ struct ce_ring_state {
116 void **per_transfer_context; 107 void **per_transfer_context;
117}; 108};
118 109
119/* Copy Engine internal state */ 110struct ath10k_ce_pipe {
120struct ce_state {
121 struct ath10k *ar; 111 struct ath10k *ar;
122 unsigned int id; 112 unsigned int id;
123 113
124 unsigned int attr_flags; 114 unsigned int attr_flags;
125 115
126 u32 ctrl_addr; 116 u32 ctrl_addr;
127 enum ce_op_state state;
128
129 void (*send_cb) (struct ce_state *ce_state,
130 void *per_transfer_send_context,
131 u32 buffer,
132 unsigned int nbytes,
133 unsigned int transfer_id);
134 void (*recv_cb) (struct ce_state *ce_state,
135 void *per_transfer_recv_context,
136 u32 buffer,
137 unsigned int nbytes,
138 unsigned int transfer_id,
139 unsigned int flags);
140 117
141 unsigned int src_sz_max; 118 void (*send_cb)(struct ath10k_ce_pipe *);
142 struct ce_ring_state *src_ring; 119 void (*recv_cb)(struct ath10k_ce_pipe *);
143 struct ce_ring_state *dest_ring;
144};
145 120
146struct ce_sendlist_item { 121 unsigned int src_sz_max;
147 /* e.g. buffer or desc list */ 122 struct ath10k_ce_ring *src_ring;
148 dma_addr_t data; 123 struct ath10k_ce_ring *dest_ring;
149 union {
150 /* simple buffer */
151 unsigned int nbytes;
152 /* Rx descriptor list */
153 unsigned int ndesc;
154 } u;
155 /* externally-specified flags; OR-ed with internal flags */
156 u32 flags;
157};
158
159struct ce_sendlist {
160 unsigned int num_items;
161 struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
162}; 124};
163 125
164/* Copy Engine settable attributes */ 126/* Copy Engine settable attributes */
@@ -182,7 +144,7 @@ struct ce_attr;
182 * 144 *
183 * Implementation note: pushes 1 buffer to Source ring 145 * Implementation note: pushes 1 buffer to Source ring
184 */ 146 */
185int ath10k_ce_send(struct ce_state *ce_state, 147int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
186 void *per_transfer_send_context, 148 void *per_transfer_send_context,
187 u32 buffer, 149 u32 buffer,
188 unsigned int nbytes, 150 unsigned int nbytes,
@@ -190,21 +152,10 @@ int ath10k_ce_send(struct ce_state *ce_state,
190 unsigned int transfer_id, 152 unsigned int transfer_id,
191 unsigned int flags); 153 unsigned int flags);
192 154
193void ath10k_ce_send_cb_register(struct ce_state *ce_state, 155void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
194 void (*send_cb) (struct ce_state *ce_state, 156 void (*send_cb)(struct ath10k_ce_pipe *),
195 void *transfer_context,
196 u32 buffer,
197 unsigned int nbytes,
198 unsigned int transfer_id),
199 int disable_interrupts); 157 int disable_interrupts);
200 158
201/* Append a simple buffer (address/length) to a sendlist. */
202void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
203 u32 buffer,
204 unsigned int nbytes,
205 /* OR-ed with internal flags */
206 u32 flags);
207
208/* 159/*
209 * Queue a "sendlist" of buffers to be sent using gather to a single 160 * Queue a "sendlist" of buffers to be sent using gather to a single
210 * anonymous destination buffer 161 * anonymous destination buffer
@@ -215,11 +166,11 @@ void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
215 * 166 *
 216	 * Implementation note: Pushes multiple buffers with Gather to Source ring.	 167	 * Implementation note: Pushes multiple buffers with Gather to Source ring.
217 */ 168 */
218int ath10k_ce_sendlist_send(struct ce_state *ce_state, 169int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
219 void *per_transfer_send_context, 170 void *per_transfer_context,
220 struct ce_sendlist *sendlist, 171 unsigned int transfer_id,
221 /* 14 bits */ 172 u32 paddr, unsigned int nbytes,
222 unsigned int transfer_id); 173 u32 flags);
223 174
224/*==================Recv=======================*/ 175/*==================Recv=======================*/
225 176
@@ -233,17 +184,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
233 * 184 *
 234	 * Implementation note: Pushes a buffer to Dest ring.	 185	 * Implementation note: Pushes a buffer to Dest ring.
235 */ 186 */
236int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state, 187int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
237 void *per_transfer_recv_context, 188 void *per_transfer_recv_context,
238 u32 buffer); 189 u32 buffer);
239 190
240void ath10k_ce_recv_cb_register(struct ce_state *ce_state, 191void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
241 void (*recv_cb) (struct ce_state *ce_state, 192 void (*recv_cb)(struct ath10k_ce_pipe *));
242 void *transfer_context,
243 u32 buffer,
244 unsigned int nbytes,
245 unsigned int transfer_id,
246 unsigned int flags));
247 193
248/* recv flags */ 194/* recv flags */
249/* Data is byte-swapped */ 195/* Data is byte-swapped */
@@ -253,7 +199,7 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
253 * Supply data for the next completed unprocessed receive descriptor. 199 * Supply data for the next completed unprocessed receive descriptor.
254 * Pops buffer from Dest ring. 200 * Pops buffer from Dest ring.
255 */ 201 */
256int ath10k_ce_completed_recv_next(struct ce_state *ce_state, 202int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
257 void **per_transfer_contextp, 203 void **per_transfer_contextp,
258 u32 *bufferp, 204 u32 *bufferp,
259 unsigned int *nbytesp, 205 unsigned int *nbytesp,
@@ -263,7 +209,7 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
263 * Supply data for the next completed unprocessed send descriptor. 209 * Supply data for the next completed unprocessed send descriptor.
264 * Pops 1 completed send buffer from Source ring. 210 * Pops 1 completed send buffer from Source ring.
265 */ 211 */
266int ath10k_ce_completed_send_next(struct ce_state *ce_state, 212int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
267 void **per_transfer_contextp, 213 void **per_transfer_contextp,
268 u32 *bufferp, 214 u32 *bufferp,
269 unsigned int *nbytesp, 215 unsigned int *nbytesp,
@@ -272,7 +218,7 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
272/*==================CE Engine Initialization=======================*/ 218/*==================CE Engine Initialization=======================*/
273 219
274/* Initialize an instance of a CE */ 220/* Initialize an instance of a CE */
275struct ce_state *ath10k_ce_init(struct ath10k *ar, 221struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
276 unsigned int ce_id, 222 unsigned int ce_id,
277 const struct ce_attr *attr); 223 const struct ce_attr *attr);
278 224
@@ -282,7 +228,7 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
282 * receive buffers. Target DMA must be stopped before using 228 * receive buffers. Target DMA must be stopped before using
283 * this API. 229 * this API.
284 */ 230 */
285int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, 231int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
286 void **per_transfer_contextp, 232 void **per_transfer_contextp,
287 u32 *bufferp); 233 u32 *bufferp);
288 234
@@ -291,13 +237,13 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
291 * pending sends. Target DMA must be stopped before using 237 * pending sends. Target DMA must be stopped before using
292 * this API. 238 * this API.
293 */ 239 */
294int ath10k_ce_cancel_send_next(struct ce_state *ce_state, 240int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
295 void **per_transfer_contextp, 241 void **per_transfer_contextp,
296 u32 *bufferp, 242 u32 *bufferp,
297 unsigned int *nbytesp, 243 unsigned int *nbytesp,
298 unsigned int *transfer_idp); 244 unsigned int *transfer_idp);
299 245
300void ath10k_ce_deinit(struct ce_state *ce_state); 246void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
301 247
302/*==================CE Interrupt Handlers====================*/ 248/*==================CE Interrupt Handlers====================*/
303void ath10k_ce_per_engine_service_any(struct ath10k *ar); 249void ath10k_ce_per_engine_service_any(struct ath10k *ar);
@@ -322,9 +268,6 @@ struct ce_attr {
322 /* CE_ATTR_* values */ 268 /* CE_ATTR_* values */
323 unsigned int flags; 269 unsigned int flags;
324 270
325 /* currently not in use */
326 unsigned int priority;
327
328 /* #entries in source ring - Must be a power of 2 */ 271 /* #entries in source ring - Must be a power of 2 */
329 unsigned int src_nentries; 272 unsigned int src_nentries;
330 273
@@ -336,21 +279,8 @@ struct ce_attr {
336 279
337 /* #entries in destination ring - Must be a power of 2 */ 280 /* #entries in destination ring - Must be a power of 2 */
338 unsigned int dest_nentries; 281 unsigned int dest_nentries;
339
340 /* Future use */
341 void *reserved;
342}; 282};
343 283
344/*
345 * When using sendlist_send to transfer multiple buffer fragments, the
346 * transfer context of each fragment, except last one, will be filled
347 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
348 * each fragment done with send and the transfer context would be
349 * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
350 * status of a send completion.
351 */
352#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
353
354#define SR_BA_ADDRESS 0x0000 284#define SR_BA_ADDRESS 0x0000
355#define SR_SIZE_ADDRESS 0x0004 285#define SR_SIZE_ADDRESS 0x0004
356#define DR_BA_ADDRESS 0x0008 286#define DR_BA_ADDRESS 0x0008
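
The header changes above shrink the completion callbacks to a single pipe argument and drop the sendlist helpers, so per-transfer details are now popped with the *_completed_*_next() accessors from inside the callback. A hedged sketch of one possible consumer, assuming only the prototypes visible in this hunk; my_send_done() and my_register() are illustrative, not part of the patch:

/* Hedged sketch of a caller of the simplified CE callback API. */
#include "ce.h"

static void my_send_done(struct ath10k_ce_pipe *pipe)
{
	void *ctx;
	u32 paddr;
	unsigned int nbytes, id;

	/* The callback now only identifies the pipe; per-transfer details
	 * are popped explicitly, one completed descriptor at a time. */
	while (ath10k_ce_completed_send_next(pipe, &ctx, &paddr,
					     &nbytes, &id) == 0)
		; /* hand ctx/nbytes back to the upper layer here */
}

static void my_register(struct ath10k_ce_pipe *pipe)
{
	ath10k_ce_send_cb_register(pipe, my_send_done,
				   0 /* keep interrupts enabled */);
}
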
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 7226c23b9569..76906d5a082e 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -39,17 +39,6 @@ MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
39 39
40static const struct ath10k_hw_params ath10k_hw_params_list[] = { 40static const struct ath10k_hw_params ath10k_hw_params_list[] = {
41 { 41 {
42 .id = QCA988X_HW_1_0_VERSION,
43 .name = "qca988x hw1.0",
44 .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
45 .fw = {
46 .dir = QCA988X_HW_1_0_FW_DIR,
47 .fw = QCA988X_HW_1_0_FW_FILE,
48 .otp = QCA988X_HW_1_0_OTP_FILE,
49 .board = QCA988X_HW_1_0_BOARD_DATA_FILE,
50 },
51 },
52 {
53 .id = QCA988X_HW_2_0_VERSION, 42 .id = QCA988X_HW_2_0_VERSION,
54 .name = "qca988x hw2.0", 43 .name = "qca988x hw2.0",
55 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, 44 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
@@ -64,7 +53,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
64 53
65static void ath10k_send_suspend_complete(struct ath10k *ar) 54static void ath10k_send_suspend_complete(struct ath10k *ar)
66{ 55{
67 ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__); 56 ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
68 57
69 ar->is_target_paused = true; 58 ar->is_target_paused = true;
70 wake_up(&ar->event_queue); 59 wake_up(&ar->event_queue);
@@ -112,7 +101,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
112 goto timeout; 101 goto timeout;
113 } 102 }
114 103
115 ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n"); 104 ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
116 return 0; 105 return 0;
117 106
118timeout: 107timeout:
@@ -214,8 +203,8 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
214 return ret; 203 return ret;
215 } 204 }
216 205
217 ath10k_dbg(ATH10K_DBG_CORE, 206 ath10k_dbg(ATH10K_DBG_BOOT,
218 "ath10k: Board extended Data download addr: 0x%x\n", 207 "boot push board extended data addr 0x%x\n",
219 board_ext_data_addr); 208 board_ext_data_addr);
220 209
221 if (board_ext_data_addr == 0) 210 if (board_ext_data_addr == 0)
@@ -446,6 +435,13 @@ static int ath10k_init_uart(struct ath10k *ar)
446 return ret; 435 return ret;
447 } 436 }
448 437
438 /* Set the UART baud rate to 19200. */
439 ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
440 if (ret) {
441 ath10k_warn("could not set the baud rate (%d)\n", ret);
442 return ret;
443 }
444
449 ath10k_info("UART prints enabled\n"); 445 ath10k_info("UART prints enabled\n");
450 return 0; 446 return 0;
451} 447}
@@ -641,6 +637,10 @@ int ath10k_core_start(struct ath10k *ar)
641 if (status) 637 if (status)
642 goto err_disconnect_htc; 638 goto err_disconnect_htc;
643 639
640 status = ath10k_debug_start(ar);
641 if (status)
642 goto err_disconnect_htc;
643
644 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; 644 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
645 645
646 return 0; 646 return 0;
@@ -658,6 +658,7 @@ EXPORT_SYMBOL(ath10k_core_start);
658 658
659void ath10k_core_stop(struct ath10k *ar) 659void ath10k_core_stop(struct ath10k *ar)
660{ 660{
661 ath10k_debug_stop(ar);
661 ath10k_htc_stop(&ar->htc); 662 ath10k_htc_stop(&ar->htc);
662 ath10k_htt_detach(&ar->htt); 663 ath10k_htt_detach(&ar->htt);
663 ath10k_wmi_detach(ar); 664 ath10k_wmi_detach(ar);
@@ -717,10 +718,46 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
717 return 0; 718 return 0;
718} 719}
719 720
720int ath10k_core_register(struct ath10k *ar) 721static int ath10k_core_check_chip_id(struct ath10k *ar)
722{
723 u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
724
725 ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
726 ar->chip_id, hw_revision);
727
 728	/* Check that we are not using hw1.0 (some of them have the same pci id
 729	 * as hw2.0) before doing anything else, as ath10k crashes horribly
 730	 * due to missing hw1.0 workarounds. */
731 switch (hw_revision) {
732 case QCA988X_HW_1_0_CHIP_ID_REV:
733 ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
734 return -EOPNOTSUPP;
735
736 case QCA988X_HW_2_0_CHIP_ID_REV:
737 /* known hardware revision, continue normally */
738 return 0;
739
740 default:
741 ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
742 ar->chip_id);
743 return 0;
744 }
745
746 return 0;
747}
748
749int ath10k_core_register(struct ath10k *ar, u32 chip_id)
721{ 750{
722 int status; 751 int status;
723 752
753 ar->chip_id = chip_id;
754
755 status = ath10k_core_check_chip_id(ar);
756 if (status) {
757 ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
758 return status;
759 }
760
724 status = ath10k_core_probe_fw(ar); 761 status = ath10k_core_probe_fw(ar);
725 if (status) { 762 if (status) {
726 ath10k_err("could not probe fw (%d)\n", status); 763 ath10k_err("could not probe fw (%d)\n", status);
@@ -755,6 +792,7 @@ void ath10k_core_unregister(struct ath10k *ar)
755 * Otherwise we will fail to submit commands to FW and mac80211 will be 792 * Otherwise we will fail to submit commands to FW and mac80211 will be
756 * unhappy about callback failures. */ 793 * unhappy about callback failures. */
757 ath10k_mac_unregister(ar); 794 ath10k_mac_unregister(ar);
795
758 ath10k_core_free_firmware_files(ar); 796 ath10k_core_free_firmware_files(ar);
759} 797}
760EXPORT_SYMBOL(ath10k_core_unregister); 798EXPORT_SYMBOL(ath10k_core_unregister);
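
ath10k_core_check_chip_id() above gates probe on the revision field extracted from chip_id with the driver's MS() mask/shift helper. A standalone sketch of that extraction pattern; the field layout below is made up for illustration and is not the real SOC_CHIP_ID_REV encoding:

/* Sketch of a mask/shift field extraction like MS(chip_id, SOC_CHIP_ID_REV). */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_REV_MASK 0x00000f00	/* assumed 4-bit revision field */
#define EXAMPLE_REV_LSB  8

static uint32_t example_rev(uint32_t chip_id)
{
	return (chip_id & EXAMPLE_REV_MASK) >> EXAMPLE_REV_LSB;
}

int main(void)
{
	uint32_t chip_id = 0x043202ff;	/* made-up register value */

	printf("revision 0x%x\n", example_rev(chip_id));
	return 0;
}
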
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index e4bba563ed42..292ad4577c98 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -52,18 +52,12 @@ struct ath10k_skb_cb {
52 52
53 struct { 53 struct {
54 u8 vdev_id; 54 u8 vdev_id;
55 u16 msdu_id;
56 u8 tid; 55 u8 tid;
57 bool is_offchan; 56 bool is_offchan;
58 bool is_conf;
59 bool discard;
60 bool no_ack;
61 u8 refcount;
62 struct sk_buff *txfrag;
63 struct sk_buff *msdu;
64 } __packed htt;
65 57
66 /* 4 bytes left on 64bit arch */ 58 u8 frag_len;
59 u8 pad_len;
60 } __packed htt;
67} __packed; 61} __packed;
68 62
69static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) 63static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -112,11 +106,7 @@ struct ath10k_wmi {
112 enum ath10k_htc_ep_id eid; 106 enum ath10k_htc_ep_id eid;
113 struct completion service_ready; 107 struct completion service_ready;
114 struct completion unified_ready; 108 struct completion unified_ready;
115 atomic_t pending_tx_count; 109 wait_queue_head_t tx_credits_wq;
116 wait_queue_head_t wq;
117
118 struct sk_buff_head wmi_event_list;
119 struct work_struct wmi_event_work;
120}; 110};
121 111
122struct ath10k_peer_stat { 112struct ath10k_peer_stat {
@@ -203,6 +193,7 @@ struct ath10k_vif {
203 enum wmi_vdev_subtype vdev_subtype; 193 enum wmi_vdev_subtype vdev_subtype;
204 u32 beacon_interval; 194 u32 beacon_interval;
205 u32 dtim_period; 195 u32 dtim_period;
196 struct sk_buff *beacon;
206 197
207 struct ath10k *ar; 198 struct ath10k *ar;
208 struct ieee80211_vif *vif; 199 struct ieee80211_vif *vif;
@@ -246,6 +237,9 @@ struct ath10k_debug {
246 u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; 237 u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
247 238
248 struct completion event_stats_compl; 239 struct completion event_stats_compl;
240
241 unsigned long htt_stats_mask;
242 struct delayed_work htt_stats_dwork;
249}; 243};
250 244
251enum ath10k_state { 245enum ath10k_state {
@@ -270,12 +264,21 @@ enum ath10k_state {
270 ATH10K_STATE_WEDGED, 264 ATH10K_STATE_WEDGED,
271}; 265};
272 266
267enum ath10k_fw_features {
268 /* wmi_mgmt_rx_hdr contains extra RSSI information */
269 ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
270
271 /* keep last */
272 ATH10K_FW_FEATURE_COUNT,
273};
274
273struct ath10k { 275struct ath10k {
274 struct ath_common ath_common; 276 struct ath_common ath_common;
275 struct ieee80211_hw *hw; 277 struct ieee80211_hw *hw;
276 struct device *dev; 278 struct device *dev;
277 u8 mac_addr[ETH_ALEN]; 279 u8 mac_addr[ETH_ALEN];
278 280
281 u32 chip_id;
279 u32 target_version; 282 u32 target_version;
280 u8 fw_version_major; 283 u8 fw_version_major;
281 u32 fw_version_minor; 284 u32 fw_version_minor;
@@ -288,6 +291,8 @@ struct ath10k {
288 u32 vht_cap_info; 291 u32 vht_cap_info;
289 u32 num_rf_chains; 292 u32 num_rf_chains;
290 293
294 DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
295
291 struct targetdef *targetdef; 296 struct targetdef *targetdef;
292 struct hostdef *hostdef; 297 struct hostdef *hostdef;
293 298
@@ -393,7 +398,7 @@ void ath10k_core_destroy(struct ath10k *ar);
393 398
394int ath10k_core_start(struct ath10k *ar); 399int ath10k_core_start(struct ath10k *ar);
395void ath10k_core_stop(struct ath10k *ar); 400void ath10k_core_stop(struct ath10k *ar);
396int ath10k_core_register(struct ath10k *ar); 401int ath10k_core_register(struct ath10k *ar, u32 chip_id);
397void ath10k_core_unregister(struct ath10k *ar); 402void ath10k_core_unregister(struct ath10k *ar);
398 403
399#endif /* _CORE_H_ */ 404#endif /* _CORE_H_ */
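
The new fw_features member is a DECLARE_BITMAP(), so feature flags such as ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX would normally be queried with the standard bitmap helpers. A hedged sketch of a consumer; the surrounding function is illustrative, not part of this patch:

/* Sketch: typical test_bit() query against the fw_features bitmap. */
#include <linux/bitops.h>
#include "core.h"

static bool my_rx_has_ext_rssi(struct ath10k *ar)
{
	/* DECLARE_BITMAP() storage is read with test_bit()/set_bit(). */
	return test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
}
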
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 3d65594fa098..59615c7f217e 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -21,6 +21,9 @@
21#include "core.h" 21#include "core.h"
22#include "debug.h" 22#include "debug.h"
23 23
24/* ms */
25#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
26
24static int ath10k_printk(const char *level, const char *fmt, ...) 27static int ath10k_printk(const char *level, const char *fmt, ...)
25{ 28{
26 struct va_format vaf; 29 struct va_format vaf;
@@ -260,7 +263,6 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
260 } 263 }
261 264
262 spin_unlock_bh(&ar->data_lock); 265 spin_unlock_bh(&ar->data_lock);
263 mutex_unlock(&ar->conf_mutex);
264 complete(&ar->debug.event_stats_compl); 266 complete(&ar->debug.event_stats_compl);
265} 267}
266 268
@@ -499,6 +501,136 @@ static const struct file_operations fops_simulate_fw_crash = {
499 .llseek = default_llseek, 501 .llseek = default_llseek,
500}; 502};
501 503
504static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
505 size_t count, loff_t *ppos)
506{
507 struct ath10k *ar = file->private_data;
508 unsigned int len;
509 char buf[50];
510
511 len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
512
513 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
514}
515
516static const struct file_operations fops_chip_id = {
517 .read = ath10k_read_chip_id,
518 .open = simple_open,
519 .owner = THIS_MODULE,
520 .llseek = default_llseek,
521};
522
523static int ath10k_debug_htt_stats_req(struct ath10k *ar)
524{
525 u64 cookie;
526 int ret;
527
528 lockdep_assert_held(&ar->conf_mutex);
529
530 if (ar->debug.htt_stats_mask == 0)
531 /* htt stats are disabled */
532 return 0;
533
534 if (ar->state != ATH10K_STATE_ON)
535 return 0;
536
537 cookie = get_jiffies_64();
538
539 ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
540 cookie);
541 if (ret) {
542 ath10k_warn("failed to send htt stats request: %d\n", ret);
543 return ret;
544 }
545
546 queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
547 msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
548
549 return 0;
550}
551
552static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
553{
554 struct ath10k *ar = container_of(work, struct ath10k,
555 debug.htt_stats_dwork.work);
556
557 mutex_lock(&ar->conf_mutex);
558
559 ath10k_debug_htt_stats_req(ar);
560
561 mutex_unlock(&ar->conf_mutex);
562}
563
564static ssize_t ath10k_read_htt_stats_mask(struct file *file,
565 char __user *user_buf,
566 size_t count, loff_t *ppos)
567{
568 struct ath10k *ar = file->private_data;
569 char buf[32];
570 unsigned int len;
571
572 len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
573
574 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
575}
576
577static ssize_t ath10k_write_htt_stats_mask(struct file *file,
578 const char __user *user_buf,
579 size_t count, loff_t *ppos)
580{
581 struct ath10k *ar = file->private_data;
582 unsigned long mask;
583 int ret;
584
585 ret = kstrtoul_from_user(user_buf, count, 0, &mask);
586 if (ret)
587 return ret;
588
589 /* max 8 bit masks (for now) */
590 if (mask > 0xff)
591 return -E2BIG;
592
593 mutex_lock(&ar->conf_mutex);
594
595 ar->debug.htt_stats_mask = mask;
596
597 ret = ath10k_debug_htt_stats_req(ar);
598 if (ret)
599 goto out;
600
601 ret = count;
602
603out:
604 mutex_unlock(&ar->conf_mutex);
605
606 return ret;
607}
608
609static const struct file_operations fops_htt_stats_mask = {
610 .read = ath10k_read_htt_stats_mask,
611 .write = ath10k_write_htt_stats_mask,
612 .open = simple_open,
613 .owner = THIS_MODULE,
614 .llseek = default_llseek,
615};
616
617int ath10k_debug_start(struct ath10k *ar)
618{
619 int ret;
620
621 ret = ath10k_debug_htt_stats_req(ar);
622 if (ret)
623 /* continue normally anyway, this isn't serious */
624 ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
625
626 return 0;
627}
628
629void ath10k_debug_stop(struct ath10k *ar)
630{
631 cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
632}
633
502int ath10k_debug_create(struct ath10k *ar) 634int ath10k_debug_create(struct ath10k *ar)
503{ 635{
504 ar->debug.debugfs_phy = debugfs_create_dir("ath10k", 636 ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -507,6 +639,9 @@ int ath10k_debug_create(struct ath10k *ar)
507 if (!ar->debug.debugfs_phy) 639 if (!ar->debug.debugfs_phy)
508 return -ENOMEM; 640 return -ENOMEM;
509 641
642 INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
643 ath10k_debug_htt_stats_dwork);
644
510 init_completion(&ar->debug.event_stats_compl); 645 init_completion(&ar->debug.event_stats_compl);
511 646
512 debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar, 647 debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -518,8 +653,15 @@ int ath10k_debug_create(struct ath10k *ar)
518 debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy, 653 debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
519 ar, &fops_simulate_fw_crash); 654 ar, &fops_simulate_fw_crash);
520 655
656 debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
657 ar, &fops_chip_id);
658
659 debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
660 ar, &fops_htt_stats_mask);
661
521 return 0; 662 return 0;
522} 663}
664
523#endif /* CONFIG_ATH10K_DEBUGFS */ 665#endif /* CONFIG_ATH10K_DEBUGFS */
524 666
525#ifdef CONFIG_ATH10K_DEBUG 667#ifdef CONFIG_ATH10K_DEBUG
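
The htt_stats machinery above re-arms itself: the request helper queues the delayed work again while the mask is non-zero, and the work handler simply retakes conf_mutex and calls the helper. A reduced sketch of that self-rearming delayed-work pattern; all my_* names are illustrative:

/* Skeleton of a self-rearming delayed work gated by a mask. */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_INTERVAL_MS 1000

struct my_ctx {
	struct mutex lock;
	unsigned long mask;
	struct delayed_work dwork;
	struct workqueue_struct *wq;
};

static int my_request(struct my_ctx *c)
{
	lockdep_assert_held(&c->lock);

	if (!c->mask)
		return 0;	/* disabled: do not re-arm */

	/* ...send the actual stats request here... */

	queue_delayed_work(c->wq, &c->dwork,
			   msecs_to_jiffies(MY_INTERVAL_MS));
	return 0;
}

static void my_dwork(struct work_struct *work)
{
	struct my_ctx *c = container_of(work, struct my_ctx, dwork.work);

	mutex_lock(&c->lock);
	my_request(c);		/* re-arms itself while mask is non-zero */
	mutex_unlock(&c->lock);
}
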
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 168140c54028..6576b82a8d86 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -27,21 +27,24 @@ enum ath10k_debug_mask {
27 ATH10K_DBG_HTC = 0x00000004, 27 ATH10K_DBG_HTC = 0x00000004,
28 ATH10K_DBG_HTT = 0x00000008, 28 ATH10K_DBG_HTT = 0x00000008,
29 ATH10K_DBG_MAC = 0x00000010, 29 ATH10K_DBG_MAC = 0x00000010,
30 ATH10K_DBG_CORE = 0x00000020, 30 ATH10K_DBG_BOOT = 0x00000020,
31 ATH10K_DBG_PCI_DUMP = 0x00000040, 31 ATH10K_DBG_PCI_DUMP = 0x00000040,
32 ATH10K_DBG_HTT_DUMP = 0x00000080, 32 ATH10K_DBG_HTT_DUMP = 0x00000080,
33 ATH10K_DBG_MGMT = 0x00000100, 33 ATH10K_DBG_MGMT = 0x00000100,
34 ATH10K_DBG_DATA = 0x00000200, 34 ATH10K_DBG_DATA = 0x00000200,
35 ATH10K_DBG_BMI = 0x00000400,
35 ATH10K_DBG_ANY = 0xffffffff, 36 ATH10K_DBG_ANY = 0xffffffff,
36}; 37};
37 38
38extern unsigned int ath10k_debug_mask; 39extern unsigned int ath10k_debug_mask;
39 40
40extern __printf(1, 2) int ath10k_info(const char *fmt, ...); 41__printf(1, 2) int ath10k_info(const char *fmt, ...);
41extern __printf(1, 2) int ath10k_err(const char *fmt, ...); 42__printf(1, 2) int ath10k_err(const char *fmt, ...);
42extern __printf(1, 2) int ath10k_warn(const char *fmt, ...); 43__printf(1, 2) int ath10k_warn(const char *fmt, ...);
43 44
44#ifdef CONFIG_ATH10K_DEBUGFS 45#ifdef CONFIG_ATH10K_DEBUGFS
46int ath10k_debug_start(struct ath10k *ar);
47void ath10k_debug_stop(struct ath10k *ar);
45int ath10k_debug_create(struct ath10k *ar); 48int ath10k_debug_create(struct ath10k *ar);
46void ath10k_debug_read_service_map(struct ath10k *ar, 49void ath10k_debug_read_service_map(struct ath10k *ar,
47 void *service_map, 50 void *service_map,
@@ -50,6 +53,15 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
50 struct wmi_stats_event *ev); 53 struct wmi_stats_event *ev);
51 54
52#else 55#else
56static inline int ath10k_debug_start(struct ath10k *ar)
57{
58 return 0;
59}
60
61static inline void ath10k_debug_stop(struct ath10k *ar)
62{
63}
64
53static inline int ath10k_debug_create(struct ath10k *ar) 65static inline int ath10k_debug_create(struct ath10k *ar)
54{ 66{
55 return 0; 67 return 0;
@@ -68,7 +80,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
68#endif /* CONFIG_ATH10K_DEBUGFS */ 80#endif /* CONFIG_ATH10K_DEBUGFS */
69 81
70#ifdef CONFIG_ATH10K_DEBUG 82#ifdef CONFIG_ATH10K_DEBUG
71extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask, 83__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
72 const char *fmt, ...); 84 const char *fmt, ...);
73void ath10k_dbg_dump(enum ath10k_debug_mask mask, 85void ath10k_dbg_dump(enum ath10k_debug_mask mask,
74 const char *msg, const char *prefix, 86 const char *msg, const char *prefix,
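
debug.h follows the usual compile-out convention: real prototypes under CONFIG_ATH10K_DEBUGFS and static inline no-ops otherwise, so callers such as ath10k_core_start() need no #ifdefs. A generic sketch of the pattern with made-up names:

/* Sketch of the compile-out stub pattern used by debug.h. */
struct my_dev;

#ifdef CONFIG_MY_FEATURE
int my_feature_start(struct my_dev *dev);
void my_feature_stop(struct my_dev *dev);
#else
static inline int my_feature_start(struct my_dev *dev)
{
	return 0;	/* pretend success so callers stay ifdef-free */
}

static inline void my_feature_stop(struct my_dev *dev)
{
}
#endif
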
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index ef3329ef52f3..3118d7506734 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -103,10 +103,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
103 struct ath10k_htc_hdr *hdr; 103 struct ath10k_htc_hdr *hdr;
104 104
105 hdr = (struct ath10k_htc_hdr *)skb->data; 105 hdr = (struct ath10k_htc_hdr *)skb->data;
106 memset(hdr, 0, sizeof(*hdr));
107 106
108 hdr->eid = ep->eid; 107 hdr->eid = ep->eid;
109 hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr)); 108 hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
109 hdr->flags = 0;
110 110
111 spin_lock_bh(&ep->htc->tx_lock); 111 spin_lock_bh(&ep->htc->tx_lock);
112 hdr->seq_no = ep->seq_no++; 112 hdr->seq_no = ep->seq_no++;
@@ -117,134 +117,13 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
117 spin_unlock_bh(&ep->htc->tx_lock); 117 spin_unlock_bh(&ep->htc->tx_lock);
118} 118}
119 119
120static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
121 struct ath10k_htc_ep *ep,
122 struct sk_buff *skb,
123 u8 credits)
124{
125 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
126 int ret;
127
128 ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
129 ep->eid, skb);
130
131 ath10k_htc_prepare_tx_skb(ep, skb);
132
133 ret = ath10k_skb_map(htc->ar->dev, skb);
134 if (ret)
135 goto err;
136
137 ret = ath10k_hif_send_head(htc->ar,
138 ep->ul_pipe_id,
139 ep->eid,
140 skb->len,
141 skb);
142 if (unlikely(ret))
143 goto err;
144
145 return 0;
146err:
147 ath10k_warn("HTC issue failed: %d\n", ret);
148
149 spin_lock_bh(&htc->tx_lock);
150 ep->tx_credits += credits;
151 spin_unlock_bh(&htc->tx_lock);
152
153 /* this is the simplest way to handle out-of-resources for non-credit
154 * based endpoints. credit based endpoints can still get -ENOSR, but
155 * this is highly unlikely as credit reservation should prevent that */
156 if (ret == -ENOSR) {
157 spin_lock_bh(&htc->tx_lock);
158 __skb_queue_head(&ep->tx_queue, skb);
159 spin_unlock_bh(&htc->tx_lock);
160
161 return ret;
162 }
163
164 skb_cb->is_aborted = true;
165 ath10k_htc_notify_tx_completion(ep, skb);
166
167 return ret;
168}
169
170static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
171 struct ath10k_htc_ep *ep,
172 u8 *credits)
173{
174 struct sk_buff *skb;
175 struct ath10k_skb_cb *skb_cb;
176 int credits_required;
177 int remainder;
178 unsigned int transfer_len;
179
180 lockdep_assert_held(&htc->tx_lock);
181
182 skb = __skb_dequeue(&ep->tx_queue);
183 if (!skb)
184 return NULL;
185
186 skb_cb = ATH10K_SKB_CB(skb);
187 transfer_len = skb->len;
188
189 if (likely(transfer_len <= htc->target_credit_size)) {
190 credits_required = 1;
191 } else {
192 /* figure out how many credits this message requires */
193 credits_required = transfer_len / htc->target_credit_size;
194 remainder = transfer_len % htc->target_credit_size;
195
196 if (remainder)
197 credits_required++;
198 }
199
200 ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
201 credits_required, ep->tx_credits);
202
203 if (ep->tx_credits < credits_required) {
204 __skb_queue_head(&ep->tx_queue, skb);
205 return NULL;
206 }
207
208 ep->tx_credits -= credits_required;
209 *credits = credits_required;
210 return skb;
211}
212
213static void ath10k_htc_send_work(struct work_struct *work)
214{
215 struct ath10k_htc_ep *ep = container_of(work,
216 struct ath10k_htc_ep, send_work);
217 struct ath10k_htc *htc = ep->htc;
218 struct sk_buff *skb;
219 u8 credits = 0;
220 int ret;
221
222 while (true) {
223 if (ep->ul_is_polled)
224 ath10k_htc_send_complete_check(ep, 0);
225
226 spin_lock_bh(&htc->tx_lock);
227 if (ep->tx_credit_flow_enabled)
228 skb = ath10k_htc_get_skb_credit_based(htc, ep,
229 &credits);
230 else
231 skb = __skb_dequeue(&ep->tx_queue);
232 spin_unlock_bh(&htc->tx_lock);
233
234 if (!skb)
235 break;
236
237 ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
238 if (ret == -ENOSR)
239 break;
240 }
241}
242
243int ath10k_htc_send(struct ath10k_htc *htc, 120int ath10k_htc_send(struct ath10k_htc *htc,
244 enum ath10k_htc_ep_id eid, 121 enum ath10k_htc_ep_id eid,
245 struct sk_buff *skb) 122 struct sk_buff *skb)
246{ 123{
247 struct ath10k_htc_ep *ep = &htc->endpoint[eid]; 124 struct ath10k_htc_ep *ep = &htc->endpoint[eid];
125 int credits = 0;
126 int ret;
248 127
249 if (htc->ar->state == ATH10K_STATE_WEDGED) 128 if (htc->ar->state == ATH10K_STATE_WEDGED)
250 return -ECOMM; 129 return -ECOMM;
@@ -254,18 +133,55 @@ int ath10k_htc_send(struct ath10k_htc *htc,
254 return -ENOENT; 133 return -ENOENT;
255 } 134 }
256 135
136 /* FIXME: This looks ugly, can we fix it? */
257 spin_lock_bh(&htc->tx_lock); 137 spin_lock_bh(&htc->tx_lock);
258 if (htc->stopped) { 138 if (htc->stopped) {
259 spin_unlock_bh(&htc->tx_lock); 139 spin_unlock_bh(&htc->tx_lock);
260 return -ESHUTDOWN; 140 return -ESHUTDOWN;
261 } 141 }
142 spin_unlock_bh(&htc->tx_lock);
262 143
263 __skb_queue_tail(&ep->tx_queue, skb);
264 skb_push(skb, sizeof(struct ath10k_htc_hdr)); 144 skb_push(skb, sizeof(struct ath10k_htc_hdr));
265 spin_unlock_bh(&htc->tx_lock);
266 145
267 queue_work(htc->ar->workqueue, &ep->send_work); 146 if (ep->tx_credit_flow_enabled) {
147 credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
148 spin_lock_bh(&htc->tx_lock);
149 if (ep->tx_credits < credits) {
150 spin_unlock_bh(&htc->tx_lock);
151 ret = -EAGAIN;
152 goto err_pull;
153 }
154 ep->tx_credits -= credits;
155 spin_unlock_bh(&htc->tx_lock);
156 }
157
158 ath10k_htc_prepare_tx_skb(ep, skb);
159
160 ret = ath10k_skb_map(htc->ar->dev, skb);
161 if (ret)
162 goto err_credits;
163
164 ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
165 skb->len, skb);
166 if (ret)
167 goto err_unmap;
168
268 return 0; 169 return 0;
170
171err_unmap:
172 ath10k_skb_unmap(htc->ar->dev, skb);
173err_credits:
174 if (ep->tx_credit_flow_enabled) {
175 spin_lock_bh(&htc->tx_lock);
176 ep->tx_credits += credits;
177 spin_unlock_bh(&htc->tx_lock);
178
179 if (ep->ep_ops.ep_tx_credits)
180 ep->ep_ops.ep_tx_credits(htc->ar);
181 }
182err_pull:
183 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
184 return ret;
269} 185}
270 186
271static int ath10k_htc_tx_completion_handler(struct ath10k *ar, 187static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
@@ -278,39 +194,9 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
278 ath10k_htc_notify_tx_completion(ep, skb); 194 ath10k_htc_notify_tx_completion(ep, skb);
279 /* the skb now belongs to the completion handler */ 195 /* the skb now belongs to the completion handler */
280 196
281 /* note: when using TX credit flow, the re-checking of queues happens
282 * when credits flow back from the target. in the non-TX credit case,
283 * we recheck after the packet completes */
284 spin_lock_bh(&htc->tx_lock);
285 if (!ep->tx_credit_flow_enabled && !htc->stopped)
286 queue_work(ar->workqueue, &ep->send_work);
287 spin_unlock_bh(&htc->tx_lock);
288
289 return 0; 197 return 0;
290} 198}
291 199
292/* flush endpoint TX queue */
293static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
294 struct ath10k_htc_ep *ep)
295{
296 struct sk_buff *skb;
297 struct ath10k_skb_cb *skb_cb;
298
299 spin_lock_bh(&htc->tx_lock);
300 for (;;) {
301 skb = __skb_dequeue(&ep->tx_queue);
302 if (!skb)
303 break;
304
305 skb_cb = ATH10K_SKB_CB(skb);
306 skb_cb->is_aborted = true;
307 ath10k_htc_notify_tx_completion(ep, skb);
308 }
309 spin_unlock_bh(&htc->tx_lock);
310
311 cancel_work_sync(&ep->send_work);
312}
313
314/***********/ 200/***********/
315/* Receive */ 201/* Receive */
316/***********/ 202/***********/
@@ -340,8 +226,11 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
340 ep = &htc->endpoint[report->eid]; 226 ep = &htc->endpoint[report->eid];
341 ep->tx_credits += report->credits; 227 ep->tx_credits += report->credits;
342 228
343 if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue)) 229 if (ep->ep_ops.ep_tx_credits) {
344 queue_work(htc->ar->workqueue, &ep->send_work); 230 spin_unlock_bh(&htc->tx_lock);
231 ep->ep_ops.ep_tx_credits(htc->ar);
232 spin_lock_bh(&htc->tx_lock);
233 }
345 } 234 }
346 spin_unlock_bh(&htc->tx_lock); 235 spin_unlock_bh(&htc->tx_lock);
347} 236}
@@ -599,10 +488,8 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
599 ep->max_ep_message_len = 0; 488 ep->max_ep_message_len = 0;
600 ep->max_tx_queue_depth = 0; 489 ep->max_tx_queue_depth = 0;
601 ep->eid = i; 490 ep->eid = i;
602 skb_queue_head_init(&ep->tx_queue);
603 ep->htc = htc; 491 ep->htc = htc;
604 ep->tx_credit_flow_enabled = true; 492 ep->tx_credit_flow_enabled = true;
605 INIT_WORK(&ep->send_work, ath10k_htc_send_work);
606 } 493 }
607} 494}
608 495
@@ -752,8 +639,8 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
752 tx_alloc = ath10k_htc_get_credit_allocation(htc, 639 tx_alloc = ath10k_htc_get_credit_allocation(htc,
753 conn_req->service_id); 640 conn_req->service_id);
754 if (!tx_alloc) 641 if (!tx_alloc)
755 ath10k_dbg(ATH10K_DBG_HTC, 642 ath10k_dbg(ATH10K_DBG_BOOT,
756 "HTC Service %s does not allocate target credits\n", 643 "boot htc service %s does not allocate target credits\n",
757 htc_service_name(conn_req->service_id)); 644 htc_service_name(conn_req->service_id));
758 645
759 skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); 646 skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
@@ -772,16 +659,16 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
772 659
773 flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC); 660 flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
774 661
775 req_msg = &msg->connect_service;
776 req_msg->flags = __cpu_to_le16(flags);
777 req_msg->service_id = __cpu_to_le16(conn_req->service_id);
778
779 /* Only enable credit flow control for WMI ctrl service */ 662 /* Only enable credit flow control for WMI ctrl service */
780 if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) { 663 if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
781 flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL; 664 flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
782 disable_credit_flow_ctrl = true; 665 disable_credit_flow_ctrl = true;
783 } 666 }
784 667
668 req_msg = &msg->connect_service;
669 req_msg->flags = __cpu_to_le16(flags);
670 req_msg->service_id = __cpu_to_le16(conn_req->service_id);
671
785 INIT_COMPLETION(htc->ctl_resp); 672 INIT_COMPLETION(htc->ctl_resp);
786 673
787 status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb); 674 status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
@@ -873,19 +760,19 @@ setup:
873 if (status) 760 if (status)
874 return status; 761 return status;
875 762
876 ath10k_dbg(ATH10K_DBG_HTC, 763 ath10k_dbg(ATH10K_DBG_BOOT,
877 "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n", 764 "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
878 htc_service_name(ep->service_id), ep->ul_pipe_id, 765 htc_service_name(ep->service_id), ep->ul_pipe_id,
879 ep->dl_pipe_id, ep->eid); 766 ep->dl_pipe_id, ep->eid);
880 767
881 ath10k_dbg(ATH10K_DBG_HTC, 768 ath10k_dbg(ATH10K_DBG_BOOT,
882 "EP %d UL polled: %d, DL polled: %d\n", 769 "boot htc ep %d ul polled %d dl polled %d\n",
883 ep->eid, ep->ul_is_polled, ep->dl_is_polled); 770 ep->eid, ep->ul_is_polled, ep->dl_is_polled);
884 771
885 if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) { 772 if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
886 ep->tx_credit_flow_enabled = false; 773 ep->tx_credit_flow_enabled = false;
887 ath10k_dbg(ATH10K_DBG_HTC, 774 ath10k_dbg(ATH10K_DBG_BOOT,
888 "HTC service: %s eid: %d TX flow control disabled\n", 775 "boot htc service '%s' eid %d TX flow control disabled\n",
889 htc_service_name(ep->service_id), assigned_eid); 776 htc_service_name(ep->service_id), assigned_eid);
890 } 777 }
891 778
@@ -945,18 +832,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
945 */ 832 */
946void ath10k_htc_stop(struct ath10k_htc *htc) 833void ath10k_htc_stop(struct ath10k_htc *htc)
947{ 834{
948 int i;
949 struct ath10k_htc_ep *ep;
950
951 spin_lock_bh(&htc->tx_lock); 835 spin_lock_bh(&htc->tx_lock);
952 htc->stopped = true; 836 htc->stopped = true;
953 spin_unlock_bh(&htc->tx_lock); 837 spin_unlock_bh(&htc->tx_lock);
954 838
955 for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
956 ep = &htc->endpoint[i];
957 ath10k_htc_flush_endpoint_tx(htc, ep);
958 }
959
960 ath10k_hif_stop(htc->ar); 839 ath10k_hif_stop(htc->ar);
961} 840}
962 841
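
ath10k_htc_send() now reserves credits inline with DIV_ROUND_UP() and unwinds them on failure, instead of parking the skb on a per-endpoint queue and kicking a send worker. A sketch of that reserve/consume/return pattern, reduced to the locking and credit arithmetic; my_ep is an illustrative stand-in for ath10k_htc_ep:

/* Sketch of synchronous credit reservation with rollback on failure. */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct my_ep {
	spinlock_t lock;
	int credits;
	int credit_size;
};

static int my_send(struct my_ep *ep, unsigned int len)
{
	int credits = DIV_ROUND_UP(len, ep->credit_size);
	int ret;

	spin_lock_bh(&ep->lock);
	if (ep->credits < credits) {
		spin_unlock_bh(&ep->lock);
		return -EAGAIN;		/* caller retries on a credit report */
	}
	ep->credits -= credits;
	spin_unlock_bh(&ep->lock);

	ret = 0; /* ...map the buffer and hand it to the HIF layer here... */
	if (ret) {
		/* failure path: give the reserved credits back */
		spin_lock_bh(&ep->lock);
		ep->credits += credits;
		spin_unlock_bh(&ep->lock);
	}

	return ret;
}
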
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index e1dd8c761853..4716d331e6b6 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -276,6 +276,7 @@ struct ath10k_htc_ops {
276struct ath10k_htc_ep_ops { 276struct ath10k_htc_ep_ops {
277 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *); 277 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
278 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *); 278 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
279 void (*ep_tx_credits)(struct ath10k *);
279}; 280};
280 281
281/* service connection information */ 282/* service connection information */
@@ -315,15 +316,11 @@ struct ath10k_htc_ep {
315 int ul_is_polled; /* call HIF to get tx completions */ 316 int ul_is_polled; /* call HIF to get tx completions */
316 int dl_is_polled; /* call HIF to fetch rx (not implemented) */ 317 int dl_is_polled; /* call HIF to fetch rx (not implemented) */
317 318
318 struct sk_buff_head tx_queue;
319
320 u8 seq_no; /* for debugging */ 319 u8 seq_no; /* for debugging */
321 int tx_credits; 320 int tx_credits;
322 int tx_credit_size; 321 int tx_credit_size;
323 int tx_credits_per_max_message; 322 int tx_credits_per_max_message;
324 bool tx_credit_flow_enabled; 323 bool tx_credit_flow_enabled;
325
326 struct work_struct send_work;
327}; 324};
328 325
329struct ath10k_htc_svc_tx_credits { 326struct ath10k_htc_svc_tx_credits {
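
The new ep_tx_credits op gives upper layers a notification whenever a credit report returns credits. A hedged sketch of one plausible consumer that wakes the tx_credits_wq added to core.h earlier in this patch; the actual wmi.c wiring is outside this excerpt, so treat the function names and the conn-req structure usage here as assumptions:

/* Hedged sketch: waking a blocked WMI sender from the credits hook. */
#include "core.h"
#include "htc.h"

static void my_wmi_op_ep_tx_credits(struct ath10k *ar)
{
	/* a sender blocked for lack of credits can now re-check them */
	wake_up(&ar->wmi.tx_credits_wq);
}

static void my_wmi_fill_conn_req(struct ath10k_htc_svc_conn_req *req)
{
	req->ep_ops.ep_tx_credits = my_wmi_op_ep_tx_credits;
}
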
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 39342c5cfcb2..5f7eeebc5432 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -104,21 +104,16 @@ err_htc_attach:
104 104
105static int ath10k_htt_verify_version(struct ath10k_htt *htt) 105static int ath10k_htt_verify_version(struct ath10k_htt *htt)
106{ 106{
107 ath10k_dbg(ATH10K_DBG_HTT, 107 ath10k_info("htt target version %d.%d\n",
108 "htt target version %d.%d; host version %d.%d\n", 108 htt->target_version_major, htt->target_version_minor);
109 htt->target_version_major, 109
110 htt->target_version_minor, 110 if (htt->target_version_major != 2 &&
111 HTT_CURRENT_VERSION_MAJOR, 111 htt->target_version_major != 3) {
112 HTT_CURRENT_VERSION_MINOR); 112 ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
113 113 htt->target_version_major);
114 if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
115 ath10k_err("htt major versions are incompatible!\n");
116 return -ENOTSUPP; 114 return -ENOTSUPP;
117 } 115 }
118 116
119 if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
120 ath10k_warn("htt minor version differ but still compatible\n");
121
122 return 0; 117 return 0;
123} 118}
124 119
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 318be4629cde..1a337e93b7e9 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -19,13 +19,11 @@
19#define _HTT_H_ 19#define _HTT_H_
20 20
21#include <linux/bug.h> 21#include <linux/bug.h>
22#include <linux/interrupt.h>
22 23
23#include "htc.h" 24#include "htc.h"
24#include "rx_desc.h" 25#include "rx_desc.h"
25 26
26#define HTT_CURRENT_VERSION_MAJOR 2
27#define HTT_CURRENT_VERSION_MINOR 1
28
29enum htt_dbg_stats_type { 27enum htt_dbg_stats_type {
30 HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0, 28 HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
31 HTT_DBG_STATS_RX_REORDER = 1 << 1, 29 HTT_DBG_STATS_RX_REORDER = 1 << 1,
@@ -45,6 +43,9 @@ enum htt_h2t_msg_type { /* host-to-target */
45 HTT_H2T_MSG_TYPE_SYNC = 4, 43 HTT_H2T_MSG_TYPE_SYNC = 4,
46 HTT_H2T_MSG_TYPE_AGGR_CFG = 5, 44 HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
47 HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6, 45 HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
46
47 /* This command is used for sending management frames in HTT < 3.0.
48 * HTT >= 3.0 uses TX_FRM for everything. */
48 HTT_H2T_MSG_TYPE_MGMT_TX = 7, 49 HTT_H2T_MSG_TYPE_MGMT_TX = 7,
49 50
50 HTT_H2T_NUM_MSGS /* keep this last */ 51 HTT_H2T_NUM_MSGS /* keep this last */
@@ -1268,6 +1269,7 @@ struct ath10k_htt {
1268 /* set if host-fw communication goes haywire 1269 /* set if host-fw communication goes haywire
1269 * used to avoid further failures */ 1270 * used to avoid further failures */
1270 bool rx_confused; 1271 bool rx_confused;
1272 struct tasklet_struct rx_replenish_task;
1271}; 1273};
1272 1274
1273#define RX_HTT_HDR_STATUS_LEN 64 1275#define RX_HTT_HDR_STATUS_LEN 64
@@ -1308,6 +1310,10 @@ struct htt_rx_desc {
1308#define HTT_RX_BUF_SIZE 1920 1310#define HTT_RX_BUF_SIZE 1920
1309#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc)) 1311#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
1310 1312
1313/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
1314 * aggregated traffic more nicely. */
1315#define ATH10K_HTT_MAX_NUM_REFILL 16
1316
1311/* 1317/*
1312 * DMA_MAP expects the buffer to be an integral number of cache lines. 1318 * DMA_MAP expects the buffer to be an integral number of cache lines.
1313 * Rather than checking the actual cache line size, this code makes a 1319 * Rather than checking the actual cache line size, this code makes a
@@ -1327,6 +1333,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt);
1327void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); 1333void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
1328void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); 1334void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
1329int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); 1335int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
1336int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
1330int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt); 1337int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
1331 1338
1332void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); 1339void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index e784c40b904b..90d4f74c28d7 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -20,6 +20,7 @@
20#include "htt.h" 20#include "htt.h"
21#include "txrx.h" 21#include "txrx.h"
22#include "debug.h" 22#include "debug.h"
23#include "trace.h"
23 24
24#include <linux/log2.h> 25#include <linux/log2.h>
25 26
@@ -40,6 +41,10 @@
40/* when under memory pressure rx ring refill may fail and needs a retry */ 41/* when under memory pressure rx ring refill may fail and needs a retry */
41#define HTT_RX_RING_REFILL_RETRY_MS 50 42#define HTT_RX_RING_REFILL_RETRY_MS 50
42 43
44
45static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
46
47
43static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt) 48static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
44{ 49{
45 int size; 50 int size;
@@ -177,10 +182,27 @@ static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
177 182
178static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) 183static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
179{ 184{
180 int ret, num_to_fill; 185 int ret, num_deficit, num_to_fill;
181 186
187 /* Refilling the whole RX ring buffer proves to be a bad idea. The
 188	 * reason is RX may take up a significant amount of CPU cycles and starve
 189	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
 190	 * with the ath10k wlan interface. This ended up with very poor performance
 191	 * once the host system's CPU was overwhelmed with RX on ath10k.
 192	 *
 193	 * By limiting the number of refills the replenishing occurs
 194	 * progressively. This in turn makes use of the fact that tasklets are
 195	 * processed in FIFO order. This means actual RX processing can starve
 196	 * out refilling. If there are not enough buffers on the RX ring, FW will
 197	 * not report RX until it is refilled with enough buffers. This
 198	 * automatically balances the load with respect to CPU power.
 199	 *
 200	 * This probably comes at the cost of lower maximum throughput but
 201	 * improves the average and stability. */
182 spin_lock_bh(&htt->rx_ring.lock); 202 spin_lock_bh(&htt->rx_ring.lock);
183 num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; 203 num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
204 num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
205 num_deficit -= num_to_fill;
184 ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill); 206 ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
185 if (ret == -ENOMEM) { 207 if (ret == -ENOMEM) {
186 /* 208 /*
@@ -191,6 +213,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
191 */ 213 */
192 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 214 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
193 msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); 215 msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
216 } else if (num_deficit > 0) {
217 tasklet_schedule(&htt->rx_replenish_task);
194 } 218 }
195 spin_unlock_bh(&htt->rx_ring.lock); 219 spin_unlock_bh(&htt->rx_ring.lock);
196} 220}
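
The replenish path above now posts at most ATH10K_HTT_MAX_NUM_REFILL buffers per pass and reschedules its tasklet for any remaining deficit, so RX processing and refilling interleave. A sketch of that throttled-refill loop with the ring details stripped out; my_* names are illustrative:

/* Sketch of a throttled ring refill that defers the remainder to a tasklet. */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#define MY_MAX_REFILL 16

struct my_ring {
	spinlock_t lock;
	int fill_level;
	int fill_cnt;
	struct tasklet_struct replenish_task;
};

static int my_fill_n(struct my_ring *r, int n)
{
	r->fill_cnt += n;	/* stand-in for the real buffer posting */
	return 0;
}

static void my_replenish(struct my_ring *r)
{
	int num_deficit, num_to_fill;

	spin_lock_bh(&r->lock);
	num_deficit = r->fill_level - r->fill_cnt;
	num_to_fill = min(MY_MAX_REFILL, num_deficit);
	num_deficit -= num_to_fill;

	my_fill_n(r, num_to_fill);

	/* leave the rest for the next tasklet run so RX processing can
	 * interleave with refilling */
	if (num_deficit > 0)
		tasklet_schedule(&r->replenish_task);
	spin_unlock_bh(&r->lock);
}
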
@@ -212,6 +236,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
212 int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld; 236 int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
213 237
214 del_timer_sync(&htt->rx_ring.refill_retry_timer); 238 del_timer_sync(&htt->rx_ring.refill_retry_timer);
239 tasklet_kill(&htt->rx_replenish_task);
215 240
216 while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) { 241 while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
217 struct sk_buff *skb = 242 struct sk_buff *skb =
@@ -441,6 +466,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
441 return msdu_chaining; 466 return msdu_chaining;
442} 467}
443 468
469static void ath10k_htt_rx_replenish_task(unsigned long ptr)
470{
471 struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
472 ath10k_htt_rx_msdu_buff_replenish(htt);
473}
474
444int ath10k_htt_rx_attach(struct ath10k_htt *htt) 475int ath10k_htt_rx_attach(struct ath10k_htt *htt)
445{ 476{
446 dma_addr_t paddr; 477 dma_addr_t paddr;
@@ -501,7 +532,10 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
501 if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level)) 532 if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
502 goto err_fill_ring; 533 goto err_fill_ring;
503 534
504 ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n", 535 tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
536 (unsigned long)htt);
537
538 ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
505 htt->rx_ring.size, htt->rx_ring.fill_level); 539 htt->rx_ring.size, htt->rx_ring.fill_level);
506 return 0; 540 return 0;
507 541
@@ -590,134 +624,144 @@ static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
590 return false; 624 return false;
591} 625}
592 626
593static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt, 627struct rfc1042_hdr {
594 struct htt_rx_info *info) 628 u8 llc_dsap;
629 u8 llc_ssap;
630 u8 llc_ctrl;
631 u8 snap_oui[3];
632 __be16 snap_type;
633} __packed;
634
635struct amsdu_subframe_hdr {
636 u8 dst[ETH_ALEN];
637 u8 src[ETH_ALEN];
638 __be16 len;
639} __packed;
640
641static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
642 struct htt_rx_info *info)
595{ 643{
596 struct htt_rx_desc *rxd; 644 struct htt_rx_desc *rxd;
597 struct sk_buff *amsdu;
598 struct sk_buff *first; 645 struct sk_buff *first;
599 struct ieee80211_hdr *hdr;
600 struct sk_buff *skb = info->skb; 646 struct sk_buff *skb = info->skb;
601 enum rx_msdu_decap_format fmt; 647 enum rx_msdu_decap_format fmt;
602 enum htt_rx_mpdu_encrypt_type enctype; 648 enum htt_rx_mpdu_encrypt_type enctype;
649 struct ieee80211_hdr *hdr;
650 u8 hdr_buf[64], addr[ETH_ALEN], *qos;
603 unsigned int hdr_len; 651 unsigned int hdr_len;
604 int crypto_len;
605 652
606 rxd = (void *)skb->data - sizeof(*rxd); 653 rxd = (void *)skb->data - sizeof(*rxd);
607 fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
608 RX_MSDU_START_INFO1_DECAP_FORMAT);
609 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), 654 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
610 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 655 RX_MPDU_START_INFO0_ENCRYPT_TYPE);
611 656
612 /* FIXME: No idea what assumptions are safe here. Need logs */ 657 hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
613 if ((fmt == RX_MSDU_DECAP_RAW && skb->next) || 658 hdr_len = ieee80211_hdrlen(hdr->frame_control);
614 (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) { 659 memcpy(hdr_buf, hdr, hdr_len);
615 ath10k_htt_rx_free_msdu_chain(skb->next); 660 hdr = (struct ieee80211_hdr *)hdr_buf;
616 skb->next = NULL;
617 return -ENOTSUPP;
618 }
619 661
620 /* A-MSDU max is a little less than 8K */ 662 /* FIXME: Hopefully this is a temporary measure.
621 amsdu = dev_alloc_skb(8*1024); 663 *
622 if (!amsdu) { 664 * Reporting individual A-MSDU subframes means each reported frame
623 ath10k_warn("A-MSDU allocation failed\n"); 665 * shares the same sequence number.
624 ath10k_htt_rx_free_msdu_chain(skb->next); 666 *
625 skb->next = NULL; 667 * mac80211 drops frames it recognizes as duplicates, i.e.
626 return -ENOMEM; 668 * retransmission flag is set and sequence number matches sequence
627 } 669 * number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10
628 670 * "Duplicate detection and recovery")
629 if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) { 671 *
630 int hdrlen; 672 * To avoid frames being dropped clear retransmission flag for all
631 673 * received A-MSDUs.
632 hdr = (void *)rxd->rx_hdr_status; 674 *
633 hdrlen = ieee80211_hdrlen(hdr->frame_control); 675 * Worst case: actual duplicate frames will be reported but this should
634 memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen); 676 * still be handled gracefully by other OSI/ISO layers. */
635 } 677 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
636 678
637 first = skb; 679 first = skb;
638 while (skb) { 680 while (skb) {
639 void *decap_hdr; 681 void *decap_hdr;
640 int decap_len = 0; 682 int len;
641 683
642 rxd = (void *)skb->data - sizeof(*rxd); 684 rxd = (void *)skb->data - sizeof(*rxd);
643 fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), 685 fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
644 RX_MSDU_START_INFO1_DECAP_FORMAT); 686 RX_MSDU_START_INFO1_DECAP_FORMAT);
645 decap_hdr = (void *)rxd->rx_hdr_status; 687 decap_hdr = (void *)rxd->rx_hdr_status;
646 688
647 if (skb == first) { 689 skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
648 /* We receive linked A-MSDU subframe skbuffs. The
649 * first one contains the original 802.11 header (and
650 * possible crypto param) in the RX descriptor. The
651 * A-MSDU subframe header follows that. Each part is
652 * aligned to 4 byte boundary. */
653
654 hdr = (void *)amsdu->data;
655 hdr_len = ieee80211_hdrlen(hdr->frame_control);
656 crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
657
658 decap_hdr += roundup(hdr_len, 4);
659 decap_hdr += roundup(crypto_len, 4);
660 }
661 690
662 if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) { 691 /* First frame in an A-MSDU chain has more decapped data. */
663 /* Ethernet2 decap inserts ethernet header in place of 692 if (skb == first) {
664 * A-MSDU subframe header. */ 693 len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
665 skb_pull(skb, 6 + 6 + 2); 694 len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
666 695 4);
667 /* A-MSDU subframe header length */ 696 decap_hdr += len;
668 decap_len += 6 + 6 + 2;
669
670 /* Ethernet2 decap also strips the LLC/SNAP so we need
671 * to re-insert it. The LLC/SNAP follows A-MSDU
672 * subframe header. */
673 /* FIXME: Not all LLCs are 8 bytes long */
674 decap_len += 8;
675
676 memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
677 } 697 }
678 698
679 if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) { 699 switch (fmt) {
680 /* Native Wifi decap inserts regular 802.11 header 700 case RX_MSDU_DECAP_RAW:
681 * in place of A-MSDU subframe header. */ 701 /* remove trailing FCS */
702 skb_trim(skb, skb->len - FCS_LEN);
703 break;
704 case RX_MSDU_DECAP_NATIVE_WIFI:
705 /* pull decapped header and copy DA */
682 hdr = (struct ieee80211_hdr *)skb->data; 706 hdr = (struct ieee80211_hdr *)skb->data;
683 skb_pull(skb, ieee80211_hdrlen(hdr->frame_control)); 707 hdr_len = ieee80211_hdrlen(hdr->frame_control);
708 memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
709 skb_pull(skb, hdr_len);
684 710
685 /* A-MSDU subframe header length */ 711 /* push original 802.11 header */
686 decap_len += 6 + 6 + 2; 712 hdr = (struct ieee80211_hdr *)hdr_buf;
713 hdr_len = ieee80211_hdrlen(hdr->frame_control);
714 memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
687 715
688 memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len); 716 /* original A-MSDU header has the QoS A-MSDU-present bit set but we're
689 } 717 * not including an A-MSDU subframe header */
718 hdr = (struct ieee80211_hdr *)skb->data;
719 qos = ieee80211_get_qos_ctl(hdr);
720 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
690 721
691 if (fmt == RX_MSDU_DECAP_RAW) 722 /* original 802.11 header has a different DA */
692 skb_trim(skb, skb->len - 4); /* remove FCS */ 723 memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
724 break;
725 case RX_MSDU_DECAP_ETHERNET2_DIX:
726 /* strip ethernet header and insert decapped 802.11
727 * header, amsdu subframe header and rfc1042 header */
693 728
694 memcpy(skb_put(amsdu, skb->len), skb->data, skb->len); 729 len = 0;
730 len += sizeof(struct rfc1042_hdr);
731 len += sizeof(struct amsdu_subframe_hdr);
695 732
696 /* A-MSDU subframes are padded to 4bytes 733 skb_pull(skb, sizeof(struct ethhdr));
697 * but relative to first subframe, not the whole MPDU */ 734 memcpy(skb_push(skb, len), decap_hdr, len);
698 if (skb->next && ((decap_len + skb->len) & 3)) { 735 memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
699 int padlen = 4 - ((decap_len + skb->len) & 3); 736 break;
700 memset(skb_put(amsdu, padlen), 0, padlen); 737 case RX_MSDU_DECAP_8023_SNAP_LLC:
738 /* insert decapped 802.11 header making a singly
739 * A-MSDU */
740 memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
741 break;
701 } 742 }
702 743
744 info->skb = skb;
745 info->encrypt_type = enctype;
703 skb = skb->next; 746 skb = skb->next;
704 } 747 info->skb->next = NULL;
705 748
706 info->skb = amsdu; 749 ath10k_process_rx(htt->ar, info);
707 info->encrypt_type = enctype; 750 }
708
709 ath10k_htt_rx_free_msdu_chain(first);
710 751
711 return 0; 752 /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
753 * monitor interface active for sniffing purposes. */
712} 754}
713 755
714static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info) 756static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
715{ 757{
716 struct sk_buff *skb = info->skb; 758 struct sk_buff *skb = info->skb;
717 struct htt_rx_desc *rxd; 759 struct htt_rx_desc *rxd;
718 struct ieee80211_hdr *hdr; 760 struct ieee80211_hdr *hdr;
719 enum rx_msdu_decap_format fmt; 761 enum rx_msdu_decap_format fmt;
720 enum htt_rx_mpdu_encrypt_type enctype; 762 enum htt_rx_mpdu_encrypt_type enctype;
763 int hdr_len;
764 void *rfc1042;
721 765
722 /* This shouldn't happen. If it does then it may be a FW bug. */ 766 /* This shouldn't happen. If it does then it may be a FW bug. */
723 if (skb->next) { 767 if (skb->next) {
@@ -731,49 +775,53 @@ static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
731 RX_MSDU_START_INFO1_DECAP_FORMAT); 775 RX_MSDU_START_INFO1_DECAP_FORMAT);
732 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), 776 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
733 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 777 RX_MPDU_START_INFO0_ENCRYPT_TYPE);
734 hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN; 778 hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
779 hdr_len = ieee80211_hdrlen(hdr->frame_control);
780
781 skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
735 782
736 switch (fmt) { 783 switch (fmt) {
737 case RX_MSDU_DECAP_RAW: 784 case RX_MSDU_DECAP_RAW:
738 /* remove trailing FCS */ 785 /* remove trailing FCS */
739 skb_trim(skb, skb->len - 4); 786 skb_trim(skb, skb->len - FCS_LEN);
740 break; 787 break;
741 case RX_MSDU_DECAP_NATIVE_WIFI: 788 case RX_MSDU_DECAP_NATIVE_WIFI:
742 /* nothing to do here */ 789 /* Pull decapped header */
790 hdr = (struct ieee80211_hdr *)skb->data;
791 hdr_len = ieee80211_hdrlen(hdr->frame_control);
792 skb_pull(skb, hdr_len);
793
794 /* Push original header */
795 hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
796 hdr_len = ieee80211_hdrlen(hdr->frame_control);
797 memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
743 break; 798 break;
744 case RX_MSDU_DECAP_ETHERNET2_DIX: 799 case RX_MSDU_DECAP_ETHERNET2_DIX:
745 /* macaddr[6] + macaddr[6] + ethertype[2] */ 800 /* strip ethernet header and insert decapped 802.11 header and
746 skb_pull(skb, 6 + 6 + 2); 801 * rfc1042 header */
747 break;
748 case RX_MSDU_DECAP_8023_SNAP_LLC:
749 /* macaddr[6] + macaddr[6] + len[2] */
750 /* we don't need this for non-A-MSDU */
751 skb_pull(skb, 6 + 6 + 2);
752 break;
753 }
754 802
755 if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) { 803 rfc1042 = hdr;
756 void *llc; 804 rfc1042 += roundup(hdr_len, 4);
757 int llclen; 805 rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
758 806
759 llclen = 8; 807 skb_pull(skb, sizeof(struct ethhdr));
760 llc = hdr; 808 memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
761 llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4); 809 rfc1042, sizeof(struct rfc1042_hdr));
762 llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4); 810 memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
763 811 break;
764 skb_push(skb, llclen); 812 case RX_MSDU_DECAP_8023_SNAP_LLC:
765 memcpy(skb->data, llc, llclen); 813 /* remove A-MSDU subframe header and insert
766 } 814 * decapped 802.11 header. rfc1042 header is already there */
767 815
768 if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) { 816 skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
769 int len = ieee80211_hdrlen(hdr->frame_control); 817 memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
770 skb_push(skb, len); 818 break;
771 memcpy(skb->data, hdr, len);
772 } 819 }
773 820
774 info->skb = skb; 821 info->skb = skb;
775 info->encrypt_type = enctype; 822 info->encrypt_type = enctype;
776 return 0; 823
824 ath10k_process_rx(htt->ar, info);
777} 825}
778 826
779static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb) 827static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
@@ -845,8 +893,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
845 int fw_desc_len; 893 int fw_desc_len;
846 u8 *fw_desc; 894 u8 *fw_desc;
847 int i, j; 895 int i, j;
848 int ret;
849 int ip_summed;
850 896
851 memset(&info, 0, sizeof(info)); 897 memset(&info, 0, sizeof(info));
852 898
@@ -921,11 +967,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
921 continue; 967 continue;
922 } 968 }
923 969
924 /* The skb is not yet processed and it may be
925 * reallocated. Since the offload is in the original
926 * skb extract the checksum now and assign it later */
927 ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
928
929 info.skb = msdu_head; 970 info.skb = msdu_head;
930 info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head); 971 info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
931 info.signal = ATH10K_DEFAULT_NOISE_FLOOR; 972 info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -938,28 +979,13 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
938 hdr = ath10k_htt_rx_skb_get_hdr(msdu_head); 979 hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
939 980
940 if (ath10k_htt_rx_hdr_is_amsdu(hdr)) 981 if (ath10k_htt_rx_hdr_is_amsdu(hdr))
941 ret = ath10k_htt_rx_amsdu(htt, &info); 982 ath10k_htt_rx_amsdu(htt, &info);
942 else 983 else
943 ret = ath10k_htt_rx_msdu(htt, &info); 984 ath10k_htt_rx_msdu(htt, &info);
944
945 if (ret && !info.fcs_err) {
946 ath10k_warn("error processing msdus %d\n", ret);
947 dev_kfree_skb_any(info.skb);
948 continue;
949 }
950
951 if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
952 ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
953
954 info.skb->ip_summed = ip_summed;
955
956 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
957 info.skb->data, info.skb->len);
958 ath10k_process_rx(htt->ar, &info);
959 } 985 }
960 } 986 }
961 987
962 ath10k_htt_rx_msdu_buff_replenish(htt); 988 tasklet_schedule(&htt->rx_replenish_task);
963} 989}
964 990
965static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, 991static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
@@ -1131,7 +1157,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1131 break; 1157 break;
1132 } 1158 }
1133 1159
1134 ath10k_txrx_tx_completed(htt, &tx_done); 1160 ath10k_txrx_tx_unref(htt, &tx_done);
1135 break; 1161 break;
1136 } 1162 }
1137 case HTT_T2H_MSG_TYPE_TX_COMPL_IND: { 1163 case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
@@ -1165,7 +1191,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1165 for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { 1191 for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1166 msdu_id = resp->data_tx_completion.msdus[i]; 1192 msdu_id = resp->data_tx_completion.msdus[i];
1167 tx_done.msdu_id = __le16_to_cpu(msdu_id); 1193 tx_done.msdu_id = __le16_to_cpu(msdu_id);
1168 ath10k_txrx_tx_completed(htt, &tx_done); 1194 ath10k_txrx_tx_unref(htt, &tx_done);
1169 } 1195 }
1170 break; 1196 break;
1171 } 1197 }
@@ -1190,8 +1216,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1190 case HTT_T2H_MSG_TYPE_TEST: 1216 case HTT_T2H_MSG_TYPE_TEST:
1191 /* FIX THIS */ 1217 /* FIX THIS */
1192 break; 1218 break;
1193 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
1194 case HTT_T2H_MSG_TYPE_STATS_CONF: 1219 case HTT_T2H_MSG_TYPE_STATS_CONF:
1220 trace_ath10k_htt_stats(skb->data, skb->len);
1221 break;
1222 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
1195 case HTT_T2H_MSG_TYPE_RX_ADDBA: 1223 case HTT_T2H_MSG_TYPE_RX_ADDBA:
1196 case HTT_T2H_MSG_TYPE_RX_DELBA: 1224 case HTT_T2H_MSG_TYPE_RX_DELBA:
1197 case HTT_T2H_MSG_TYPE_RX_FLUSH: 1225 case HTT_T2H_MSG_TYPE_RX_FLUSH:
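
The rx decap rework above rebuilds each frame in place instead of copying A-MSDU subframes into a fresh 8 KB skb: an Ethernet2-decapped subframe has its ethhdr pulled, then the rfc1042 and A-MSDU subframe headers (found in rx_hdr_status after the 4-byte-padded 802.11 header and crypto parameters) and the original 802.11 header are pushed back. A minimal userspace sketch of that header arithmetic, reusing the packed layouts from the hunk purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

struct rfc1042_hdr {                    /* LLC/SNAP header, as in the hunk */
        uint8_t  llc_dsap;
        uint8_t  llc_ssap;
        uint8_t  llc_ctrl;
        uint8_t  snap_oui[3];
        uint16_t snap_type;             /* big-endian on the wire */
} __attribute__((packed));

struct amsdu_subframe_hdr {             /* DA, SA, subframe length */
        uint8_t  dst[ETH_ALEN];
        uint8_t  src[ETH_ALEN];
        uint16_t len;                   /* big-endian on the wire */
} __attribute__((packed));

static unsigned int align4(unsigned int len)
{
        return (len + 3) & ~3u;
}

/* The 802.11 header and the crypto parameters are each padded to 4 bytes
 * inside rx_hdr_status before the LLC/SNAP header begins. */
static unsigned int rfc1042_offset(unsigned int hdr_len, unsigned int crypto_len)
{
        return align4(hdr_len) + align4(crypto_len);
}

int main(void)
{
        unsigned int hdr_len = 26;      /* e.g. a QoS data header */
        unsigned int crypto_len = 8;    /* e.g. CCMP parameters */
        unsigned int pushed = sizeof(struct rfc1042_hdr) +
                              sizeof(struct amsdu_subframe_hdr);

        printf("rfc1042/LLC header at offset %u inside rx_hdr_status\n",
               rfc1042_offset(hdr_len, crypto_len));
        printf("ethernet decap re-inserts %u bytes plus the %u byte 802.11 header\n",
               pushed, hdr_len);
        return 0;
}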
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 656c2546b294..3b93c6a01c6c 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -96,7 +96,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
96 htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar, 96 htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
97 pipe); 97 pipe);
98 98
99 ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n", 99 ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
100 htt->max_num_pending_tx); 100 htt->max_num_pending_tx);
101 101
102 htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * 102 htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
@@ -117,7 +117,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
117 117
118static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt) 118static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
119{ 119{
120 struct sk_buff *txdesc; 120 struct htt_tx_done tx_done = {0};
121 int msdu_id; 121 int msdu_id;
122 122
123 /* No locks needed. Called after communication with the device has 123 /* No locks needed. Called after communication with the device has
@@ -127,18 +127,13 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
127 if (!test_bit(msdu_id, htt->used_msdu_ids)) 127 if (!test_bit(msdu_id, htt->used_msdu_ids))
128 continue; 128 continue;
129 129
130 txdesc = htt->pending_tx[msdu_id];
131 if (!txdesc)
132 continue;
133
134 ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", 130 ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
135 msdu_id); 131 msdu_id);
136 132
137 if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) 133 tx_done.discard = 1;
138 ATH10K_SKB_CB(txdesc)->htt.refcount = 1; 134 tx_done.msdu_id = msdu_id;
139 135
140 ATH10K_SKB_CB(txdesc)->htt.discard = true; 136 ath10k_txrx_tx_unref(htt, &tx_done);
141 ath10k_txrx_tx_unref(htt, txdesc);
142 } 137 }
143} 138}
144 139
@@ -152,26 +147,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
152 147
153void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) 148void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
154{ 149{
155 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); 150 dev_kfree_skb_any(skb);
156 struct ath10k_htt *htt = &ar->htt;
157
158 if (skb_cb->htt.is_conf) {
159 dev_kfree_skb_any(skb);
160 return;
161 }
162
163 if (skb_cb->is_aborted) {
164 skb_cb->htt.discard = true;
165
166 /* if the skbuff is aborted we need to make sure we'll free up
167 * the tx resources, we can't simply run tx_unref() 2 times
168 * because if htt tx completion came in earlier we'd access
169 * unallocated memory */
170 if (skb_cb->htt.refcount > 1)
171 skb_cb->htt.refcount = 1;
172 }
173
174 ath10k_txrx_tx_unref(htt, skb);
175} 151}
176 152
177int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) 153int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
@@ -192,10 +168,48 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
192 cmd = (struct htt_cmd *)skb->data; 168 cmd = (struct htt_cmd *)skb->data;
193 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ; 169 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
194 170
195 ATH10K_SKB_CB(skb)->htt.is_conf = true; 171 ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
172 if (ret) {
173 dev_kfree_skb_any(skb);
174 return ret;
175 }
176
177 return 0;
178}
179
180int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
181{
182 struct htt_stats_req *req;
183 struct sk_buff *skb;
184 struct htt_cmd *cmd;
185 int len = 0, ret;
186
187 len += sizeof(cmd->hdr);
188 len += sizeof(cmd->stats_req);
189
190 skb = ath10k_htc_alloc_skb(len);
191 if (!skb)
192 return -ENOMEM;
193
194 skb_put(skb, len);
195 cmd = (struct htt_cmd *)skb->data;
196 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
197
198 req = &cmd->stats_req;
199
200 memset(req, 0, sizeof(*req));
201
202 /* currently we support only masks up to 8 bits so there is no need
203 * to worry about endianness */
204 req->upload_types[0] = mask;
205 req->reset_types[0] = mask;
206 req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
207 req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
208 req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
196 209
197 ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); 210 ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
198 if (ret) { 211 if (ret) {
212 ath10k_warn("failed to send htt type stats request: %d", ret);
199 dev_kfree_skb_any(skb); 213 dev_kfree_skb_any(skb);
200 return ret; 214 return ret;
201 } 215 }
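
The new ath10k_htt_h2t_stats_req() carries a caller-supplied 64-bit cookie split across two 32-bit little-endian fields; the completion side puts them back together. A trivial standalone sketch of the split and reassembly (plain C, names are illustrative rather than driver API):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t cookie = 0x1122334455667788ULL;

        /* Split the cookie the way the stats request fills cookie_lsb
         * and cookie_msb ... */
        uint32_t lsb = (uint32_t)(cookie & 0xffffffff);
        uint32_t msb = (uint32_t)(cookie >> 32);

        /* ... and reassemble it the way a completion handler would. */
        uint64_t back = ((uint64_t)msb << 32) | lsb;

        printf("cookie %#" PRIx64 " -> lsb %#" PRIx32 " msb %#" PRIx32
               " -> %#" PRIx64 "\n", cookie, lsb, msb, back);
        return 0;
}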
@@ -279,8 +293,6 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
279 293
280#undef desc_offset 294#undef desc_offset
281 295
282 ATH10K_SKB_CB(skb)->htt.is_conf = true;
283
284 ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); 296 ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
285 if (ret) { 297 if (ret) {
286 dev_kfree_skb_any(skb); 298 dev_kfree_skb_any(skb);
@@ -293,10 +305,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
293int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) 305int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
294{ 306{
295 struct device *dev = htt->ar->dev; 307 struct device *dev = htt->ar->dev;
296 struct ath10k_skb_cb *skb_cb;
297 struct sk_buff *txdesc = NULL; 308 struct sk_buff *txdesc = NULL;
298 struct htt_cmd *cmd; 309 struct htt_cmd *cmd;
299 u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; 310 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
311 u8 vdev_id = skb_cb->htt.vdev_id;
300 int len = 0; 312 int len = 0;
301 int msdu_id = -1; 313 int msdu_id = -1;
302 int res; 314 int res;
@@ -304,30 +316,30 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
304 316
305 res = ath10k_htt_tx_inc_pending(htt); 317 res = ath10k_htt_tx_inc_pending(htt);
306 if (res) 318 if (res)
307 return res; 319 goto err;
308 320
309 len += sizeof(cmd->hdr); 321 len += sizeof(cmd->hdr);
310 len += sizeof(cmd->mgmt_tx); 322 len += sizeof(cmd->mgmt_tx);
311 323
312 txdesc = ath10k_htc_alloc_skb(len);
313 if (!txdesc) {
314 res = -ENOMEM;
315 goto err;
316 }
317
318 spin_lock_bh(&htt->tx_lock); 324 spin_lock_bh(&htt->tx_lock);
319 msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); 325 res = ath10k_htt_tx_alloc_msdu_id(htt);
320 if (msdu_id < 0) { 326 if (res < 0) {
321 spin_unlock_bh(&htt->tx_lock); 327 spin_unlock_bh(&htt->tx_lock);
322 res = msdu_id; 328 goto err_tx_dec;
323 goto err;
324 } 329 }
325 htt->pending_tx[msdu_id] = txdesc; 330 msdu_id = res;
331 htt->pending_tx[msdu_id] = msdu;
326 spin_unlock_bh(&htt->tx_lock); 332 spin_unlock_bh(&htt->tx_lock);
327 333
334 txdesc = ath10k_htc_alloc_skb(len);
335 if (!txdesc) {
336 res = -ENOMEM;
337 goto err_free_msdu_id;
338 }
339
328 res = ath10k_skb_map(dev, msdu); 340 res = ath10k_skb_map(dev, msdu);
329 if (res) 341 if (res)
330 goto err; 342 goto err_free_txdesc;
331 343
332 skb_put(txdesc, len); 344 skb_put(txdesc, len);
333 cmd = (struct htt_cmd *)txdesc->data; 345 cmd = (struct htt_cmd *)txdesc->data;
@@ -339,31 +351,27 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
339 memcpy(cmd->mgmt_tx.hdr, msdu->data, 351 memcpy(cmd->mgmt_tx.hdr, msdu->data,
340 min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); 352 min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
341 353
342 /* refcount is decremented by HTC and HTT completions until it reaches 354 skb_cb->htt.frag_len = 0;
343 * zero and is freed */ 355 skb_cb->htt.pad_len = 0;
344 skb_cb = ATH10K_SKB_CB(txdesc);
345 skb_cb->htt.msdu_id = msdu_id;
346 skb_cb->htt.refcount = 2;
347 skb_cb->htt.msdu = msdu;
348 356
349 res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); 357 res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
350 if (res) 358 if (res)
351 goto err; 359 goto err_unmap_msdu;
352 360
353 return 0; 361 return 0;
354 362
355err: 363err_unmap_msdu:
356 ath10k_skb_unmap(dev, msdu); 364 ath10k_skb_unmap(dev, msdu);
357 365err_free_txdesc:
358 if (txdesc) 366 dev_kfree_skb_any(txdesc);
359 dev_kfree_skb_any(txdesc); 367err_free_msdu_id:
360 if (msdu_id >= 0) { 368 spin_lock_bh(&htt->tx_lock);
361 spin_lock_bh(&htt->tx_lock); 369 htt->pending_tx[msdu_id] = NULL;
362 htt->pending_tx[msdu_id] = NULL; 370 ath10k_htt_tx_free_msdu_id(htt, msdu_id);
363 ath10k_htt_tx_free_msdu_id(htt, msdu_id); 371 spin_unlock_bh(&htt->tx_lock);
364 spin_unlock_bh(&htt->tx_lock); 372err_tx_dec:
365 }
366 ath10k_htt_tx_dec_pending(htt); 373 ath10k_htt_tx_dec_pending(htt);
374err:
367 return res; 375 return res;
368} 376}
369 377
@@ -373,13 +381,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
373 struct htt_cmd *cmd; 381 struct htt_cmd *cmd;
374 struct htt_data_tx_desc_frag *tx_frags; 382 struct htt_data_tx_desc_frag *tx_frags;
375 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 383 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
376 struct ath10k_skb_cb *skb_cb; 384 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
377 struct sk_buff *txdesc = NULL; 385 struct sk_buff *txdesc = NULL;
378 struct sk_buff *txfrag = NULL; 386 bool use_frags;
379 u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; 387 u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
380 u8 tid; 388 u8 tid;
381 int prefetch_len, desc_len, frag_len; 389 int prefetch_len, desc_len;
382 dma_addr_t frags_paddr;
383 int msdu_id = -1; 390 int msdu_id = -1;
384 int res; 391 int res;
385 u8 flags0; 392 u8 flags0;
@@ -387,69 +394,82 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
387 394
388 res = ath10k_htt_tx_inc_pending(htt); 395 res = ath10k_htt_tx_inc_pending(htt);
389 if (res) 396 if (res)
390 return res; 397 goto err;
398
399 spin_lock_bh(&htt->tx_lock);
400 res = ath10k_htt_tx_alloc_msdu_id(htt);
401 if (res < 0) {
402 spin_unlock_bh(&htt->tx_lock);
403 goto err_tx_dec;
404 }
405 msdu_id = res;
406 htt->pending_tx[msdu_id] = msdu;
407 spin_unlock_bh(&htt->tx_lock);
391 408
392 prefetch_len = min(htt->prefetch_len, msdu->len); 409 prefetch_len = min(htt->prefetch_len, msdu->len);
393 prefetch_len = roundup(prefetch_len, 4); 410 prefetch_len = roundup(prefetch_len, 4);
394 411
395 desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len; 412 desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
396 frag_len = sizeof(*tx_frags) * 2;
397 413
398 txdesc = ath10k_htc_alloc_skb(desc_len); 414 txdesc = ath10k_htc_alloc_skb(desc_len);
399 if (!txdesc) { 415 if (!txdesc) {
400 res = -ENOMEM; 416 res = -ENOMEM;
401 goto err; 417 goto err_free_msdu_id;
402 } 418 }
403 419
404 txfrag = dev_alloc_skb(frag_len); 420 /* Since HTT 3.0 there is no separate mgmt tx command. However in case
405 if (!txfrag) { 421 * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
406 res = -ENOMEM; 422 * fragment list the host driver specifies the frame pointer directly. */
407 goto err; 423 use_frags = htt->target_version_major < 3 ||
408 } 424 !ieee80211_is_mgmt(hdr->frame_control);
409 425
410 if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) { 426 if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
411 ath10k_warn("htt alignment check failed. dropping packet.\n"); 427 ath10k_warn("htt alignment check failed. dropping packet.\n");
412 res = -EIO; 428 res = -EIO;
413 goto err; 429 goto err_free_txdesc;
414 } 430 }
415 431
416 spin_lock_bh(&htt->tx_lock); 432 if (use_frags) {
417 msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); 433 skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
418 if (msdu_id < 0) { 434 skb_cb->htt.pad_len = (unsigned long)msdu->data -
419 spin_unlock_bh(&htt->tx_lock); 435 round_down((unsigned long)msdu->data, 4);
420 res = msdu_id; 436
421 goto err; 437 skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
438 } else {
439 skb_cb->htt.frag_len = 0;
440 skb_cb->htt.pad_len = 0;
422 } 441 }
423 htt->pending_tx[msdu_id] = txdesc;
424 spin_unlock_bh(&htt->tx_lock);
425 442
426 res = ath10k_skb_map(dev, msdu); 443 res = ath10k_skb_map(dev, msdu);
427 if (res) 444 if (res)
428 goto err; 445 goto err_pull_txfrag;
429 446
430 /* tx fragment list must be terminated with zero-entry */ 447 if (use_frags) {
431 skb_put(txfrag, frag_len); 448 dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
432 tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data; 449 DMA_TO_DEVICE);
433 tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); 450
434 tx_frags[0].len = __cpu_to_le32(msdu->len); 451 /* tx fragment list must be terminated with zero-entry */
435 tx_frags[1].paddr = __cpu_to_le32(0); 452 tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
436 tx_frags[1].len = __cpu_to_le32(0); 453 tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
437 454 skb_cb->htt.frag_len +
438 res = ath10k_skb_map(dev, txfrag); 455 skb_cb->htt.pad_len);
439 if (res) 456 tx_frags[0].len = __cpu_to_le32(msdu->len -
440 goto err; 457 skb_cb->htt.frag_len -
458 skb_cb->htt.pad_len);
459 tx_frags[1].paddr = __cpu_to_le32(0);
460 tx_frags[1].len = __cpu_to_le32(0);
461
462 dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
463 DMA_TO_DEVICE);
464 }
441 465
442 ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n", 466 ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
443 (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
444 (unsigned long long) ATH10K_SKB_CB(msdu)->paddr); 467 (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
445 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
446 txfrag->data, frag_len);
447 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ", 468 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
448 msdu->data, msdu->len); 469 msdu->data, msdu->len);
449 470
450 skb_put(txdesc, desc_len); 471 skb_put(txdesc, desc_len);
451 cmd = (struct htt_cmd *)txdesc->data; 472 cmd = (struct htt_cmd *)txdesc->data;
452 memset(cmd, 0, desc_len);
453 473
454 tid = ATH10K_SKB_CB(msdu)->htt.tid; 474 tid = ATH10K_SKB_CB(msdu)->htt.tid;
455 475
@@ -459,8 +479,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
459 if (!ieee80211_has_protected(hdr->frame_control)) 479 if (!ieee80211_has_protected(hdr->frame_control))
460 flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; 480 flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
461 flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; 481 flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
462 flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, 482
463 HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); 483 if (use_frags)
484 flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
485 HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
486 else
487 flags0 |= SM(ATH10K_HW_TXRX_MGMT,
488 HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
464 489
465 flags1 = 0; 490 flags1 = 0;
466 flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); 491 flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
@@ -468,45 +493,37 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
468 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; 493 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
469 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; 494 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
470 495
471 frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
472
473 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; 496 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
474 cmd->data_tx.flags0 = flags0; 497 cmd->data_tx.flags0 = flags0;
475 cmd->data_tx.flags1 = __cpu_to_le16(flags1); 498 cmd->data_tx.flags1 = __cpu_to_le16(flags1);
476 cmd->data_tx.len = __cpu_to_le16(msdu->len); 499 cmd->data_tx.len = __cpu_to_le16(msdu->len -
500 skb_cb->htt.frag_len -
501 skb_cb->htt.pad_len);
477 cmd->data_tx.id = __cpu_to_le16(msdu_id); 502 cmd->data_tx.id = __cpu_to_le16(msdu_id);
478 cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr); 503 cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
479 cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); 504 cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
480 505
481 memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len); 506 memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
482
483 /* refcount is decremented by HTC and HTT completions until it reaches
484 * zero and is freed */
485 skb_cb = ATH10K_SKB_CB(txdesc);
486 skb_cb->htt.msdu_id = msdu_id;
487 skb_cb->htt.refcount = 2;
488 skb_cb->htt.txfrag = txfrag;
489 skb_cb->htt.msdu = msdu;
490 507
491 res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); 508 res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
492 if (res) 509 if (res)
493 goto err; 510 goto err_unmap_msdu;
494 511
495 return 0; 512 return 0;
496err: 513
497 if (txfrag) 514err_unmap_msdu:
498 ath10k_skb_unmap(dev, txfrag);
499 if (txdesc)
500 dev_kfree_skb_any(txdesc);
501 if (txfrag)
502 dev_kfree_skb_any(txfrag);
503 if (msdu_id >= 0) {
504 spin_lock_bh(&htt->tx_lock);
505 htt->pending_tx[msdu_id] = NULL;
506 ath10k_htt_tx_free_msdu_id(htt, msdu_id);
507 spin_unlock_bh(&htt->tx_lock);
508 }
509 ath10k_htt_tx_dec_pending(htt);
510 ath10k_skb_unmap(dev, msdu); 515 ath10k_skb_unmap(dev, msdu);
516err_pull_txfrag:
517 skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
518err_free_txdesc:
519 dev_kfree_skb_any(txdesc);
520err_free_msdu_id:
521 spin_lock_bh(&htt->tx_lock);
522 htt->pending_tx[msdu_id] = NULL;
523 ath10k_htt_tx_free_msdu_id(htt, msdu_id);
524 spin_unlock_bh(&htt->tx_lock);
525err_tx_dec:
526 ath10k_htt_tx_dec_pending(htt);
527err:
511 return res; 528 return res;
512} 529}
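
With the separate txfrag skb gone, ath10k_htt_tx() now pushes the fragment list into the msdu's own headroom: pad_len restores 4-byte alignment of msdu->data, and the two-entry list (the zeroed second entry terminates it) points frag_len + pad_len bytes past the mapped head. A self-contained sketch of that bookkeeping, assuming 32-bit little-endian fields as in htt_data_tx_desc_frag:

#include <stdint.h>
#include <stdio.h>

struct frag {                           /* mirrors htt_data_tx_desc_frag */
        uint32_t paddr;                 /* little-endian on the device */
        uint32_t len;
} __attribute__((packed));

int main(void)
{
        /* Illustrative values; in the driver these come from the skb
         * and its DMA mapping. */
        uintptr_t data      = 0x1002;   /* msdu->data before the push */
        uint32_t  dma_addr  = 0x0ff0;   /* DMA address of the pushed head */
        uint32_t  frame_len = 1514;     /* frame length before the push */

        uint32_t frag_len = 2 * sizeof(struct frag);
        uint32_t pad_len  = data - (data & ~(uintptr_t)3); /* round_down(data, 4) */
        uintptr_t new_head = data - frag_len - pad_len;

        /* The fragment list sits at the (now 4-byte aligned) head and
         * points just past itself at the original frame bytes. */
        struct frag frags[2] = {
                { .paddr = dma_addr + frag_len + pad_len, .len = frame_len },
                { 0, 0 },               /* zero entry terminates the list */
        };

        printf("frag_len %u pad_len %u head aligned: %s\n",
               frag_len, pad_len, (new_head & 3) ? "no" : "yes");
        printf("frag[0] paddr %#x len %u\n", frags[0].paddr, frags[0].len);
        return 0;
}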
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 44ed5af0a204..8c1be7685922 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -24,18 +24,14 @@
24#define SUPPORTED_FW_MAJOR 1 24#define SUPPORTED_FW_MAJOR 1
25#define SUPPORTED_FW_MINOR 0 25#define SUPPORTED_FW_MINOR 0
26#define SUPPORTED_FW_RELEASE 0 26#define SUPPORTED_FW_RELEASE 0
27#define SUPPORTED_FW_BUILD 629 27#define SUPPORTED_FW_BUILD 636
28 28
29/* QCA988X 1.0 definitions */ 29/* QCA988X 1.0 definitions (unsupported) */
30#define QCA988X_HW_1_0_VERSION 0x4000002c 30#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
31#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0"
32#define QCA988X_HW_1_0_FW_FILE "firmware.bin"
33#define QCA988X_HW_1_0_OTP_FILE "otp.bin"
34#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
35#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
36 31
37/* QCA988X 2.0 definitions */ 32/* QCA988X 2.0 definitions */
38#define QCA988X_HW_2_0_VERSION 0x4100016c 33#define QCA988X_HW_2_0_VERSION 0x4100016c
34#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
39#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0" 35#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
40#define QCA988X_HW_2_0_FW_FILE "firmware.bin" 36#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
41#define QCA988X_HW_2_0_OTP_FILE "otp.bin" 37#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
@@ -53,6 +49,9 @@ enum ath10k_hw_txrx_mode {
53 ATH10K_HW_TXRX_RAW = 0, 49 ATH10K_HW_TXRX_RAW = 0,
54 ATH10K_HW_TXRX_NATIVE_WIFI = 1, 50 ATH10K_HW_TXRX_NATIVE_WIFI = 1,
55 ATH10K_HW_TXRX_ETHERNET = 2, 51 ATH10K_HW_TXRX_ETHERNET = 2,
52
53 /* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
54 ATH10K_HW_TXRX_MGMT = 3,
56}; 55};
57 56
58enum ath10k_mcast2ucast_mode { 57enum ath10k_mcast2ucast_mode {
@@ -75,7 +74,11 @@ enum ath10k_mcast2ucast_mode {
75#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) 74#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
76#define TARGET_RX_TIMEOUT_LO_PRI 100 75#define TARGET_RX_TIMEOUT_LO_PRI 100
77#define TARGET_RX_TIMEOUT_HI_PRI 40 76#define TARGET_RX_TIMEOUT_HI_PRI 40
78#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET 77
78/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
79 * avoid a very expensive re-alignment in mac80211. */
80#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
81
79#define TARGET_SCAN_MAX_PENDING_REQS 4 82#define TARGET_SCAN_MAX_PENDING_REQS 4
80#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3 83#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
81#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3 84#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
@@ -169,6 +172,10 @@ enum ath10k_mcast2ucast_mode {
169#define SOC_LPO_CAL_ENABLE_LSB 20 172#define SOC_LPO_CAL_ENABLE_LSB 20
170#define SOC_LPO_CAL_ENABLE_MASK 0x00100000 173#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
171 174
175#define SOC_CHIP_ID_ADDRESS 0x000000ec
176#define SOC_CHIP_ID_REV_LSB 8
177#define SOC_CHIP_ID_REV_MASK 0x00000f00
178
172#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008 179#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
173#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004 180#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
174#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 181#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index cf2ba4d850c9..99a9bad3f398 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -460,6 +460,11 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
460 arg.ssid_len = arvif->vif->bss_conf.ssid_len; 460 arg.ssid_len = arvif->vif->bss_conf.ssid_len;
461 } 461 }
462 462
463 ath10k_dbg(ATH10K_DBG_MAC,
464 "mac vdev %d start center_freq %d phymode %s\n",
465 arg.vdev_id, arg.channel.freq,
466 ath10k_wmi_phymode_str(arg.channel.mode));
467
463 ret = ath10k_wmi_vdev_start(ar, &arg); 468 ret = ath10k_wmi_vdev_start(ar, &arg);
464 if (ret) { 469 if (ret) {
465 ath10k_warn("WMI vdev start failed: ret %d\n", ret); 470 ath10k_warn("WMI vdev start failed: ret %d\n", ret);
@@ -503,13 +508,10 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
503{ 508{
504 struct ieee80211_channel *channel = ar->hw->conf.chandef.chan; 509 struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
505 struct wmi_vdev_start_request_arg arg = {}; 510 struct wmi_vdev_start_request_arg arg = {};
506 enum nl80211_channel_type type;
507 int ret = 0; 511 int ret = 0;
508 512
509 lockdep_assert_held(&ar->conf_mutex); 513 lockdep_assert_held(&ar->conf_mutex);
510 514
511 type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);
512
513 arg.vdev_id = vdev_id; 515 arg.vdev_id = vdev_id;
514 arg.channel.freq = channel->center_freq; 516 arg.channel.freq = channel->center_freq;
515 arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1; 517 arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
@@ -607,7 +609,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
607 goto vdev_fail; 609 goto vdev_fail;
608 } 610 }
609 611
610 ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n", 612 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
611 ar->monitor_vdev_id); 613 ar->monitor_vdev_id);
612 614
613 ar->monitor_present = true; 615 ar->monitor_present = true;
@@ -639,7 +641,7 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
639 ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); 641 ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
640 ar->monitor_present = false; 642 ar->monitor_present = false;
641 643
642 ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n", 644 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
643 ar->monitor_vdev_id); 645 ar->monitor_vdev_id);
644 return ret; 646 return ret;
645} 647}
@@ -668,7 +670,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
668 arvif->vdev_id); 670 arvif->vdev_id);
669 return; 671 return;
670 } 672 }
671 ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id); 673 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
672} 674}
673 675
674static void ath10k_control_ibss(struct ath10k_vif *arvif, 676static void ath10k_control_ibss(struct ath10k_vif *arvif,
@@ -752,14 +754,14 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
752 psmode = WMI_STA_PS_MODE_DISABLED; 754 psmode = WMI_STA_PS_MODE_DISABLED;
753 } 755 }
754 756
757 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
758 arvif->vdev_id, psmode ? "enable" : "disable");
759
755 ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id, 760 ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
756 psmode); 761 psmode);
757 if (ar_iter->ret) 762 if (ar_iter->ret)
758 ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n", 763 ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
759 psmode, arvif->vdev_id); 764 psmode, arvif->vdev_id);
760 else
761 ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
762 psmode, arvif->vdev_id);
763} 765}
764 766
765/**********************/ 767/**********************/
@@ -949,7 +951,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
949 arg->peer_ht_rates.num_rates = n; 951 arg->peer_ht_rates.num_rates = n;
950 arg->peer_num_spatial_streams = max((n+7) / 8, 1); 952 arg->peer_num_spatial_streams = max((n+7) / 8, 1);
951 953
952 ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n", 954 ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
955 arg->addr,
953 arg->peer_ht_rates.num_rates, 956 arg->peer_ht_rates.num_rates,
954 arg->peer_num_spatial_streams); 957 arg->peer_num_spatial_streams);
955} 958}
@@ -969,11 +972,11 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
969 arg->peer_flags |= WMI_PEER_QOS; 972 arg->peer_flags |= WMI_PEER_QOS;
970 973
971 if (sta->wme && sta->uapsd_queues) { 974 if (sta->wme && sta->uapsd_queues) {
972 ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n", 975 ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
973 sta->uapsd_queues, sta->max_sp); 976 sta->uapsd_queues, sta->max_sp);
974 977
975 arg->peer_flags |= WMI_PEER_APSD; 978 arg->peer_flags |= WMI_PEER_APSD;
976 arg->peer_flags |= WMI_RC_UAPSD_FLAG; 979 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
977 980
978 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 981 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
979 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 982 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
@@ -1048,7 +1051,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
1048 arg->peer_vht_rates.tx_mcs_set = 1051 arg->peer_vht_rates.tx_mcs_set =
1049 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); 1052 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
1050 1053
1051 ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n"); 1054 ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
1055 sta->addr, arg->peer_max_mpdu, arg->peer_flags);
1052} 1056}
1053 1057
1054static void ath10k_peer_assoc_h_qos(struct ath10k *ar, 1058static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@@ -1076,8 +1080,6 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
1076{ 1080{
1077 enum wmi_phy_mode phymode = MODE_UNKNOWN; 1081 enum wmi_phy_mode phymode = MODE_UNKNOWN;
1078 1082
1079 /* FIXME: add VHT */
1080
1081 switch (ar->hw->conf.chandef.chan->band) { 1083 switch (ar->hw->conf.chandef.chan->band) {
1082 case IEEE80211_BAND_2GHZ: 1084 case IEEE80211_BAND_2GHZ:
1083 if (sta->ht_cap.ht_supported) { 1085 if (sta->ht_cap.ht_supported) {
@@ -1091,7 +1093,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
1091 1093
1092 break; 1094 break;
1093 case IEEE80211_BAND_5GHZ: 1095 case IEEE80211_BAND_5GHZ:
1094 if (sta->ht_cap.ht_supported) { 1096 /*
1097 * Check VHT first.
1098 */
1099 if (sta->vht_cap.vht_supported) {
1100 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
1101 phymode = MODE_11AC_VHT80;
1102 else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
1103 phymode = MODE_11AC_VHT40;
1104 else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
1105 phymode = MODE_11AC_VHT20;
1106 } else if (sta->ht_cap.ht_supported) {
1095 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 1107 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
1096 phymode = MODE_11NA_HT40; 1108 phymode = MODE_11NA_HT40;
1097 else 1109 else
@@ -1105,6 +1117,9 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
1105 break; 1117 break;
1106 } 1118 }
1107 1119
1120 ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
1121 sta->addr, ath10k_wmi_phymode_str(phymode));
1122
1108 arg->peer_phymode = phymode; 1123 arg->peer_phymode = phymode;
1109 WARN_ON(phymode == MODE_UNKNOWN); 1124 WARN_ON(phymode == MODE_UNKNOWN);
1110} 1125}
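
The 5 GHz peer-assoc path above now checks VHT before HT and maps the station's negotiated bandwidth to a phymode. The decision tree, restated as a standalone helper (the enum values are local stand-ins, not the WMI definitions):

#include <stdbool.h>
#include <stdio.h>

enum phymode {                  /* local stand-ins for wmi_phy_mode values */
        MODE_11A,
        MODE_11NA_HT20,
        MODE_11NA_HT40,
        MODE_11AC_VHT20,
        MODE_11AC_VHT40,
        MODE_11AC_VHT80,
        MODE_UNKNOWN,
};

enum bw { BW_20, BW_40, BW_80 };

/* Mirrors the 5 GHz branch: VHT first, then HT, then plain 11a. */
static enum phymode pick_5ghz_phymode(bool vht, bool ht, enum bw bw)
{
        if (vht) {
                if (bw == BW_80)
                        return MODE_11AC_VHT80;
                if (bw == BW_40)
                        return MODE_11AC_VHT40;
                if (bw == BW_20)
                        return MODE_11AC_VHT20;
                return MODE_UNKNOWN;    /* unhandled bandwidth */
        }
        if (ht)
                return bw == BW_40 ? MODE_11NA_HT40 : MODE_11NA_HT20;
        return MODE_11A;
}

int main(void)
{
        printf("vht/80MHz -> %d, ht/40MHz -> %d, legacy -> %d\n",
               pick_5ghz_phymode(true, true, BW_80),
               pick_5ghz_phymode(false, true, BW_40),
               pick_5ghz_phymode(false, false, BW_20));
        return 0;
}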
@@ -1162,15 +1177,15 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1162 1177
1163 rcu_read_unlock(); 1178 rcu_read_unlock();
1164 1179
1180 ath10k_dbg(ATH10K_DBG_MAC,
1181 "mac vdev %d up (associated) bssid %pM aid %d\n",
1182 arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
1183
1165 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid, 1184 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
1166 bss_conf->bssid); 1185 bss_conf->bssid);
1167 if (ret) 1186 if (ret)
1168 ath10k_warn("VDEV: %d up failed: ret %d\n", 1187 ath10k_warn("VDEV: %d up failed: ret %d\n",
1169 arvif->vdev_id, ret); 1188 arvif->vdev_id, ret);
1170 else
1171 ath10k_dbg(ATH10K_DBG_MAC,
1172 "VDEV: %d associated, BSSID: %pM, AID: %d\n",
1173 arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
1174} 1189}
1175 1190
1176/* 1191/*
@@ -1191,10 +1206,11 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
1191 * No idea why this happens, even though VDEV-DOWN is supposed 1206 * No idea why this happens, even though VDEV-DOWN is supposed
1192 * to be analogous to link down, so just stop the VDEV. 1207 * to be analogous to link down, so just stop the VDEV.
1193 */ 1208 */
1209 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated)\n",
1210 arvif->vdev_id);
1211
1212 /* FIXME: check return value */
1194 ret = ath10k_vdev_stop(arvif); 1213 ret = ath10k_vdev_stop(arvif);
1195 if (!ret)
1196 ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
1197 arvif->vdev_id);
1198 1214
1199 /* 1215 /*
1200 * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and 1216 * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
@@ -1203,12 +1219,10 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
1203 * interfaces as it expects there is no rx when no interface is 1219 * interfaces as it expects there is no rx when no interface is
1204 * running. 1220 * running.
1205 */ 1221 */
1206 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1222 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
1207 if (ret)
1208 ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
1209 arvif->vdev_id, ret);
1210 1223
1211 ath10k_wmi_flush_tx(ar); 1224 /* FIXME: why don't we print an error if the wmi call fails? */
1225 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1212 1226
1213 arvif->def_wep_key_index = 0; 1227 arvif->def_wep_key_index = 0;
1214} 1228}
@@ -1333,8 +1347,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
1333 continue; 1347 continue;
1334 1348
1335 ath10k_dbg(ATH10K_DBG_WMI, 1349 ath10k_dbg(ATH10K_DBG_WMI,
1336 "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", 1350 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
1337 __func__, ch - arg.channels, arg.n_channels, 1351 ch - arg.channels, arg.n_channels,
1338 ch->freq, ch->max_power, ch->max_reg_power, 1352 ch->freq, ch->max_power, ch->max_reg_power,
1339 ch->max_antenna_gain, ch->mode); 1353 ch->max_antenna_gain, ch->mode);
1340 1354
@@ -1421,10 +1435,6 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
1421 struct ieee80211_key_conf *key = info->control.hw_key; 1435 struct ieee80211_key_conf *key = info->control.hw_key;
1422 int ret; 1436 int ret;
1423 1437
1424 /* TODO AP mode should be implemented */
1425 if (vif->type != NL80211_IFTYPE_STATION)
1426 return;
1427
1428 if (!ieee80211_has_protected(hdr->frame_control)) 1438 if (!ieee80211_has_protected(hdr->frame_control))
1429 return; 1439 return;
1430 1440
@@ -1438,7 +1448,8 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
1438 if (key->keyidx == arvif->def_wep_key_index) 1448 if (key->keyidx == arvif->def_wep_key_index)
1439 return; 1449 return;
1440 1450
1441 ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx); 1451 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d keyidx %d\n",
1452 arvif->vdev_id, key->keyidx);
1442 1453
1443 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 1454 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
1444 WMI_VDEV_PARAM_DEF_KEYID, 1455 WMI_VDEV_PARAM_DEF_KEYID,
@@ -1480,6 +1491,12 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
1480 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1491 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1481 int ret; 1492 int ret;
1482 1493
1494 if (ar->htt.target_version_major >= 3) {
1495 /* Since HTT 3.0 there is no separate mgmt tx command */
1496 ret = ath10k_htt_tx(&ar->htt, skb);
1497 goto exit;
1498 }
1499
1483 if (ieee80211_is_mgmt(hdr->frame_control)) 1500 if (ieee80211_is_mgmt(hdr->frame_control))
1484 ret = ath10k_htt_mgmt_tx(&ar->htt, skb); 1501 ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
1485 else if (ieee80211_is_nullfunc(hdr->frame_control)) 1502 else if (ieee80211_is_nullfunc(hdr->frame_control))
@@ -1491,6 +1508,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
1491 else 1508 else
1492 ret = ath10k_htt_tx(&ar->htt, skb); 1509 ret = ath10k_htt_tx(&ar->htt, skb);
1493 1510
1511exit:
1494 if (ret) { 1512 if (ret) {
1495 ath10k_warn("tx failed (%d). dropping packet.\n", ret); 1513 ath10k_warn("tx failed (%d). dropping packet.\n", ret);
1496 ieee80211_free_txskb(ar->hw, skb); 1514 ieee80211_free_txskb(ar->hw, skb);
@@ -1534,7 +1552,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
1534 1552
1535 mutex_lock(&ar->conf_mutex); 1553 mutex_lock(&ar->conf_mutex);
1536 1554
1537 ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n", 1555 ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
1538 skb); 1556 skb);
1539 1557
1540 hdr = (struct ieee80211_hdr *)skb->data; 1558 hdr = (struct ieee80211_hdr *)skb->data;
@@ -1546,6 +1564,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
1546 spin_unlock_bh(&ar->data_lock); 1564 spin_unlock_bh(&ar->data_lock);
1547 1565
1548 if (peer) 1566 if (peer)
1567 /* FIXME: should this use ath10k_warn()? */
1549 ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", 1568 ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
1550 peer_addr, vdev_id); 1569 peer_addr, vdev_id);
1551 1570
@@ -1643,8 +1662,6 @@ static int ath10k_abort_scan(struct ath10k *ar)
1643 return -EIO; 1662 return -EIO;
1644 } 1663 }
1645 1664
1646 ath10k_wmi_flush_tx(ar);
1647
1648 ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); 1665 ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
1649 if (ret == 0) 1666 if (ret == 0)
1650 ath10k_warn("timed out while waiting for scan to stop\n"); 1667 ath10k_warn("timed out while waiting for scan to stop\n");
@@ -1678,10 +1695,6 @@ static int ath10k_start_scan(struct ath10k *ar,
1678 if (ret) 1695 if (ret)
1679 return ret; 1696 return ret;
1680 1697
1681 /* make sure we submit the command so the completion
1682 * timeout makes sense */
1683 ath10k_wmi_flush_tx(ar);
1684
1685 ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ); 1698 ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
1686 if (ret == 0) { 1699 if (ret == 0) {
1687 ath10k_abort_scan(ar); 1700 ath10k_abort_scan(ar);
@@ -1727,8 +1740,10 @@ static void ath10k_tx(struct ieee80211_hw *hw,
1727 /* we must calculate tid before we apply qos workaround 1740 /* we must calculate tid before we apply qos workaround
1728 * as we'd lose the qos control field */ 1741 * as we'd lose the qos control field */
1729 tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; 1742 tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1730 if (ieee80211_is_data_qos(hdr->frame_control) && 1743 if (ieee80211_is_mgmt(hdr->frame_control)) {
1731 is_unicast_ether_addr(ieee80211_get_DA(hdr))) { 1744 tid = HTT_DATA_TX_EXT_TID_MGMT;
1745 } else if (ieee80211_is_data_qos(hdr->frame_control) &&
1746 is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
1732 u8 *qc = ieee80211_get_qos_ctl(hdr); 1747 u8 *qc = ieee80211_get_qos_ctl(hdr);
1733 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 1748 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1734 } 1749 }
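
ath10k_tx() now assigns management frames their own extended TID before the QoS workaround strips the QoS control field; unicast QoS data keeps the TID from its QoS control byte, and everything else falls back to the non-QoS multicast/broadcast TID. A sketch of that selection (the TID constants here are illustrative stand-ins for the HTT_DATA_TX_EXT_TID_* values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the driver defines its own HTT extended TIDs. */
enum ext_tid {
        TID_NON_QOS_MCAST_BCAST = 16,
        TID_MGMT                = 17,
};

#define QOS_CTL_TID_MASK 0x0f           /* IEEE80211_QOS_CTL_TID_MASK */

/* Mirrors the tid selection: mgmt gets the mgmt TID, unicast QoS data
 * takes the TID from its QoS control byte, the rest is non-QoS. */
static unsigned int pick_tid(bool is_mgmt, bool is_qos_data, bool unicast_da,
                             uint8_t qos_ctl0)
{
        if (is_mgmt)
                return TID_MGMT;
        if (is_qos_data && unicast_da)
                return qos_ctl0 & QOS_CTL_TID_MASK;
        return TID_NON_QOS_MCAST_BCAST;
}

int main(void)
{
        printf("mgmt -> %u, qos unicast (qc 0x26) -> %u, bcast data -> %u\n",
               pick_tid(true, false, true, 0),
               pick_tid(false, true, true, 0x26),
               pick_tid(false, false, false, 0));
        return 0;
}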
@@ -1742,7 +1757,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
1742 ath10k_tx_h_seq_no(skb); 1757 ath10k_tx_h_seq_no(skb);
1743 } 1758 }
1744 1759
1745 memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb))); 1760 ATH10K_SKB_CB(skb)->htt.is_offchan = false;
1746 ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id; 1761 ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
1747 ATH10K_SKB_CB(skb)->htt.tid = tid; 1762 ATH10K_SKB_CB(skb)->htt.tid = tid;
1748 1763
@@ -1884,7 +1899,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
1884 mutex_lock(&ar->conf_mutex); 1899 mutex_lock(&ar->conf_mutex);
1885 1900
1886 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1901 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1887 ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n", 1902 ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
1888 conf->chandef.chan->center_freq); 1903 conf->chandef.chan->center_freq);
1889 spin_lock_bh(&ar->data_lock); 1904 spin_lock_bh(&ar->data_lock);
1890 ar->rx_channel = conf->chandef.chan; 1905 ar->rx_channel = conf->chandef.chan;
@@ -1901,7 +1916,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
1901 ret = ath10k_monitor_destroy(ar); 1916 ret = ath10k_monitor_destroy(ar);
1902 } 1917 }
1903 1918
1904 ath10k_wmi_flush_tx(ar);
1905 mutex_unlock(&ar->conf_mutex); 1919 mutex_unlock(&ar->conf_mutex);
1906 return ret; 1920 return ret;
1907} 1921}
@@ -1973,7 +1987,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
1973 break; 1987 break;
1974 } 1988 }
1975 1989
1976 ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n", 1990 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
1977 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype); 1991 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
1978 1992
1979 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 1993 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
@@ -2052,7 +2066,12 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2052 2066
2053 mutex_lock(&ar->conf_mutex); 2067 mutex_lock(&ar->conf_mutex);
2054 2068
2055 ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id); 2069 spin_lock_bh(&ar->data_lock);
2070 if (arvif->beacon) {
2071 dev_kfree_skb_any(arvif->beacon);
2072 arvif->beacon = NULL;
2073 }
2074 spin_unlock_bh(&ar->data_lock);
2056 2075
2057 ar->free_vdev_map |= 1 << (arvif->vdev_id); 2076 ar->free_vdev_map |= 1 << (arvif->vdev_id);
2058 2077
@@ -2064,6 +2083,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2064 kfree(arvif->u.ap.noa_data); 2083 kfree(arvif->u.ap.noa_data);
2065 } 2084 }
2066 2085
2086 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
2087 arvif->vdev_id);
2088
2067 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 2089 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
2068 if (ret) 2090 if (ret)
2069 ath10k_warn("WMI vdev delete failed: %d\n", ret); 2091 ath10k_warn("WMI vdev delete failed: %d\n", ret);
@@ -2105,18 +2127,20 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
2105 2127
2106 if ((ar->filter_flags & FIF_PROMISC_IN_BSS) && 2128 if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
2107 !ar->monitor_enabled) { 2129 !ar->monitor_enabled) {
2130 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
2131 ar->monitor_vdev_id);
2132
2108 ret = ath10k_monitor_start(ar, ar->monitor_vdev_id); 2133 ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
2109 if (ret) 2134 if (ret)
2110 ath10k_warn("Unable to start monitor mode\n"); 2135 ath10k_warn("Unable to start monitor mode\n");
2111 else
2112 ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
2113 } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && 2136 } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
2114 ar->monitor_enabled) { 2137 ar->monitor_enabled) {
2138 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
2139 ar->monitor_vdev_id);
2140
2115 ret = ath10k_monitor_stop(ar); 2141 ret = ath10k_monitor_stop(ar);
2116 if (ret) 2142 if (ret)
2117 ath10k_warn("Unable to stop monitor mode\n"); 2143 ath10k_warn("Unable to stop monitor mode\n");
2118 else
2119 ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
2120 } 2144 }
2121 2145
2122 mutex_unlock(&ar->conf_mutex); 2146 mutex_unlock(&ar->conf_mutex);
@@ -2141,41 +2165,41 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2141 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 2165 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
2142 WMI_VDEV_PARAM_BEACON_INTERVAL, 2166 WMI_VDEV_PARAM_BEACON_INTERVAL,
2143 arvif->beacon_interval); 2167 arvif->beacon_interval);
2168 ath10k_dbg(ATH10K_DBG_MAC,
2169 "mac vdev %d beacon_interval %d\n",
2170 arvif->vdev_id, arvif->beacon_interval);
2171
2144 if (ret) 2172 if (ret)
2145 ath10k_warn("Failed to set beacon interval for VDEV: %d\n", 2173 ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
2146 arvif->vdev_id); 2174 arvif->vdev_id);
2147 else
2148 ath10k_dbg(ATH10K_DBG_MAC,
2149 "Beacon interval: %d set for VDEV: %d\n",
2150 arvif->beacon_interval, arvif->vdev_id);
2151 } 2175 }
2152 2176
2153 if (changed & BSS_CHANGED_BEACON) { 2177 if (changed & BSS_CHANGED_BEACON) {
2178 ath10k_dbg(ATH10K_DBG_MAC,
2179 "vdev %d set beacon tx mode to staggered\n",
2180 arvif->vdev_id);
2181
2154 ret = ath10k_wmi_pdev_set_param(ar, 2182 ret = ath10k_wmi_pdev_set_param(ar,
2155 WMI_PDEV_PARAM_BEACON_TX_MODE, 2183 WMI_PDEV_PARAM_BEACON_TX_MODE,
2156 WMI_BEACON_STAGGERED_MODE); 2184 WMI_BEACON_STAGGERED_MODE);
2157 if (ret) 2185 if (ret)
2158 ath10k_warn("Failed to set beacon mode for VDEV: %d\n", 2186 ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
2159 arvif->vdev_id); 2187 arvif->vdev_id);
2160 else
2161 ath10k_dbg(ATH10K_DBG_MAC,
2162 "Set staggered beacon mode for VDEV: %d\n",
2163 arvif->vdev_id);
2164 } 2188 }
2165 2189
2166 if (changed & BSS_CHANGED_BEACON_INFO) { 2190 if (changed & BSS_CHANGED_BEACON_INFO) {
2167 arvif->dtim_period = info->dtim_period; 2191 arvif->dtim_period = info->dtim_period;
2168 2192
2193 ath10k_dbg(ATH10K_DBG_MAC,
2194 "mac vdev %d dtim_period %d\n",
2195 arvif->vdev_id, arvif->dtim_period);
2196
2169 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 2197 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
2170 WMI_VDEV_PARAM_DTIM_PERIOD, 2198 WMI_VDEV_PARAM_DTIM_PERIOD,
2171 arvif->dtim_period); 2199 arvif->dtim_period);
2172 if (ret) 2200 if (ret)
2173 ath10k_warn("Failed to set dtim period for VDEV: %d\n", 2201 ath10k_warn("Failed to set dtim period for VDEV: %d\n",
2174 arvif->vdev_id); 2202 arvif->vdev_id);
2175 else
2176 ath10k_dbg(ATH10K_DBG_MAC,
2177 "Set dtim period: %d for VDEV: %d\n",
2178 arvif->dtim_period, arvif->vdev_id);
2179 } 2203 }
2180 2204
2181 if (changed & BSS_CHANGED_SSID && 2205 if (changed & BSS_CHANGED_SSID &&
@@ -2188,16 +2212,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2188 2212
2189 if (changed & BSS_CHANGED_BSSID) { 2213 if (changed & BSS_CHANGED_BSSID) {
2190 if (!is_zero_ether_addr(info->bssid)) { 2214 if (!is_zero_ether_addr(info->bssid)) {
2215 ath10k_dbg(ATH10K_DBG_MAC,
2216 "mac vdev %d create peer %pM\n",
2217 arvif->vdev_id, info->bssid);
2218
2191 ret = ath10k_peer_create(ar, arvif->vdev_id, 2219 ret = ath10k_peer_create(ar, arvif->vdev_id,
2192 info->bssid); 2220 info->bssid);
2193 if (ret) 2221 if (ret)
2194 ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", 2222 ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
2195 info->bssid, arvif->vdev_id); 2223 info->bssid, arvif->vdev_id);
2196 else
2197 ath10k_dbg(ATH10K_DBG_MAC,
2198 "Added peer: %pM for VDEV: %d\n",
2199 info->bssid, arvif->vdev_id);
2200
2201 2224
2202 if (vif->type == NL80211_IFTYPE_STATION) { 2225 if (vif->type == NL80211_IFTYPE_STATION) {
2203 /* 2226 /*
@@ -2207,11 +2230,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2207 memcpy(arvif->u.sta.bssid, info->bssid, 2230 memcpy(arvif->u.sta.bssid, info->bssid,
2208 ETH_ALEN); 2231 ETH_ALEN);
2209 2232
2233 ath10k_dbg(ATH10K_DBG_MAC,
2234 "mac vdev %d start %pM\n",
2235 arvif->vdev_id, info->bssid);
2236
2237 /* FIXME: check return value */
2210 ret = ath10k_vdev_start(arvif); 2238 ret = ath10k_vdev_start(arvif);
2211 if (!ret)
2212 ath10k_dbg(ATH10K_DBG_MAC,
2213 "VDEV: %d started with BSSID: %pM\n",
2214 arvif->vdev_id, info->bssid);
2215 } 2239 }
2216 2240
2217 /* 2241 /*
@@ -2235,16 +2259,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2235 else 2259 else
2236 cts_prot = 0; 2260 cts_prot = 0;
2237 2261
2262 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
2263 arvif->vdev_id, cts_prot);
2264
2238 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 2265 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
2239 WMI_VDEV_PARAM_ENABLE_RTSCTS, 2266 WMI_VDEV_PARAM_ENABLE_RTSCTS,
2240 cts_prot); 2267 cts_prot);
2241 if (ret) 2268 if (ret)
2242 ath10k_warn("Failed to set CTS prot for VDEV: %d\n", 2269 ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
2243 arvif->vdev_id); 2270 arvif->vdev_id);
2244 else
2245 ath10k_dbg(ATH10K_DBG_MAC,
2246 "Set CTS prot: %d for VDEV: %d\n",
2247 cts_prot, arvif->vdev_id);
2248 } 2271 }
2249 2272
2250 if (changed & BSS_CHANGED_ERP_SLOT) { 2273 if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2255,16 +2278,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2255 else 2278 else
2256 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ 2279 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
2257 2280
2281 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
2282 arvif->vdev_id, slottime);
2283
2258 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 2284 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
2259 WMI_VDEV_PARAM_SLOT_TIME, 2285 WMI_VDEV_PARAM_SLOT_TIME,
2260 slottime); 2286 slottime);
2261 if (ret) 2287 if (ret)
2262 ath10k_warn("Failed to set erp slot for VDEV: %d\n", 2288 ath10k_warn("Failed to set erp slot for VDEV: %d\n",
2263 arvif->vdev_id); 2289 arvif->vdev_id);
2264 else
2265 ath10k_dbg(ATH10K_DBG_MAC,
2266 "Set slottime: %d for VDEV: %d\n",
2267 slottime, arvif->vdev_id);
2268 } 2290 }
2269 2291
2270 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 2292 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2274,16 +2296,16 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2274 else 2296 else
2275 preamble = WMI_VDEV_PREAMBLE_LONG; 2297 preamble = WMI_VDEV_PREAMBLE_LONG;
2276 2298
2299 ath10k_dbg(ATH10K_DBG_MAC,
2300 "mac vdev %d preamble %dn",
2301 arvif->vdev_id, preamble);
2302
2277 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 2303 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
2278 WMI_VDEV_PARAM_PREAMBLE, 2304 WMI_VDEV_PARAM_PREAMBLE,
2279 preamble); 2305 preamble);
2280 if (ret) 2306 if (ret)
2281 ath10k_warn("Failed to set preamble for VDEV: %d\n", 2307 ath10k_warn("Failed to set preamble for VDEV: %d\n",
2282 arvif->vdev_id); 2308 arvif->vdev_id);
2283 else
2284 ath10k_dbg(ATH10K_DBG_MAC,
2285 "Set preamble: %d for VDEV: %d\n",
2286 preamble, arvif->vdev_id);
2287 } 2309 }
2288 2310
2289 if (changed & BSS_CHANGED_ASSOC) { 2311 if (changed & BSS_CHANGED_ASSOC) {
@@ -2474,27 +2496,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
2474 /* 2496 /*
2475 * New station addition. 2497 * New station addition.
2476 */ 2498 */
2499 ath10k_dbg(ATH10K_DBG_MAC,
2500 "mac vdev %d peer create %pM (new sta)\n",
2501 arvif->vdev_id, sta->addr);
2502
2477 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); 2503 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
2478 if (ret) 2504 if (ret)
2479 ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", 2505 ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
2480 sta->addr, arvif->vdev_id); 2506 sta->addr, arvif->vdev_id);
2481 else
2482 ath10k_dbg(ATH10K_DBG_MAC,
2483 "Added peer: %pM for VDEV: %d\n",
2484 sta->addr, arvif->vdev_id);
2485 } else if ((old_state == IEEE80211_STA_NONE && 2507 } else if ((old_state == IEEE80211_STA_NONE &&
2486 new_state == IEEE80211_STA_NOTEXIST)) { 2508 new_state == IEEE80211_STA_NOTEXIST)) {
2487 /* 2509 /*
2488 * Existing station deletion. 2510 * Existing station deletion.
2489 */ 2511 */
2512 ath10k_dbg(ATH10K_DBG_MAC,
2513 "mac vdev %d peer delete %pM (sta gone)\n",
2514 arvif->vdev_id, sta->addr);
2490 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 2515 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
2491 if (ret) 2516 if (ret)
2492 ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n", 2517 ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
2493 sta->addr, arvif->vdev_id); 2518 sta->addr, arvif->vdev_id);
2494 else
2495 ath10k_dbg(ATH10K_DBG_MAC,
2496 "Removed peer: %pM for VDEV: %d\n",
2497 sta->addr, arvif->vdev_id);
2498 2519
2499 if (vif->type == NL80211_IFTYPE_STATION) 2520 if (vif->type == NL80211_IFTYPE_STATION)
2500 ath10k_bss_disassoc(hw, vif); 2521 ath10k_bss_disassoc(hw, vif);
@@ -2505,14 +2526,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
2505 /* 2526 /*
2506 * New association. 2527 * New association.
2507 */ 2528 */
2529 ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
2530 sta->addr);
2531
2508 ret = ath10k_station_assoc(ar, arvif, sta); 2532 ret = ath10k_station_assoc(ar, arvif, sta);
2509 if (ret) 2533 if (ret)
2510 ath10k_warn("Failed to associate station: %pM\n", 2534 ath10k_warn("Failed to associate station: %pM\n",
2511 sta->addr); 2535 sta->addr);
2512 else
2513 ath10k_dbg(ATH10K_DBG_MAC,
2514 "Station %pM moved to assoc state\n",
2515 sta->addr);
2516 } else if (old_state == IEEE80211_STA_ASSOC && 2536 } else if (old_state == IEEE80211_STA_ASSOC &&
2517 new_state == IEEE80211_STA_AUTH && 2537 new_state == IEEE80211_STA_AUTH &&
2518 (vif->type == NL80211_IFTYPE_AP || 2538 (vif->type == NL80211_IFTYPE_AP ||
@@ -2520,14 +2540,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
2520 /* 2540 /*
2521 * Disassociation. 2541 * Disassociation.
2522 */ 2542 */
2543 ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
2544 sta->addr);
2545
2523 ret = ath10k_station_disassoc(ar, arvif, sta); 2546 ret = ath10k_station_disassoc(ar, arvif, sta);
2524 if (ret) 2547 if (ret)
2525 ath10k_warn("Failed to disassociate station: %pM\n", 2548 ath10k_warn("Failed to disassociate station: %pM\n",
2526 sta->addr); 2549 sta->addr);
2527 else
2528 ath10k_dbg(ATH10K_DBG_MAC,
2529 "Station %pM moved to disassociated state\n",
2530 sta->addr);
2531 } 2550 }
2532 2551
2533 mutex_unlock(&ar->conf_mutex); 2552 mutex_unlock(&ar->conf_mutex);
@@ -2747,14 +2766,13 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
2747 if (ar_iter->ar->state == ATH10K_STATE_RESTARTED) 2766 if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
2748 return; 2767 return;
2749 2768
2769 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts_threshold %d\n",
2770 arvif->vdev_id, rts);
2771
2750 ar_iter->ret = ath10k_mac_set_rts(arvif, rts); 2772 ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
2751 if (ar_iter->ret) 2773 if (ar_iter->ret)
2752 ath10k_warn("Failed to set RTS threshold for VDEV: %d\n", 2774 ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
2753 arvif->vdev_id); 2775 arvif->vdev_id);
2754 else
2755 ath10k_dbg(ATH10K_DBG_MAC,
2756 "Set RTS threshold: %d for VDEV: %d\n",
2757 rts, arvif->vdev_id);
2758} 2776}
2759 2777
2760static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 2778static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
@@ -2789,14 +2807,13 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
2789 if (ar_iter->ar->state == ATH10K_STATE_RESTARTED) 2807 if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
2790 return; 2808 return;
2791 2809
2810 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation_threshold %d\n",
2811 arvif->vdev_id, frag);
2812
2792 ar_iter->ret = ath10k_mac_set_frag(arvif, frag); 2813 ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
2793 if (ar_iter->ret) 2814 if (ar_iter->ret)
2794 ath10k_warn("Failed to set frag threshold for VDEV: %d\n", 2815 ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
2795 arvif->vdev_id); 2816 arvif->vdev_id);
2796 else
2797 ath10k_dbg(ATH10K_DBG_MAC,
2798 "Set frag threshold: %d for VDEV: %d\n",
2799 frag, arvif->vdev_id);
2800} 2817}
2801 2818
2802static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) 2819static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@@ -2836,8 +2853,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
2836 bool empty; 2853 bool empty;
2837 2854
2838 spin_lock_bh(&ar->htt.tx_lock); 2855 spin_lock_bh(&ar->htt.tx_lock);
2839 empty = bitmap_empty(ar->htt.used_msdu_ids, 2856 empty = (ar->htt.num_pending_tx == 0);
2840 ar->htt.max_num_pending_tx);
2841 spin_unlock_bh(&ar->htt.tx_lock); 2857 spin_unlock_bh(&ar->htt.tx_lock);
2842 2858
2843 skip = (ar->state == ATH10K_STATE_WEDGED); 2859 skip = (ar->state == ATH10K_STATE_WEDGED);
@@ -3326,6 +3342,10 @@ int ath10k_mac_register(struct ath10k *ar)
3326 IEEE80211_HW_WANT_MONITOR_VIF | 3342 IEEE80211_HW_WANT_MONITOR_VIF |
3327 IEEE80211_HW_AP_LINK_PS; 3343 IEEE80211_HW_AP_LINK_PS;
3328 3344
 3345 /* An MSDU can have an HTT TX fragment pushed in front. The additional 4
 3346 * bytes are used for padding/alignment if necessary. */
3347 ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
3348
3329 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) 3349 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
3330 ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS; 3350 ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
3331 3351
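
The extra_tx_headroom comment above boils down to simple arithmetic: room for two HTT TX fragment descriptors plus 4 bytes of alignment slack per MSDU. The stand-in struct below assumes the descriptor is two 32-bit words (physical address and length), which is an assumption here; the authoritative layout lives in htt.h. A minimal, runnable sketch of the sum:

/* Worked arithmetic for the extra_tx_headroom line above. */
#include <stdint.h>
#include <stdio.h>

struct htt_frag_example {      /* stand-in for struct htt_data_tx_desc_frag */
	uint32_t paddr;        /* assumed: physical address of the fragment */
	uint32_t len;          /* assumed: fragment length */
};

int main(void)
{
	/* two fragment descriptors per MSDU plus 4 bytes of padding/alignment */
	size_t headroom = sizeof(struct htt_frag_example) * 2 + 4;

	printf("extra_tx_headroom grows by %zu bytes\n", headroom); /* 20 */
	return 0;
}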
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e2f9ef50b1bd..dff23d97bed0 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -36,11 +36,9 @@ static unsigned int ath10k_target_ps;
36module_param(ath10k_target_ps, uint, 0644); 36module_param(ath10k_target_ps, uint, 0644);
37MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); 37MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
38 38
39#define QCA988X_1_0_DEVICE_ID (0xabcd)
40#define QCA988X_2_0_DEVICE_ID (0x003c) 39#define QCA988X_2_0_DEVICE_ID (0x003c)
41 40
42static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { 41static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
43 { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
44 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ 42 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
45 {0} 43 {0}
46}; 44};
@@ -50,9 +48,9 @@ static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
50 48
51static void ath10k_pci_process_ce(struct ath10k *ar); 49static void ath10k_pci_process_ce(struct ath10k *ar);
52static int ath10k_pci_post_rx(struct ath10k *ar); 50static int ath10k_pci_post_rx(struct ath10k *ar);
53static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, 51static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
54 int num); 52 int num);
55static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info); 53static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
56static void ath10k_pci_stop_ce(struct ath10k *ar); 54static void ath10k_pci_stop_ce(struct ath10k *ar);
57static void ath10k_pci_device_reset(struct ath10k *ar); 55static void ath10k_pci_device_reset(struct ath10k *ar);
58static int ath10k_pci_reset_target(struct ath10k *ar); 56static int ath10k_pci_reset_target(struct ath10k *ar);
@@ -60,43 +58,145 @@ static int ath10k_pci_start_intr(struct ath10k *ar);
60static void ath10k_pci_stop_intr(struct ath10k *ar); 58static void ath10k_pci_stop_intr(struct ath10k *ar);
61 59
62static const struct ce_attr host_ce_config_wlan[] = { 60static const struct ce_attr host_ce_config_wlan[] = {
63 /* host->target HTC control and raw streams */ 61 /* CE0: host->target HTC control and raw streams */
64 { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,}, 62 {
65 /* could be moved to share CE3 */ 63 .flags = CE_ATTR_FLAGS,
66 /* target->host HTT + HTC control */ 64 .src_nentries = 16,
67 { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,}, 65 .src_sz_max = 256,
68 /* target->host WMI */ 66 .dest_nentries = 0,
69 { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, 67 },
70 /* host->target WMI */ 68
71 { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, 69 /* CE1: target->host HTT + HTC control */
72 /* host->target HTT */ 70 {
73 { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0, 71 .flags = CE_ATTR_FLAGS,
74 CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, 72 .src_nentries = 0,
75 /* unused */ 73 .src_sz_max = 512,
76 { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, 74 .dest_nentries = 512,
77 /* Target autonomous hif_memcpy */ 75 },
78 { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, 76
79 /* ce_diag, the Diagnostic Window */ 77 /* CE2: target->host WMI */
80 { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, 78 {
79 .flags = CE_ATTR_FLAGS,
80 .src_nentries = 0,
81 .src_sz_max = 2048,
82 .dest_nentries = 32,
83 },
84
85 /* CE3: host->target WMI */
86 {
87 .flags = CE_ATTR_FLAGS,
88 .src_nentries = 32,
89 .src_sz_max = 2048,
90 .dest_nentries = 0,
91 },
92
93 /* CE4: host->target HTT */
94 {
95 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
96 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
97 .src_sz_max = 256,
98 .dest_nentries = 0,
99 },
100
101 /* CE5: unused */
102 {
103 .flags = CE_ATTR_FLAGS,
104 .src_nentries = 0,
105 .src_sz_max = 0,
106 .dest_nentries = 0,
107 },
108
109 /* CE6: target autonomous hif_memcpy */
110 {
111 .flags = CE_ATTR_FLAGS,
112 .src_nentries = 0,
113 .src_sz_max = 0,
114 .dest_nentries = 0,
115 },
116
117 /* CE7: ce_diag, the Diagnostic Window */
118 {
119 .flags = CE_ATTR_FLAGS,
120 .src_nentries = 2,
121 .src_sz_max = DIAG_TRANSFER_LIMIT,
122 .dest_nentries = 2,
123 },
81}; 124};
82 125
83/* Target firmware's Copy Engine configuration. */ 126/* Target firmware's Copy Engine configuration. */
84static const struct ce_pipe_config target_ce_config_wlan[] = { 127static const struct ce_pipe_config target_ce_config_wlan[] = {
85 /* host->target HTC control and raw streams */ 128 /* CE0: host->target HTC control and raw streams */
86 { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,}, 129 {
87 /* target->host HTT + HTC control */ 130 .pipenum = 0,
88 { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,}, 131 .pipedir = PIPEDIR_OUT,
89 /* target->host WMI */ 132 .nentries = 32,
90 { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, 133 .nbytes_max = 256,
91 /* host->target WMI */ 134 .flags = CE_ATTR_FLAGS,
92 { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, 135 .reserved = 0,
93 /* host->target HTT */ 136 },
94 { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,}, 137
138 /* CE1: target->host HTT + HTC control */
139 {
140 .pipenum = 1,
141 .pipedir = PIPEDIR_IN,
142 .nentries = 32,
143 .nbytes_max = 512,
144 .flags = CE_ATTR_FLAGS,
145 .reserved = 0,
146 },
147
148 /* CE2: target->host WMI */
149 {
150 .pipenum = 2,
151 .pipedir = PIPEDIR_IN,
152 .nentries = 32,
153 .nbytes_max = 2048,
154 .flags = CE_ATTR_FLAGS,
155 .reserved = 0,
156 },
157
158 /* CE3: host->target WMI */
159 {
160 .pipenum = 3,
161 .pipedir = PIPEDIR_OUT,
162 .nentries = 32,
163 .nbytes_max = 2048,
164 .flags = CE_ATTR_FLAGS,
165 .reserved = 0,
166 },
167
168 /* CE4: host->target HTT */
169 {
170 .pipenum = 4,
171 .pipedir = PIPEDIR_OUT,
172 .nentries = 256,
173 .nbytes_max = 256,
174 .flags = CE_ATTR_FLAGS,
175 .reserved = 0,
176 },
177
95 /* NB: 50% of src nentries, since tx has 2 frags */ 178 /* NB: 50% of src nentries, since tx has 2 frags */
96 /* unused */ 179
97 { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, 180 /* CE5: unused */
98 /* Reserved for target autonomous hif_memcpy */ 181 {
99 { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,}, 182 .pipenum = 5,
183 .pipedir = PIPEDIR_OUT,
184 .nentries = 32,
185 .nbytes_max = 2048,
186 .flags = CE_ATTR_FLAGS,
187 .reserved = 0,
188 },
189
190 /* CE6: Reserved for target autonomous hif_memcpy */
191 {
192 .pipenum = 6,
193 .pipedir = PIPEDIR_INOUT,
194 .nentries = 32,
195 .nbytes_max = 4096,
196 .flags = CE_ATTR_FLAGS,
197 .reserved = 0,
198 },
199
100 /* CE7 used only by Host */ 200 /* CE7 used only by Host */
101}; 201};
102 202
@@ -114,7 +214,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
114 unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 214 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
115 unsigned int id; 215 unsigned int id;
116 unsigned int flags; 216 unsigned int flags;
117 struct ce_state *ce_diag; 217 struct ath10k_ce_pipe *ce_diag;
118 /* Host buffer address in CE space */ 218 /* Host buffer address in CE space */
119 u32 ce_data; 219 u32 ce_data;
120 dma_addr_t ce_data_base = 0; 220 dma_addr_t ce_data_base = 0;
@@ -278,7 +378,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
278 unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 378 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
279 unsigned int id; 379 unsigned int id;
280 unsigned int flags; 380 unsigned int flags;
281 struct ce_state *ce_diag; 381 struct ath10k_ce_pipe *ce_diag;
282 void *data_buf = NULL; 382 void *data_buf = NULL;
283 u32 ce_data; /* Host buffer address in CE space */ 383 u32 ce_data; /* Host buffer address in CE space */
284 dma_addr_t ce_data_base = 0; 384 dma_addr_t ce_data_base = 0;
@@ -437,7 +537,7 @@ static void ath10k_pci_wait(struct ath10k *ar)
437 ath10k_warn("Unable to wakeup target\n"); 537 ath10k_warn("Unable to wakeup target\n");
438} 538}
439 539
440void ath10k_do_pci_wake(struct ath10k *ar) 540int ath10k_do_pci_wake(struct ath10k *ar)
441{ 541{
442 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 542 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
443 void __iomem *pci_addr = ar_pci->mem; 543 void __iomem *pci_addr = ar_pci->mem;
@@ -453,18 +553,19 @@ void ath10k_do_pci_wake(struct ath10k *ar)
453 atomic_inc(&ar_pci->keep_awake_count); 553 atomic_inc(&ar_pci->keep_awake_count);
454 554
455 if (ar_pci->verified_awake) 555 if (ar_pci->verified_awake)
456 return; 556 return 0;
457 557
458 for (;;) { 558 for (;;) {
459 if (ath10k_pci_target_is_awake(ar)) { 559 if (ath10k_pci_target_is_awake(ar)) {
460 ar_pci->verified_awake = true; 560 ar_pci->verified_awake = true;
461 break; 561 return 0;
462 } 562 }
463 563
464 if (tot_delay > PCIE_WAKE_TIMEOUT) { 564 if (tot_delay > PCIE_WAKE_TIMEOUT) {
465 ath10k_warn("target takes too long to wake up (awake count %d)\n", 565 ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
566 PCIE_WAKE_TIMEOUT,
466 atomic_read(&ar_pci->keep_awake_count)); 567 atomic_read(&ar_pci->keep_awake_count));
467 break; 568 return -ETIMEDOUT;
468 } 569 }
469 570
470 udelay(curr_delay); 571 udelay(curr_delay);
@@ -493,7 +594,7 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
493 * FIXME: Handle OOM properly. 594 * FIXME: Handle OOM properly.
494 */ 595 */
495static inline 596static inline
496struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info) 597struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
497{ 598{
498 struct ath10k_pci_compl *compl = NULL; 599 struct ath10k_pci_compl *compl = NULL;
499 600
@@ -511,39 +612,28 @@ exit:
511} 612}
512 613
513/* Called by lower (CE) layer when a send to Target completes. */ 614/* Called by lower (CE) layer when a send to Target completes. */
514static void ath10k_pci_ce_send_done(struct ce_state *ce_state, 615static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
515 void *transfer_context,
516 u32 ce_data,
517 unsigned int nbytes,
518 unsigned int transfer_id)
519{ 616{
520 struct ath10k *ar = ce_state->ar; 617 struct ath10k *ar = ce_state->ar;
521 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 618 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
522 struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; 619 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
523 struct ath10k_pci_compl *compl; 620 struct ath10k_pci_compl *compl;
524 bool process = false; 621 void *transfer_context;
525 622 u32 ce_data;
526 do { 623 unsigned int nbytes;
527 /* 624 unsigned int transfer_id;
528 * For the send completion of an item in sendlist, just
529 * increment num_sends_allowed. The upper layer callback will
530 * be triggered when last fragment is done with send.
531 */
532 if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
533 spin_lock_bh(&pipe_info->pipe_lock);
534 pipe_info->num_sends_allowed++;
535 spin_unlock_bh(&pipe_info->pipe_lock);
536 continue;
537 }
538 625
626 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
627 &ce_data, &nbytes,
628 &transfer_id) == 0) {
539 compl = get_free_compl(pipe_info); 629 compl = get_free_compl(pipe_info);
540 if (!compl) 630 if (!compl)
541 break; 631 break;
542 632
543 compl->send_or_recv = HIF_CE_COMPLETE_SEND; 633 compl->state = ATH10K_PCI_COMPL_SEND;
544 compl->ce_state = ce_state; 634 compl->ce_state = ce_state;
545 compl->pipe_info = pipe_info; 635 compl->pipe_info = pipe_info;
546 compl->transfer_context = transfer_context; 636 compl->skb = transfer_context;
547 compl->nbytes = nbytes; 637 compl->nbytes = nbytes;
548 compl->transfer_id = transfer_id; 638 compl->transfer_id = transfer_id;
549 compl->flags = 0; 639 compl->flags = 0;
@@ -554,46 +644,36 @@ static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
554 spin_lock_bh(&ar_pci->compl_lock); 644 spin_lock_bh(&ar_pci->compl_lock);
555 list_add_tail(&compl->list, &ar_pci->compl_process); 645 list_add_tail(&compl->list, &ar_pci->compl_process);
556 spin_unlock_bh(&ar_pci->compl_lock); 646 spin_unlock_bh(&ar_pci->compl_lock);
557 647 }
558 process = true;
559 } while (ath10k_ce_completed_send_next(ce_state,
560 &transfer_context,
561 &ce_data, &nbytes,
562 &transfer_id) == 0);
563
564 /*
565 * If only some of the items within a sendlist have completed,
566 * don't invoke completion processing until the entire sendlist
567 * has been sent.
568 */
569 if (!process)
570 return;
571 648
572 ath10k_pci_process_ce(ar); 649 ath10k_pci_process_ce(ar);
573} 650}
574 651
575/* Called by lower (CE) layer when data is received from the Target. */ 652/* Called by lower (CE) layer when data is received from the Target. */
576static void ath10k_pci_ce_recv_data(struct ce_state *ce_state, 653static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
577 void *transfer_context, u32 ce_data,
578 unsigned int nbytes,
579 unsigned int transfer_id,
580 unsigned int flags)
581{ 654{
582 struct ath10k *ar = ce_state->ar; 655 struct ath10k *ar = ce_state->ar;
583 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 656 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
584 struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; 657 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
585 struct ath10k_pci_compl *compl; 658 struct ath10k_pci_compl *compl;
586 struct sk_buff *skb; 659 struct sk_buff *skb;
660 void *transfer_context;
661 u32 ce_data;
662 unsigned int nbytes;
663 unsigned int transfer_id;
664 unsigned int flags;
587 665
588 do { 666 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
667 &ce_data, &nbytes, &transfer_id,
668 &flags) == 0) {
589 compl = get_free_compl(pipe_info); 669 compl = get_free_compl(pipe_info);
590 if (!compl) 670 if (!compl)
591 break; 671 break;
592 672
593 compl->send_or_recv = HIF_CE_COMPLETE_RECV; 673 compl->state = ATH10K_PCI_COMPL_RECV;
594 compl->ce_state = ce_state; 674 compl->ce_state = ce_state;
595 compl->pipe_info = pipe_info; 675 compl->pipe_info = pipe_info;
596 compl->transfer_context = transfer_context; 676 compl->skb = transfer_context;
597 compl->nbytes = nbytes; 677 compl->nbytes = nbytes;
598 compl->transfer_id = transfer_id; 678 compl->transfer_id = transfer_id;
599 compl->flags = flags; 679 compl->flags = flags;
@@ -608,12 +688,7 @@ static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
608 spin_lock_bh(&ar_pci->compl_lock); 688 spin_lock_bh(&ar_pci->compl_lock);
609 list_add_tail(&compl->list, &ar_pci->compl_process); 689 list_add_tail(&compl->list, &ar_pci->compl_process);
610 spin_unlock_bh(&ar_pci->compl_lock); 690 spin_unlock_bh(&ar_pci->compl_lock);
611 691 }
612 } while (ath10k_ce_completed_recv_next(ce_state,
613 &transfer_context,
614 &ce_data, &nbytes,
615 &transfer_id,
616 &flags) == 0);
617 692
618 ath10k_pci_process_ce(ar); 693 ath10k_pci_process_ce(ar);
619} 694}
@@ -625,15 +700,12 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
625{ 700{
626 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf); 701 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
627 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 702 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
628 struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]); 703 struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
629 struct ce_state *ce_hdl = pipe_info->ce_hdl; 704 struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
630 struct ce_sendlist sendlist;
631 unsigned int len; 705 unsigned int len;
632 u32 flags = 0; 706 u32 flags = 0;
633 int ret; 707 int ret;
634 708
635 memset(&sendlist, 0, sizeof(struct ce_sendlist));
636
637 len = min(bytes, nbuf->len); 709 len = min(bytes, nbuf->len);
638 bytes -= len; 710 bytes -= len;
639 711
@@ -648,8 +720,6 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
648 "ath10k tx: data: ", 720 "ath10k tx: data: ",
649 nbuf->data, nbuf->len); 721 nbuf->data, nbuf->len);
650 722
651 ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
652
653 /* Make sure we have resources to handle this request */ 723 /* Make sure we have resources to handle this request */
654 spin_lock_bh(&pipe_info->pipe_lock); 724 spin_lock_bh(&pipe_info->pipe_lock);
655 if (!pipe_info->num_sends_allowed) { 725 if (!pipe_info->num_sends_allowed) {
@@ -660,7 +730,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
660 pipe_info->num_sends_allowed--; 730 pipe_info->num_sends_allowed--;
661 spin_unlock_bh(&pipe_info->pipe_lock); 731 spin_unlock_bh(&pipe_info->pipe_lock);
662 732
663 ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); 733 ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, transfer_id,
734 skb_cb->paddr, len, flags);
664 if (ret) 735 if (ret)
665 ath10k_warn("CE send failed: %p\n", nbuf); 736 ath10k_warn("CE send failed: %p\n", nbuf);
666 737
@@ -670,7 +741,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
670static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 741static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
671{ 742{
672 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 743 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
673 struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]); 744 struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
674 int ret; 745 int ret;
675 746
676 spin_lock_bh(&pipe_info->pipe_lock); 747 spin_lock_bh(&pipe_info->pipe_lock);
@@ -764,9 +835,9 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
764static int ath10k_pci_start_ce(struct ath10k *ar) 835static int ath10k_pci_start_ce(struct ath10k *ar)
765{ 836{
766 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 837 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
767 struct ce_state *ce_diag = ar_pci->ce_diag; 838 struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
768 const struct ce_attr *attr; 839 const struct ce_attr *attr;
769 struct hif_ce_pipe_info *pipe_info; 840 struct ath10k_pci_pipe *pipe_info;
770 struct ath10k_pci_compl *compl; 841 struct ath10k_pci_compl *compl;
771 int i, pipe_num, completions, disable_interrupts; 842 int i, pipe_num, completions, disable_interrupts;
772 843
@@ -805,15 +876,14 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
805 continue; 876 continue;
806 877
807 for (i = 0; i < completions; i++) { 878 for (i = 0; i < completions; i++) {
808 compl = kmalloc(sizeof(struct ath10k_pci_compl), 879 compl = kmalloc(sizeof(*compl), GFP_KERNEL);
809 GFP_KERNEL);
810 if (!compl) { 880 if (!compl) {
811 ath10k_warn("No memory for completion state\n"); 881 ath10k_warn("No memory for completion state\n");
812 ath10k_pci_stop_ce(ar); 882 ath10k_pci_stop_ce(ar);
813 return -ENOMEM; 883 return -ENOMEM;
814 } 884 }
815 885
816 compl->send_or_recv = HIF_CE_COMPLETE_FREE; 886 compl->state = ATH10K_PCI_COMPL_FREE;
817 list_add_tail(&compl->list, &pipe_info->compl_free); 887 list_add_tail(&compl->list, &pipe_info->compl_free);
818 } 888 }
819 } 889 }
@@ -840,7 +910,7 @@ static void ath10k_pci_stop_ce(struct ath10k *ar)
840 * their associated resources */ 910 * their associated resources */
841 spin_lock_bh(&ar_pci->compl_lock); 911 spin_lock_bh(&ar_pci->compl_lock);
842 list_for_each_entry(compl, &ar_pci->compl_process, list) { 912 list_for_each_entry(compl, &ar_pci->compl_process, list) {
843 skb = (struct sk_buff *)compl->transfer_context; 913 skb = compl->skb;
844 ATH10K_SKB_CB(skb)->is_aborted = true; 914 ATH10K_SKB_CB(skb)->is_aborted = true;
845 } 915 }
846 spin_unlock_bh(&ar_pci->compl_lock); 916 spin_unlock_bh(&ar_pci->compl_lock);
@@ -850,7 +920,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
850{ 920{
851 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 921 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
852 struct ath10k_pci_compl *compl, *tmp; 922 struct ath10k_pci_compl *compl, *tmp;
853 struct hif_ce_pipe_info *pipe_info; 923 struct ath10k_pci_pipe *pipe_info;
854 struct sk_buff *netbuf; 924 struct sk_buff *netbuf;
855 int pipe_num; 925 int pipe_num;
856 926
@@ -861,7 +931,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
861 931
862 list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) { 932 list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
863 list_del(&compl->list); 933 list_del(&compl->list);
864 netbuf = (struct sk_buff *)compl->transfer_context; 934 netbuf = compl->skb;
865 dev_kfree_skb_any(netbuf); 935 dev_kfree_skb_any(netbuf);
866 kfree(compl); 936 kfree(compl);
867 } 937 }
@@ -912,12 +982,14 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
912 list_del(&compl->list); 982 list_del(&compl->list);
913 spin_unlock_bh(&ar_pci->compl_lock); 983 spin_unlock_bh(&ar_pci->compl_lock);
914 984
915 if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) { 985 switch (compl->state) {
986 case ATH10K_PCI_COMPL_SEND:
916 cb->tx_completion(ar, 987 cb->tx_completion(ar,
917 compl->transfer_context, 988 compl->skb,
918 compl->transfer_id); 989 compl->transfer_id);
919 send_done = 1; 990 send_done = 1;
920 } else { 991 break;
992 case ATH10K_PCI_COMPL_RECV:
921 ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1); 993 ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
922 if (ret) { 994 if (ret) {
923 ath10k_warn("Unable to post recv buffer for pipe: %d\n", 995 ath10k_warn("Unable to post recv buffer for pipe: %d\n",
@@ -925,7 +997,7 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
925 break; 997 break;
926 } 998 }
927 999
928 skb = (struct sk_buff *)compl->transfer_context; 1000 skb = compl->skb;
929 nbytes = compl->nbytes; 1001 nbytes = compl->nbytes;
930 1002
931 ath10k_dbg(ATH10K_DBG_PCI, 1003 ath10k_dbg(ATH10K_DBG_PCI,
@@ -944,9 +1016,17 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
944 nbytes, 1016 nbytes,
945 skb->len + skb_tailroom(skb)); 1017 skb->len + skb_tailroom(skb));
946 } 1018 }
1019 break;
1020 case ATH10K_PCI_COMPL_FREE:
1021 ath10k_warn("free completion cannot be processed\n");
1022 break;
1023 default:
1024 ath10k_warn("invalid completion state (%d)\n",
1025 compl->state);
1026 break;
947 } 1027 }
948 1028
949 compl->send_or_recv = HIF_CE_COMPLETE_FREE; 1029 compl->state = ATH10K_PCI_COMPL_FREE;
950 1030
951 /* 1031 /*
952 * Add completion back to the pipe's free list. 1032 * Add completion back to the pipe's free list.
@@ -1037,12 +1117,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1037 &dl_is_polled); 1117 &dl_is_polled);
1038} 1118}
1039 1119
1040static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, 1120static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
1041 int num) 1121 int num)
1042{ 1122{
1043 struct ath10k *ar = pipe_info->hif_ce_state; 1123 struct ath10k *ar = pipe_info->hif_ce_state;
1044 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1124 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1045 struct ce_state *ce_state = pipe_info->ce_hdl; 1125 struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1046 struct sk_buff *skb; 1126 struct sk_buff *skb;
1047 dma_addr_t ce_data; 1127 dma_addr_t ce_data;
1048 int i, ret = 0; 1128 int i, ret = 0;
@@ -1097,7 +1177,7 @@ err:
1097static int ath10k_pci_post_rx(struct ath10k *ar) 1177static int ath10k_pci_post_rx(struct ath10k *ar)
1098{ 1178{
1099 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1179 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1100 struct hif_ce_pipe_info *pipe_info; 1180 struct ath10k_pci_pipe *pipe_info;
1101 const struct ce_attr *attr; 1181 const struct ce_attr *attr;
1102 int pipe_num, ret = 0; 1182 int pipe_num, ret = 0;
1103 1183
@@ -1147,11 +1227,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
1147 return 0; 1227 return 0;
1148} 1228}
1149 1229
1150static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) 1230static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1151{ 1231{
1152 struct ath10k *ar; 1232 struct ath10k *ar;
1153 struct ath10k_pci *ar_pci; 1233 struct ath10k_pci *ar_pci;
1154 struct ce_state *ce_hdl; 1234 struct ath10k_ce_pipe *ce_hdl;
1155 u32 buf_sz; 1235 u32 buf_sz;
1156 struct sk_buff *netbuf; 1236 struct sk_buff *netbuf;
1157 u32 ce_data; 1237 u32 ce_data;
@@ -1179,11 +1259,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1179 } 1259 }
1180} 1260}
1181 1261
1182static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) 1262static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1183{ 1263{
1184 struct ath10k *ar; 1264 struct ath10k *ar;
1185 struct ath10k_pci *ar_pci; 1265 struct ath10k_pci *ar_pci;
1186 struct ce_state *ce_hdl; 1266 struct ath10k_ce_pipe *ce_hdl;
1187 struct sk_buff *netbuf; 1267 struct sk_buff *netbuf;
1188 u32 ce_data; 1268 u32 ce_data;
1189 unsigned int nbytes; 1269 unsigned int nbytes;
@@ -1206,15 +1286,14 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1206 1286
1207 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, 1287 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1208 &ce_data, &nbytes, &id) == 0) { 1288 &ce_data, &nbytes, &id) == 0) {
1209 if (netbuf != CE_SENDLIST_ITEM_CTXT) 1289 /*
 1210 /* 1290 * Indicate the completion to higher layer to free
1211 * Indicate the completion to higer layer to free 1291 * the buffer
1212 * the buffer 1292 */
1213 */ 1293 ATH10K_SKB_CB(netbuf)->is_aborted = true;
1214 ATH10K_SKB_CB(netbuf)->is_aborted = true; 1294 ar_pci->msg_callbacks_current.tx_completion(ar,
1215 ar_pci->msg_callbacks_current.tx_completion(ar, 1295 netbuf,
1216 netbuf, 1296 id);
1217 id);
1218 } 1297 }
1219} 1298}
1220 1299
@@ -1232,7 +1311,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1232 int pipe_num; 1311 int pipe_num;
1233 1312
1234 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { 1313 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1235 struct hif_ce_pipe_info *pipe_info; 1314 struct ath10k_pci_pipe *pipe_info;
1236 1315
1237 pipe_info = &ar_pci->pipe_info[pipe_num]; 1316 pipe_info = &ar_pci->pipe_info[pipe_num];
1238 ath10k_pci_rx_pipe_cleanup(pipe_info); 1317 ath10k_pci_rx_pipe_cleanup(pipe_info);
@@ -1243,7 +1322,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1243static void ath10k_pci_ce_deinit(struct ath10k *ar) 1322static void ath10k_pci_ce_deinit(struct ath10k *ar)
1244{ 1323{
1245 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1324 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1246 struct hif_ce_pipe_info *pipe_info; 1325 struct ath10k_pci_pipe *pipe_info;
1247 int pipe_num; 1326 int pipe_num;
1248 1327
1249 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { 1328 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
@@ -1293,8 +1372,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1293 void *resp, u32 *resp_len) 1372 void *resp, u32 *resp_len)
1294{ 1373{
1295 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1374 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1296 struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl; 1375 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1297 struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl; 1376 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1377 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1378 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1298 dma_addr_t req_paddr = 0; 1379 dma_addr_t req_paddr = 0;
1299 dma_addr_t resp_paddr = 0; 1380 dma_addr_t resp_paddr = 0;
1300 struct bmi_xfer xfer = {}; 1381 struct bmi_xfer xfer = {};
@@ -1378,13 +1459,16 @@ err_dma:
1378 return ret; 1459 return ret;
1379} 1460}
1380 1461
1381static void ath10k_pci_bmi_send_done(struct ce_state *ce_state, 1462static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1382 void *transfer_context,
1383 u32 data,
1384 unsigned int nbytes,
1385 unsigned int transfer_id)
1386{ 1463{
1387 struct bmi_xfer *xfer = transfer_context; 1464 struct bmi_xfer *xfer;
1465 u32 ce_data;
1466 unsigned int nbytes;
1467 unsigned int transfer_id;
1468
1469 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1470 &nbytes, &transfer_id))
1471 return;
1388 1472
1389 if (xfer->wait_for_resp) 1473 if (xfer->wait_for_resp)
1390 return; 1474 return;
@@ -1392,14 +1476,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
1392 complete(&xfer->done); 1476 complete(&xfer->done);
1393} 1477}
1394 1478
1395static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state, 1479static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1396 void *transfer_context,
1397 u32 data,
1398 unsigned int nbytes,
1399 unsigned int transfer_id,
1400 unsigned int flags)
1401{ 1480{
1402 struct bmi_xfer *xfer = transfer_context; 1481 struct bmi_xfer *xfer;
1482 u32 ce_data;
1483 unsigned int nbytes;
1484 unsigned int transfer_id;
1485 unsigned int flags;
1486
1487 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1488 &nbytes, &transfer_id, &flags))
1489 return;
1403 1490
1404 if (!xfer->wait_for_resp) { 1491 if (!xfer->wait_for_resp) {
1405 ath10k_warn("unexpected: BMI data received; ignoring\n"); 1492 ath10k_warn("unexpected: BMI data received; ignoring\n");
@@ -1679,7 +1766,7 @@ static int ath10k_pci_init_config(struct ath10k *ar)
1679static int ath10k_pci_ce_init(struct ath10k *ar) 1766static int ath10k_pci_ce_init(struct ath10k *ar)
1680{ 1767{
1681 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1768 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1682 struct hif_ce_pipe_info *pipe_info; 1769 struct ath10k_pci_pipe *pipe_info;
1683 const struct ce_attr *attr; 1770 const struct ce_attr *attr;
1684 int pipe_num; 1771 int pipe_num;
1685 1772
@@ -1895,7 +1982,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1895 1982
1896static void ath10k_pci_ce_tasklet(unsigned long ptr) 1983static void ath10k_pci_ce_tasklet(unsigned long ptr)
1897{ 1984{
1898 struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr; 1985 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
1899 struct ath10k_pci *ar_pci = pipe->ar_pci; 1986 struct ath10k_pci *ar_pci = pipe->ar_pci;
1900 1987
1901 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); 1988 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
@@ -2212,18 +2299,13 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
2212 2299
2213static void ath10k_pci_device_reset(struct ath10k *ar) 2300static void ath10k_pci_device_reset(struct ath10k *ar)
2214{ 2301{
2215 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2216 void __iomem *mem = ar_pci->mem;
2217 int i; 2302 int i;
2218 u32 val; 2303 u32 val;
2219 2304
2220 if (!SOC_GLOBAL_RESET_ADDRESS) 2305 if (!SOC_GLOBAL_RESET_ADDRESS)
2221 return; 2306 return;
2222 2307
2223 if (!mem) 2308 ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
2224 return;
2225
2226 ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
2227 PCIE_SOC_WAKE_V_MASK); 2309 PCIE_SOC_WAKE_V_MASK);
2228 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { 2310 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2229 if (ath10k_pci_target_is_awake(ar)) 2311 if (ath10k_pci_target_is_awake(ar))
@@ -2232,12 +2314,12 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
2232 } 2314 }
2233 2315
2234 /* Put Target, including PCIe, into RESET. */ 2316 /* Put Target, including PCIe, into RESET. */
2235 val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS); 2317 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2236 val |= 1; 2318 val |= 1;
2237 ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); 2319 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2238 2320
2239 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { 2321 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2240 if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & 2322 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2241 RTC_STATE_COLD_RESET_MASK) 2323 RTC_STATE_COLD_RESET_MASK)
2242 break; 2324 break;
2243 msleep(1); 2325 msleep(1);
@@ -2245,16 +2327,16 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
2245 2327
2246 /* Pull Target, including PCIe, out of RESET. */ 2328 /* Pull Target, including PCIe, out of RESET. */
2247 val &= ~1; 2329 val &= ~1;
2248 ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); 2330 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2249 2331
2250 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { 2332 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2251 if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & 2333 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2252 RTC_STATE_COLD_RESET_MASK)) 2334 RTC_STATE_COLD_RESET_MASK))
2253 break; 2335 break;
2254 msleep(1); 2336 msleep(1);
2255 } 2337 }
2256 2338
2257 ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); 2339 ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2258} 2340}
2259 2341
2260static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) 2342static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2267,13 +2349,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2267 2349
2268 switch (i) { 2350 switch (i) {
2269 case ATH10K_PCI_FEATURE_MSI_X: 2351 case ATH10K_PCI_FEATURE_MSI_X:
2270 ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n"); 2352 ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2271 break;
2272 case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
2273 ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
2274 break; 2353 break;
2275 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE: 2354 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2276 ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n"); 2355 ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2277 break; 2356 break;
2278 } 2357 }
2279 } 2358 }
@@ -2286,7 +2365,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2286 int ret = 0; 2365 int ret = 0;
2287 struct ath10k *ar; 2366 struct ath10k *ar;
2288 struct ath10k_pci *ar_pci; 2367 struct ath10k_pci *ar_pci;
2289 u32 lcr_val; 2368 u32 lcr_val, chip_id;
2290 2369
2291 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); 2370 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2292 2371
@@ -2298,9 +2377,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2298 ar_pci->dev = &pdev->dev; 2377 ar_pci->dev = &pdev->dev;
2299 2378
2300 switch (pci_dev->device) { 2379 switch (pci_dev->device) {
2301 case QCA988X_1_0_DEVICE_ID:
2302 set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
2303 break;
2304 case QCA988X_2_0_DEVICE_ID: 2380 case QCA988X_2_0_DEVICE_ID:
2305 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features); 2381 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2306 break; 2382 break;
@@ -2322,10 +2398,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2322 goto err_ar_pci; 2398 goto err_ar_pci;
2323 } 2399 }
2324 2400
2325 /* Enable QCA988X_1.0 HW workarounds */
2326 if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
2327 spin_lock_init(&ar_pci->hw_v1_workaround_lock);
2328
2329 ar_pci->ar = ar; 2401 ar_pci->ar = ar;
2330 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS; 2402 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2331 atomic_set(&ar_pci->keep_awake_count, 0); 2403 atomic_set(&ar_pci->keep_awake_count, 0);
@@ -2395,9 +2467,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2395 2467
2396 spin_lock_init(&ar_pci->ce_lock); 2468 spin_lock_init(&ar_pci->ce_lock);
2397 2469
2398 ar_pci->cacheline_sz = dma_get_cache_alignment(); 2470 ret = ath10k_do_pci_wake(ar);
2471 if (ret) {
2472 ath10k_err("Failed to get chip id: %d\n", ret);
2473 return ret;
2474 }
2475
2476 chip_id = ath10k_pci_read32(ar,
2477 RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
2478
2479 ath10k_do_pci_sleep(ar);
2480
2481 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2399 2482
2400 ret = ath10k_core_register(ar); 2483 ret = ath10k_core_register(ar, chip_id);
2401 if (ret) { 2484 if (ret) {
2402 ath10k_err("could not register driver core (%d)\n", ret); 2485 ath10k_err("could not register driver core (%d)\n", ret);
2403 goto err_iomap; 2486 goto err_iomap;
@@ -2414,7 +2497,6 @@ err_region:
2414err_device: 2497err_device:
2415 pci_disable_device(pdev); 2498 pci_disable_device(pdev);
2416err_ar: 2499err_ar:
2417 pci_set_drvdata(pdev, NULL);
2418 ath10k_core_destroy(ar); 2500 ath10k_core_destroy(ar);
2419err_ar_pci: 2501err_ar_pci:
2420 /* call HIF PCI free here */ 2502 /* call HIF PCI free here */
@@ -2442,7 +2524,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
2442 2524
2443 ath10k_core_unregister(ar); 2525 ath10k_core_unregister(ar);
2444 2526
2445 pci_set_drvdata(pdev, NULL);
2446 pci_iounmap(pdev, ar_pci->mem); 2527 pci_iounmap(pdev, ar_pci->mem);
2447 pci_release_region(pdev, BAR_NUM); 2528 pci_release_region(pdev, BAR_NUM);
2448 pci_clear_master(pdev); 2529 pci_clear_master(pdev);
@@ -2483,9 +2564,6 @@ module_exit(ath10k_pci_exit);
2483MODULE_AUTHOR("Qualcomm Atheros"); 2564MODULE_AUTHOR("Qualcomm Atheros");
2484MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); 2565MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2485MODULE_LICENSE("Dual BSD/GPL"); 2566MODULE_LICENSE("Dual BSD/GPL");
2486MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
2487MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
2488MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
2489MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); 2567MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2490MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE); 2568MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2491MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); 2569MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 871bb339d56d..7c49f6f96f70 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -43,22 +43,23 @@ struct bmi_xfer {
43 u32 resp_len; 43 u32 resp_len;
44}; 44};
45 45
46enum ath10k_pci_compl_state {
47 ATH10K_PCI_COMPL_FREE = 0,
48 ATH10K_PCI_COMPL_SEND,
49 ATH10K_PCI_COMPL_RECV,
50};
51
46struct ath10k_pci_compl { 52struct ath10k_pci_compl {
47 struct list_head list; 53 struct list_head list;
48 int send_or_recv; 54 enum ath10k_pci_compl_state state;
49 struct ce_state *ce_state; 55 struct ath10k_ce_pipe *ce_state;
50 struct hif_ce_pipe_info *pipe_info; 56 struct ath10k_pci_pipe *pipe_info;
51 void *transfer_context; 57 struct sk_buff *skb;
52 unsigned int nbytes; 58 unsigned int nbytes;
53 unsigned int transfer_id; 59 unsigned int transfer_id;
54 unsigned int flags; 60 unsigned int flags;
55}; 61};
56 62
57/* compl_state.send_or_recv */
58#define HIF_CE_COMPLETE_FREE 0
59#define HIF_CE_COMPLETE_SEND 1
60#define HIF_CE_COMPLETE_RECV 2
61
62/* 63/*
63 * PCI-specific Target state 64 * PCI-specific Target state
64 * 65 *
@@ -152,17 +153,16 @@ struct service_to_pipe {
152 153
153enum ath10k_pci_features { 154enum ath10k_pci_features {
154 ATH10K_PCI_FEATURE_MSI_X = 0, 155 ATH10K_PCI_FEATURE_MSI_X = 0,
155 ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1, 156 ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,
156 ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,
157 157
158 /* keep last */ 158 /* keep last */
159 ATH10K_PCI_FEATURE_COUNT 159 ATH10K_PCI_FEATURE_COUNT
160}; 160};
161 161
162/* Per-pipe state. */ 162/* Per-pipe state. */
163struct hif_ce_pipe_info { 163struct ath10k_pci_pipe {
164 /* Handle of underlying Copy Engine */ 164 /* Handle of underlying Copy Engine */
165 struct ce_state *ce_hdl; 165 struct ath10k_ce_pipe *ce_hdl;
166 166
167 /* Our pipe number; facilitiates use of pipe_info ptrs. */ 167 /* Our pipe number; facilitiates use of pipe_info ptrs. */
168 u8 pipe_num; 168 u8 pipe_num;
@@ -190,7 +190,6 @@ struct ath10k_pci {
190 struct device *dev; 190 struct device *dev;
191 struct ath10k *ar; 191 struct ath10k *ar;
192 void __iomem *mem; 192 void __iomem *mem;
193 int cacheline_sz;
194 193
195 DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT); 194 DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
196 195
@@ -219,7 +218,7 @@ struct ath10k_pci {
219 218
220 bool compl_processing; 219 bool compl_processing;
221 220
222 struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX]; 221 struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
223 222
224 struct ath10k_hif_cb msg_callbacks_current; 223 struct ath10k_hif_cb msg_callbacks_current;
225 224
@@ -227,16 +226,13 @@ struct ath10k_pci {
227 u32 fw_indicator_address; 226 u32 fw_indicator_address;
228 227
229 /* Copy Engine used for Diagnostic Accesses */ 228 /* Copy Engine used for Diagnostic Accesses */
230 struct ce_state *ce_diag; 229 struct ath10k_ce_pipe *ce_diag;
231 230
232 /* FIXME: document what this really protects */ 231 /* FIXME: document what this really protects */
233 spinlock_t ce_lock; 232 spinlock_t ce_lock;
234 233
235 /* Map CE id to ce_state */ 234 /* Map CE id to ce_state */
236 struct ce_state *ce_id_to_state[CE_COUNT_MAX]; 235 struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
237
238 /* makes sure that dummy reads are atomic */
239 spinlock_t hw_v1_workaround_lock;
240}; 236};
241 237
242static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) 238static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -244,14 +240,18 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
244 return ar->hif.priv; 240 return ar->hif.priv;
245} 241}
246 242
247static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr) 243static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
248{ 244{
249 return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr); 245 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
246
247 return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
250} 248}
251 249
252static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val) 250static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
253{ 251{
254 iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr); 252 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
253
254 iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
255} 255}
256 256
257#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ 257#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
@@ -310,23 +310,8 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
310 u32 value) 310 u32 value)
311{ 311{
312 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 312 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
313 void __iomem *addr = ar_pci->mem;
314
315 if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
316 unsigned long irq_flags;
317 313
318 spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags); 314 iowrite32(value, ar_pci->mem + offset);
319
320 ioread32(addr+offset+4); /* 3rd read prior to write */
321 ioread32(addr+offset+4); /* 2nd read prior to write */
322 ioread32(addr+offset+4); /* 1st read prior to write */
323 iowrite32(value, addr+offset);
324
325 spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
326 irq_flags);
327 } else {
328 iowrite32(value, addr+offset);
329 }
330} 315}
331 316
332static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) 317static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
@@ -336,15 +321,17 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
336 return ioread32(ar_pci->mem + offset); 321 return ioread32(ar_pci->mem + offset);
337} 322}
338 323
339void ath10k_do_pci_wake(struct ath10k *ar); 324int ath10k_do_pci_wake(struct ath10k *ar);
340void ath10k_do_pci_sleep(struct ath10k *ar); 325void ath10k_do_pci_sleep(struct ath10k *ar);
341 326
342static inline void ath10k_pci_wake(struct ath10k *ar) 327static inline int ath10k_pci_wake(struct ath10k *ar)
343{ 328{
344 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 329 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
345 330
346 if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) 331 if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
347 ath10k_do_pci_wake(ar); 332 return ath10k_do_pci_wake(ar);
333
334 return 0;
348} 335}
349 336
350static inline void ath10k_pci_sleep(struct ath10k *ar) 337static inline void ath10k_pci_sleep(struct ath10k *ar)
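
With ath10k_do_pci_wake() and ath10k_pci_wake() now returning an int, callers are expected to check for a wake timeout before touching registers, mirroring how the probe path reads the chip id earlier in this patch. A minimal sketch of that call pattern, assuming a hypothetical helper around the register accessors shown above:

/* Sketch only: example_read_soc_reg() is hypothetical; ath10k_pci_wake(),
 * ath10k_pci_read32() and ath10k_pci_sleep() are taken from this patch. */
static int example_read_soc_reg(struct ath10k *ar, u32 offset, u32 *value)
{
	int ret;

	ret = ath10k_pci_wake(ar);      /* may now fail with -ETIMEDOUT */
	if (ret)
		return ret;

	*value = ath10k_pci_read32(ar, offset);

	ath10k_pci_sleep(ar);
	return 0;
}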
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index bfec6c8f2ecb..1c584c4b019c 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -422,10 +422,30 @@ struct rx_mpdu_end {
422#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14) 422#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
423#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15) 423#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
424 424
425/* The decapped header (rx_hdr_status) contains the following:
426 * a) 802.11 header
427 * [padding to 4 bytes]
428 * b) HW crypto parameter
429 * - 0 bytes for no security
430 * - 4 bytes for WEP
431 * - 8 bytes for TKIP, AES
432 * [padding to 4 bytes]
 433 * c) A-MSDU subframe header (14 bytes) if applicable
434 * d) LLC/SNAP (RFC1042, 8 bytes)
435 *
 436 * In the case of an A-MSDU, only the first frame in the sequence contains (a) and (b). */
425enum rx_msdu_decap_format { 437enum rx_msdu_decap_format {
426 RX_MSDU_DECAP_RAW = 0, 438 RX_MSDU_DECAP_RAW = 0,
427 RX_MSDU_DECAP_NATIVE_WIFI = 1, 439
440 /* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
441 * htt_rx_desc contains the original decapped 802.11 header. */
442 RX_MSDU_DECAP_NATIVE_WIFI = 1,
443
444 /* Payload contains an ethernet header (struct ethhdr). */
428 RX_MSDU_DECAP_ETHERNET2_DIX = 2, 445 RX_MSDU_DECAP_ETHERNET2_DIX = 2,
446
447 /* Payload contains two 48-bit addresses and 2-byte length (14 bytes
448 * total), followed by an RFC1042 header (8 bytes). */
429 RX_MSDU_DECAP_8023_SNAP_LLC = 3 449 RX_MSDU_DECAP_8023_SNAP_LLC = 3
430}; 450};
431 451
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 85e806bf7257..90817ddc92ba 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -111,26 +111,29 @@ TRACE_EVENT(ath10k_log_dbg_dump,
111); 111);
112 112
113TRACE_EVENT(ath10k_wmi_cmd, 113TRACE_EVENT(ath10k_wmi_cmd,
114 TP_PROTO(int id, void *buf, size_t buf_len), 114 TP_PROTO(int id, void *buf, size_t buf_len, int ret),
115 115
116 TP_ARGS(id, buf, buf_len), 116 TP_ARGS(id, buf, buf_len, ret),
117 117
118 TP_STRUCT__entry( 118 TP_STRUCT__entry(
119 __field(unsigned int, id) 119 __field(unsigned int, id)
120 __field(size_t, buf_len) 120 __field(size_t, buf_len)
121 __dynamic_array(u8, buf, buf_len) 121 __dynamic_array(u8, buf, buf_len)
122 __field(int, ret)
122 ), 123 ),
123 124
124 TP_fast_assign( 125 TP_fast_assign(
125 __entry->id = id; 126 __entry->id = id;
126 __entry->buf_len = buf_len; 127 __entry->buf_len = buf_len;
128 __entry->ret = ret;
127 memcpy(__get_dynamic_array(buf), buf, buf_len); 129 memcpy(__get_dynamic_array(buf), buf, buf_len);
128 ), 130 ),
129 131
130 TP_printk( 132 TP_printk(
131 "id %d len %zu", 133 "id %d len %zu ret %d",
132 __entry->id, 134 __entry->id,
133 __entry->buf_len 135 __entry->buf_len,
136 __entry->ret
134 ) 137 )
135); 138);
136 139
@@ -158,6 +161,27 @@ TRACE_EVENT(ath10k_wmi_event,
158 ) 161 )
159); 162);
160 163
164TRACE_EVENT(ath10k_htt_stats,
165 TP_PROTO(void *buf, size_t buf_len),
166
167 TP_ARGS(buf, buf_len),
168
169 TP_STRUCT__entry(
170 __field(size_t, buf_len)
171 __dynamic_array(u8, buf, buf_len)
172 ),
173
174 TP_fast_assign(
175 __entry->buf_len = buf_len;
176 memcpy(__get_dynamic_array(buf), buf, buf_len);
177 ),
178
179 TP_printk(
180 "len %zu",
181 __entry->buf_len
182 )
183);
184
161#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ 185#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
162 186
163/* we don't want to use include/trace/events */ 187/* we don't want to use include/trace/events */
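The ath10k_wmi_cmd tracepoint gains a ret field, and a new ath10k_htt_stats tracepoint is added. A hedged sketch of how they are fed: the WMI call mirrors the wmi.c hunk later in this patch, while the HTT stats call site is not part of this diff and its placement is assumed:

    static int example_wmi_send_traced(struct ath10k *ar, struct sk_buff *skb,
                                       int cmd_id)
    {
            int ret;

            /* record the outcome of the HTC send alongside the command */
            ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
            trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);

            return ret;
    }

    static void example_report_htt_stats(struct sk_buff *skb)
    {
            /* assumed caller: the HTT stats event handler */
            trace_ath10k_htt_stats(skb->data, skb->len);
    }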
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 68b6faefd1d8..5ae373a1e294 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -44,40 +44,39 @@ out:
44 spin_unlock_bh(&ar->data_lock); 44 spin_unlock_bh(&ar->data_lock);
45} 45}
46 46
47void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc) 47void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
48 const struct htt_tx_done *tx_done)
48{ 49{
49 struct device *dev = htt->ar->dev; 50 struct device *dev = htt->ar->dev;
50 struct ieee80211_tx_info *info; 51 struct ieee80211_tx_info *info;
51 struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag; 52 struct ath10k_skb_cb *skb_cb;
52 struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu; 53 struct sk_buff *msdu;
53 int ret; 54 int ret;
54 55
55 if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0) 56 ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
56 return; 57 tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
57
58 ATH10K_SKB_CB(txdesc)->htt.refcount--;
59 58
60 if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) 59 if (tx_done->msdu_id >= htt->max_num_pending_tx) {
60 ath10k_warn("warning: msdu_id %d too big, ignoring\n",
61 tx_done->msdu_id);
61 return; 62 return;
62
63 if (txfrag) {
64 ret = ath10k_skb_unmap(dev, txfrag);
65 if (ret)
66 ath10k_warn("txfrag unmap failed (%d)\n", ret);
67
68 dev_kfree_skb_any(txfrag);
69 } 63 }
70 64
65 msdu = htt->pending_tx[tx_done->msdu_id];
66 skb_cb = ATH10K_SKB_CB(msdu);
67
71 ret = ath10k_skb_unmap(dev, msdu); 68 ret = ath10k_skb_unmap(dev, msdu);
72 if (ret) 69 if (ret)
73 ath10k_warn("data skb unmap failed (%d)\n", ret); 70 ath10k_warn("data skb unmap failed (%d)\n", ret);
74 71
72 if (skb_cb->htt.frag_len)
73 skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
74
75 ath10k_report_offchan_tx(htt->ar, msdu); 75 ath10k_report_offchan_tx(htt->ar, msdu);
76 76
77 info = IEEE80211_SKB_CB(msdu); 77 info = IEEE80211_SKB_CB(msdu);
78 memset(&info->status, 0, sizeof(info->status));
79 78
80 if (ATH10K_SKB_CB(txdesc)->htt.discard) { 79 if (tx_done->discard) {
81 ieee80211_free_txskb(htt->ar->hw, msdu); 80 ieee80211_free_txskb(htt->ar->hw, msdu);
82 goto exit; 81 goto exit;
83 } 82 }
@@ -85,7 +84,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
85 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 84 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
86 info->flags |= IEEE80211_TX_STAT_ACK; 85 info->flags |= IEEE80211_TX_STAT_ACK;
87 86
88 if (ATH10K_SKB_CB(txdesc)->htt.no_ack) 87 if (tx_done->no_ack)
89 info->flags &= ~IEEE80211_TX_STAT_ACK; 88 info->flags &= ~IEEE80211_TX_STAT_ACK;
90 89
91 ieee80211_tx_status(htt->ar->hw, msdu); 90 ieee80211_tx_status(htt->ar->hw, msdu);
@@ -93,36 +92,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
93 92
94exit: 93exit:
95 spin_lock_bh(&htt->tx_lock); 94 spin_lock_bh(&htt->tx_lock);
96 htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL; 95 htt->pending_tx[tx_done->msdu_id] = NULL;
97 ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id); 96 ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
98 __ath10k_htt_tx_dec_pending(htt); 97 __ath10k_htt_tx_dec_pending(htt);
99 if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx)) 98 if (htt->num_pending_tx == 0)
100 wake_up(&htt->empty_tx_wq); 99 wake_up(&htt->empty_tx_wq);
101 spin_unlock_bh(&htt->tx_lock); 100 spin_unlock_bh(&htt->tx_lock);
102
103 dev_kfree_skb_any(txdesc);
104}
105
106void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
107 const struct htt_tx_done *tx_done)
108{
109 struct sk_buff *txdesc;
110
111 ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
112 tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
113
114 if (tx_done->msdu_id >= htt->max_num_pending_tx) {
115 ath10k_warn("warning: msdu_id %d too big, ignoring\n",
116 tx_done->msdu_id);
117 return;
118 }
119
120 txdesc = htt->pending_tx[tx_done->msdu_id];
121
122 ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
123 ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
124
125 ath10k_txrx_tx_unref(htt, txdesc);
126} 101}
127 102
128static const u8 rx_legacy_rate_idx[] = { 103static const u8 rx_legacy_rate_idx[] = {
@@ -293,6 +268,8 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
293 status->vht_nss, 268 status->vht_nss,
294 status->freq, 269 status->freq,
295 status->band); 270 status->band);
271 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
272 info->skb->data, info->skb->len);
296 273
297 ieee80211_rx(ar->hw, info->skb); 274 ieee80211_rx(ar->hw, info->skb);
298} 275}
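ath10k_txrx_tx_unref() now looks the MSDU up by msdu_id from a struct htt_tx_done instead of carrying completion state in a separate tx descriptor skb. A sketch of the producing side; the field names come from this hunk, everything else is assumed:

    static void example_handle_tx_compl(struct ath10k_htt *htt, u16 msdu_id,
                                        bool discard, bool no_ack)
    {
            struct htt_tx_done tx_done = {0};

            tx_done.msdu_id = msdu_id;
            tx_done.discard = discard;
            tx_done.no_ack = no_ack;

            ath10k_txrx_tx_unref(htt, &tx_done);
    }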
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index e78632a76df7..356dc9c04c9e 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -19,9 +19,8 @@
19 19
20#include "htt.h" 20#include "htt.h"
21 21
22void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc); 22void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
23void ath10k_txrx_tx_completed(struct ath10k_htt *htt, 23 const struct htt_tx_done *tx_done);
24 const struct htt_tx_done *tx_done);
25void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info); 24void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
26 25
27struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, 26struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 55f90c761868..33cb19eb3d89 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -23,30 +23,6 @@
23#include "wmi.h" 23#include "wmi.h"
24#include "mac.h" 24#include "mac.h"
25 25
26void ath10k_wmi_flush_tx(struct ath10k *ar)
27{
28 int ret;
29
30 lockdep_assert_held(&ar->conf_mutex);
31
32 if (ar->state == ATH10K_STATE_WEDGED) {
33 ath10k_warn("wmi flush skipped - device is wedged anyway\n");
34 return;
35 }
36
37 ret = wait_event_timeout(ar->wmi.wq,
38 atomic_read(&ar->wmi.pending_tx_count) == 0,
39 5*HZ);
40 if (atomic_read(&ar->wmi.pending_tx_count) == 0)
41 return;
42
43 if (ret == 0)
44 ret = -ETIMEDOUT;
45
46 if (ret < 0)
47 ath10k_warn("wmi flush failed (%d)\n", ret);
48}
49
50int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) 26int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
51{ 27{
52 int ret; 28 int ret;
@@ -85,18 +61,14 @@ static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
85static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) 61static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
86{ 62{
87 dev_kfree_skb(skb); 63 dev_kfree_skb(skb);
88
89 if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
90 wake_up(&ar->wmi.wq);
91} 64}
92 65
93/* WMI command API */ 66static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
94static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, 67 enum wmi_cmd_id cmd_id)
95 enum wmi_cmd_id cmd_id)
96{ 68{
97 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); 69 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
98 struct wmi_cmd_hdr *cmd_hdr; 70 struct wmi_cmd_hdr *cmd_hdr;
99 int status; 71 int ret;
100 u32 cmd = 0; 72 u32 cmd = 0;
101 73
102 if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL) 74 if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
@@ -107,25 +79,87 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
107 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 79 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
108 cmd_hdr->cmd_id = __cpu_to_le32(cmd); 80 cmd_hdr->cmd_id = __cpu_to_le32(cmd);
109 81
110 if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
111 WMI_MAX_PENDING_TX_COUNT) {
112 /* avoid using up memory when FW hangs */
113 atomic_dec(&ar->wmi.pending_tx_count);
114 return -EBUSY;
115 }
116
117 memset(skb_cb, 0, sizeof(*skb_cb)); 82 memset(skb_cb, 0, sizeof(*skb_cb));
83 ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
84 trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
118 85
119 trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len); 86 if (ret)
87 goto err_pull;
120 88
121 status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb); 89 return 0;
122 if (status) { 90
91err_pull:
92 skb_pull(skb, sizeof(struct wmi_cmd_hdr));
93 return ret;
94}
95
96static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
97{
98 struct wmi_bcn_tx_arg arg = {0};
99 int ret;
100
101 lockdep_assert_held(&arvif->ar->data_lock);
102
103 if (arvif->beacon == NULL)
104 return;
105
106 arg.vdev_id = arvif->vdev_id;
107 arg.tx_rate = 0;
108 arg.tx_power = 0;
109 arg.bcn = arvif->beacon->data;
110 arg.bcn_len = arvif->beacon->len;
111
112 ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
113 if (ret)
114 return;
115
116 dev_kfree_skb_any(arvif->beacon);
117 arvif->beacon = NULL;
118}
119
120static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
121 struct ieee80211_vif *vif)
122{
123 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
124
125 ath10k_wmi_tx_beacon_nowait(arvif);
126}
127
128static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
129{
130 spin_lock_bh(&ar->data_lock);
131 ieee80211_iterate_active_interfaces_atomic(ar->hw,
132 IEEE80211_IFACE_ITER_NORMAL,
133 ath10k_wmi_tx_beacons_iter,
134 NULL);
135 spin_unlock_bh(&ar->data_lock);
136}
137
138static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
139{
140 /* try to send pending beacons first. they take priority */
141 ath10k_wmi_tx_beacons_nowait(ar);
142
143 wake_up(&ar->wmi.tx_credits_wq);
144}
145
146static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
147 enum wmi_cmd_id cmd_id)
148{
149 int ret = -EINVAL;
150
151 wait_event_timeout(ar->wmi.tx_credits_wq, ({
152 /* try to send pending beacons first. they take priority */
153 ath10k_wmi_tx_beacons_nowait(ar);
154
155 ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
156 (ret != -EAGAIN);
157 }), 3*HZ);
158
159 if (ret)
123 dev_kfree_skb_any(skb); 160 dev_kfree_skb_any(skb);
124 atomic_dec(&ar->wmi.pending_tx_count);
125 return status;
126 }
127 161
128 return 0; 162 return ret;
129} 163}
130 164
131static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) 165static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
@@ -315,7 +349,9 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
315 349
316static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) 350static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
317{ 351{
318 struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data; 352 struct wmi_mgmt_rx_event_v1 *ev_v1;
353 struct wmi_mgmt_rx_event_v2 *ev_v2;
354 struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
319 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 355 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
320 struct ieee80211_hdr *hdr; 356 struct ieee80211_hdr *hdr;
321 u32 rx_status; 357 u32 rx_status;
@@ -325,13 +361,24 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
325 u32 rate; 361 u32 rate;
326 u32 buf_len; 362 u32 buf_len;
327 u16 fc; 363 u16 fc;
364 int pull_len;
365
366 if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
367 ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
368 ev_hdr = &ev_v2->hdr.v1;
369 pull_len = sizeof(*ev_v2);
370 } else {
371 ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
372 ev_hdr = &ev_v1->hdr;
373 pull_len = sizeof(*ev_v1);
374 }
328 375
329 channel = __le32_to_cpu(event->hdr.channel); 376 channel = __le32_to_cpu(ev_hdr->channel);
330 buf_len = __le32_to_cpu(event->hdr.buf_len); 377 buf_len = __le32_to_cpu(ev_hdr->buf_len);
331 rx_status = __le32_to_cpu(event->hdr.status); 378 rx_status = __le32_to_cpu(ev_hdr->status);
332 snr = __le32_to_cpu(event->hdr.snr); 379 snr = __le32_to_cpu(ev_hdr->snr);
333 phy_mode = __le32_to_cpu(event->hdr.phy_mode); 380 phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
334 rate = __le32_to_cpu(event->hdr.rate); 381 rate = __le32_to_cpu(ev_hdr->rate);
335 382
336 memset(status, 0, sizeof(*status)); 383 memset(status, 0, sizeof(*status));
337 384
@@ -358,7 +405,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
358 status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR; 405 status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
359 status->rate_idx = get_rate_idx(rate, status->band); 406 status->rate_idx = get_rate_idx(rate, status->band);
360 407
361 skb_pull(skb, sizeof(event->hdr)); 408 skb_pull(skb, pull_len);
362 409
363 hdr = (struct ieee80211_hdr *)skb->data; 410 hdr = (struct ieee80211_hdr *)skb->data;
364 fc = le16_to_cpu(hdr->frame_control); 411 fc = le16_to_cpu(hdr->frame_control);
@@ -734,10 +781,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
734 int i = -1; 781 int i = -1;
735 struct wmi_bcn_info *bcn_info; 782 struct wmi_bcn_info *bcn_info;
736 struct ath10k_vif *arvif; 783 struct ath10k_vif *arvif;
737 struct wmi_bcn_tx_arg arg;
738 struct sk_buff *bcn; 784 struct sk_buff *bcn;
739 int vdev_id = 0; 785 int vdev_id = 0;
740 int ret;
741 786
742 ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n"); 787 ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
743 788
@@ -794,17 +839,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
794 ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info); 839 ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
795 ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info); 840 ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
796 841
797 arg.vdev_id = arvif->vdev_id; 842 spin_lock_bh(&ar->data_lock);
798 arg.tx_rate = 0; 843 if (arvif->beacon) {
799 arg.tx_power = 0; 844 ath10k_warn("SWBA overrun on vdev %d\n",
800 arg.bcn = bcn->data; 845 arvif->vdev_id);
801 arg.bcn_len = bcn->len; 846 dev_kfree_skb_any(arvif->beacon);
847 }
802 848
803 ret = ath10k_wmi_beacon_send(ar, &arg); 849 arvif->beacon = bcn;
804 if (ret)
805 ath10k_warn("could not send beacon (%d)\n", ret);
806 850
807 dev_kfree_skb_any(bcn); 851 ath10k_wmi_tx_beacon_nowait(arvif);
852 spin_unlock_bh(&ar->data_lock);
808 } 853 }
809} 854}
810 855
@@ -943,6 +988,9 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
943 ar->phy_capability = __le32_to_cpu(ev->phy_capability); 988 ar->phy_capability = __le32_to_cpu(ev->phy_capability);
944 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); 989 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
945 990
991 if (ar->fw_version_build > 636)
992 set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
993
946 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { 994 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
947 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", 995 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
948 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); 996 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
@@ -1007,7 +1055,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
1007 return 0; 1055 return 0;
1008} 1056}
1009 1057
1010static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb) 1058static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
1011{ 1059{
1012 struct wmi_cmd_hdr *cmd_hdr; 1060 struct wmi_cmd_hdr *cmd_hdr;
1013 enum wmi_event_id id; 1061 enum wmi_event_id id;
@@ -1126,64 +1174,18 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
1126 dev_kfree_skb(skb); 1174 dev_kfree_skb(skb);
1127} 1175}
1128 1176
1129static void ath10k_wmi_event_work(struct work_struct *work)
1130{
1131 struct ath10k *ar = container_of(work, struct ath10k,
1132 wmi.wmi_event_work);
1133 struct sk_buff *skb;
1134
1135 for (;;) {
1136 skb = skb_dequeue(&ar->wmi.wmi_event_list);
1137 if (!skb)
1138 break;
1139
1140 ath10k_wmi_event_process(ar, skb);
1141 }
1142}
1143
1144static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
1145{
1146 struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
1147 enum wmi_event_id event_id;
1148
1149 event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
1150
1151 /* some events require to be handled ASAP
1152 * thus can't be defered to a worker thread */
1153 switch (event_id) {
1154 case WMI_HOST_SWBA_EVENTID:
1155 case WMI_MGMT_RX_EVENTID:
1156 ath10k_wmi_event_process(ar, skb);
1157 return;
1158 default:
1159 break;
1160 }
1161
1162 skb_queue_tail(&ar->wmi.wmi_event_list, skb);
1163 queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
1164}
1165
1166/* WMI Initialization functions */ 1177/* WMI Initialization functions */
1167int ath10k_wmi_attach(struct ath10k *ar) 1178int ath10k_wmi_attach(struct ath10k *ar)
1168{ 1179{
1169 init_completion(&ar->wmi.service_ready); 1180 init_completion(&ar->wmi.service_ready);
1170 init_completion(&ar->wmi.unified_ready); 1181 init_completion(&ar->wmi.unified_ready);
1171 init_waitqueue_head(&ar->wmi.wq); 1182 init_waitqueue_head(&ar->wmi.tx_credits_wq);
1172
1173 skb_queue_head_init(&ar->wmi.wmi_event_list);
1174 INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
1175 1183
1176 return 0; 1184 return 0;
1177} 1185}
1178 1186
1179void ath10k_wmi_detach(struct ath10k *ar) 1187void ath10k_wmi_detach(struct ath10k *ar)
1180{ 1188{
1181 /* HTC should've drained the packets already */
1182 if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
1183 ath10k_warn("there are still pending packets\n");
1184
1185 cancel_work_sync(&ar->wmi.wmi_event_work);
1186 skb_queue_purge(&ar->wmi.wmi_event_list);
1187} 1189}
1188 1190
1189int ath10k_wmi_connect_htc_service(struct ath10k *ar) 1191int ath10k_wmi_connect_htc_service(struct ath10k *ar)
@@ -1198,6 +1200,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
1198 /* these fields are the same for all service endpoints */ 1200 /* these fields are the same for all service endpoints */
1199 conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete; 1201 conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
1200 conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx; 1202 conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
1203 conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
1201 1204
1202 /* connect to control service */ 1205 /* connect to control service */
1203 conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; 1206 conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
@@ -1758,7 +1761,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
1758 cmd = (struct wmi_vdev_up_cmd *)skb->data; 1761 cmd = (struct wmi_vdev_up_cmd *)skb->data;
1759 cmd->vdev_id = __cpu_to_le32(vdev_id); 1762 cmd->vdev_id = __cpu_to_le32(vdev_id);
1760 cmd->vdev_assoc_id = __cpu_to_le32(aid); 1763 cmd->vdev_assoc_id = __cpu_to_le32(aid);
1761 memcpy(&cmd->vdev_bssid.addr, bssid, 6); 1764 memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
1762 1765
1763 ath10k_dbg(ATH10K_DBG_WMI, 1766 ath10k_dbg(ATH10K_DBG_WMI,
1764 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 1767 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
@@ -1918,7 +1921,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
1918 cmd->vdev_id = __cpu_to_le32(vdev_id); 1921 cmd->vdev_id = __cpu_to_le32(vdev_id);
1919 cmd->param_id = __cpu_to_le32(param_id); 1922 cmd->param_id = __cpu_to_le32(param_id);
1920 cmd->param_value = __cpu_to_le32(param_value); 1923 cmd->param_value = __cpu_to_le32(param_value);
1921 memcpy(&cmd->peer_macaddr.addr, peer_addr, 6); 1924 memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
1922 1925
1923 ath10k_dbg(ATH10K_DBG_WMI, 1926 ath10k_dbg(ATH10K_DBG_WMI,
1924 "wmi vdev %d peer 0x%pM set param %d value %d\n", 1927 "wmi vdev %d peer 0x%pM set param %d value %d\n",
@@ -2108,7 +2111,8 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
2108 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID); 2111 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
2109} 2112}
2110 2113
2111int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg) 2114int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
2115 const struct wmi_bcn_tx_arg *arg)
2112{ 2116{
2113 struct wmi_bcn_tx_cmd *cmd; 2117 struct wmi_bcn_tx_cmd *cmd;
2114 struct sk_buff *skb; 2118 struct sk_buff *skb;
@@ -2124,7 +2128,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
2124 cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len); 2128 cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
2125 memcpy(cmd->bcn, arg->bcn, arg->bcn_len); 2129 memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
2126 2130
2127 return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID); 2131 return ath10k_wmi_cmd_send_nowait(ar, skb, WMI_BCN_TX_CMDID);
2128} 2132}
2129 2133
2130static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, 2134static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
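The rewritten ath10k_wmi_cmd_send() replaces the pending-counter scheme with HTC credit waiting: a non-blocking send is retried from inside wait_event_timeout() until it stops returning -EAGAIN or the timeout expires. The skeleton of that pattern, with try_send() standing in for ath10k_wmi_cmd_send_nowait() (assumed name):

    static int example_send_with_credits(struct ath10k *ar, struct sk_buff *skb)
    {
            int ret = -EINVAL;

            wait_event_timeout(ar->wmi.tx_credits_wq, ({
                    /* retried each time the credit waitqueue is woken */
                    ret = try_send(ar, skb);
                    (ret != -EAGAIN);
            }), 3 * HZ);

            return ret;
    }

The waitqueue is woken from ath10k_wmi_op_ep_tx_credits() when HTC reports new tx credits, which is also where pending beacons are flushed first.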
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2c5a4f8daf2e..2c52c23107dd 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -508,6 +508,48 @@ enum wmi_phy_mode {
508 MODE_MAX = 14 508 MODE_MAX = 14
509}; 509};
510 510
511static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
512{
513 switch (mode) {
514 case MODE_11A:
515 return "11a";
516 case MODE_11G:
517 return "11g";
518 case MODE_11B:
519 return "11b";
520 case MODE_11GONLY:
521 return "11gonly";
522 case MODE_11NA_HT20:
523 return "11na-ht20";
524 case MODE_11NG_HT20:
525 return "11ng-ht20";
526 case MODE_11NA_HT40:
527 return "11na-ht40";
528 case MODE_11NG_HT40:
529 return "11ng-ht40";
530 case MODE_11AC_VHT20:
531 return "11ac-vht20";
532 case MODE_11AC_VHT40:
533 return "11ac-vht40";
534 case MODE_11AC_VHT80:
535 return "11ac-vht80";
536 case MODE_11AC_VHT20_2G:
537 return "11ac-vht20-2g";
538 case MODE_11AC_VHT40_2G:
539 return "11ac-vht40-2g";
540 case MODE_11AC_VHT80_2G:
541 return "11ac-vht80-2g";
542 case MODE_UNKNOWN:
543 /* skip */
544 break;
545
546 /* no default handler to allow compiler to check that the
547 * enum is fully handled */
548 };
549
550 return "<unknown>";
551}
552
511#define WMI_CHAN_LIST_TAG 0x1 553#define WMI_CHAN_LIST_TAG 0x1
512#define WMI_SSID_LIST_TAG 0x2 554#define WMI_SSID_LIST_TAG 0x2
513#define WMI_BSSID_LIST_TAG 0x3 555#define WMI_BSSID_LIST_TAG 0x3
@@ -763,14 +805,6 @@ struct wmi_service_ready_event {
763 struct wlan_host_mem_req mem_reqs[1]; 805 struct wlan_host_mem_req mem_reqs[1];
764} __packed; 806} __packed;
765 807
766/*
767 * status consists of upper 16 bits fo int status and lower 16 bits of
768 * module ID that retuned status
769 */
770#define WLAN_INIT_STATUS_SUCCESS 0x0
771#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff)
772#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
773
774#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ) 808#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
775#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ) 809#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
776 810
@@ -1268,7 +1302,7 @@ struct wmi_scan_event {
1268 * good idea to pass all the fields in the RX status 1302 * good idea to pass all the fields in the RX status
1269 * descriptor up to the host. 1303 * descriptor up to the host.
1270 */ 1304 */
1271struct wmi_mgmt_rx_hdr { 1305struct wmi_mgmt_rx_hdr_v1 {
1272 __le32 channel; 1306 __le32 channel;
1273 __le32 snr; 1307 __le32 snr;
1274 __le32 rate; 1308 __le32 rate;
@@ -1277,8 +1311,18 @@ struct wmi_mgmt_rx_hdr {
1277 __le32 status; /* %WMI_RX_STATUS_ */ 1311 __le32 status; /* %WMI_RX_STATUS_ */
1278} __packed; 1312} __packed;
1279 1313
1280struct wmi_mgmt_rx_event { 1314struct wmi_mgmt_rx_hdr_v2 {
1281 struct wmi_mgmt_rx_hdr hdr; 1315 struct wmi_mgmt_rx_hdr_v1 v1;
1316 __le32 rssi_ctl[4];
1317} __packed;
1318
1319struct wmi_mgmt_rx_event_v1 {
1320 struct wmi_mgmt_rx_hdr_v1 hdr;
1321 u8 buf[0];
1322} __packed;
1323
1324struct wmi_mgmt_rx_event_v2 {
1325 struct wmi_mgmt_rx_hdr_v2 hdr;
1282 u8 buf[0]; 1326 u8 buf[0];
1283} __packed; 1327} __packed;
1284 1328
@@ -3000,7 +3044,6 @@ struct wmi_force_fw_hang_cmd {
3000 3044
3001#define WMI_MAX_EVENT 0x1000 3045#define WMI_MAX_EVENT 0x1000
3002/* Maximum number of pending TXed WMI packets */ 3046/* Maximum number of pending TXed WMI packets */
3003#define WMI_MAX_PENDING_TX_COUNT 128
3004#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr) 3047#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
3005 3048
3006/* By default disable power save for IBSS */ 3049/* By default disable power save for IBSS */
@@ -3013,7 +3056,6 @@ int ath10k_wmi_attach(struct ath10k *ar);
3013void ath10k_wmi_detach(struct ath10k *ar); 3056void ath10k_wmi_detach(struct ath10k *ar);
3014int ath10k_wmi_wait_for_service_ready(struct ath10k *ar); 3057int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
3015int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar); 3058int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
3016void ath10k_wmi_flush_tx(struct ath10k *ar);
3017 3059
3018int ath10k_wmi_connect_htc_service(struct ath10k *ar); 3060int ath10k_wmi_connect_htc_service(struct ath10k *ar);
3019int ath10k_wmi_pdev_set_channel(struct ath10k *ar, 3061int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -3066,7 +3108,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
3066 enum wmi_ap_ps_peer_param param_id, u32 value); 3108 enum wmi_ap_ps_peer_param param_id, u32 value);
3067int ath10k_wmi_scan_chan_list(struct ath10k *ar, 3109int ath10k_wmi_scan_chan_list(struct ath10k *ar,
3068 const struct wmi_scan_chan_list_arg *arg); 3110 const struct wmi_scan_chan_list_arg *arg);
3069int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg); 3111int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
3112 const struct wmi_bcn_tx_arg *arg);
3070int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, 3113int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
3071 const struct wmi_pdev_set_wmm_params_arg *arg); 3114 const struct wmi_pdev_set_wmm_params_arg *arg);
3072int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id); 3115int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
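ath10k_wmi_phymode_str() maps the WMI phy mode enum to a printable name and deliberately omits a default case so the compiler can warn when a new mode is added. A hedged usage sketch; this call site is illustrative, not from the patch:

    static void example_log_phymode(enum wmi_phy_mode mode)
    {
            ath10k_dbg(ATH10K_DBG_WMI, "phy mode %s\n",
                       ath10k_wmi_phymode_str(mode));
    }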
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e9bc9e616b69..79bffe165cab 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -37,12 +37,9 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
37{ 37{
38 struct ath5k_hw *ah = common->priv; 38 struct ath5k_hw *ah = common->priv;
39 struct platform_device *pdev = to_platform_device(ah->dev); 39 struct platform_device *pdev = to_platform_device(ah->dev);
40 struct ar231x_board_config *bcfg = pdev->dev.platform_data; 40 struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
41 u16 *eeprom, *eeprom_end; 41 u16 *eeprom, *eeprom_end;
42 42
43
44
45 bcfg = pdev->dev.platform_data;
46 eeprom = (u16 *) bcfg->radio; 43 eeprom = (u16 *) bcfg->radio;
47 eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ; 44 eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
48 45
@@ -57,7 +54,7 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
57int ath5k_hw_read_srev(struct ath5k_hw *ah) 54int ath5k_hw_read_srev(struct ath5k_hw *ah)
58{ 55{
59 struct platform_device *pdev = to_platform_device(ah->dev); 56 struct platform_device *pdev = to_platform_device(ah->dev);
60 struct ar231x_board_config *bcfg = pdev->dev.platform_data; 57 struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
61 ah->ah_mac_srev = bcfg->devid; 58 ah->ah_mac_srev = bcfg->devid;
62 return 0; 59 return 0;
63} 60}
@@ -65,7 +62,7 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
65static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac) 62static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
66{ 63{
67 struct platform_device *pdev = to_platform_device(ah->dev); 64 struct platform_device *pdev = to_platform_device(ah->dev);
68 struct ar231x_board_config *bcfg = pdev->dev.platform_data; 65 struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
69 u8 *cfg_mac; 66 u8 *cfg_mac;
70 67
71 if (to_platform_device(ah->dev)->id == 0) 68 if (to_platform_device(ah->dev)->id == 0)
@@ -87,7 +84,7 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
87/*Initialization*/ 84/*Initialization*/
88static int ath_ahb_probe(struct platform_device *pdev) 85static int ath_ahb_probe(struct platform_device *pdev)
89{ 86{
90 struct ar231x_board_config *bcfg = pdev->dev.platform_data; 87 struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
91 struct ath5k_hw *ah; 88 struct ath5k_hw *ah;
92 struct ieee80211_hw *hw; 89 struct ieee80211_hw *hw;
93 struct resource *res; 90 struct resource *res;
@@ -96,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
96 int ret = 0; 93 int ret = 0;
97 u32 reg; 94 u32 reg;
98 95
99 if (!pdev->dev.platform_data) { 96 if (!dev_get_platdata(&pdev->dev)) {
100 dev_err(&pdev->dev, "no platform data specified\n"); 97 dev_err(&pdev->dev, "no platform data specified\n");
101 ret = -EINVAL; 98 ret = -EINVAL;
102 goto err_out; 99 goto err_out;
@@ -193,7 +190,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
193 190
194static int ath_ahb_remove(struct platform_device *pdev) 191static int ath_ahb_remove(struct platform_device *pdev)
195{ 192{
196 struct ar231x_board_config *bcfg = pdev->dev.platform_data; 193 struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
197 struct ieee80211_hw *hw = platform_get_drvdata(pdev); 194 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
198 struct ath5k_hw *ah; 195 struct ath5k_hw *ah;
199 u32 reg; 196 u32 reg;
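Every open-coded pdev->dev.platform_data access in this file becomes dev_get_platdata(). A condensed sketch of the resulting probe-time pattern; the function name is illustrative, and the NULL check mirrors ath_ahb_probe() above:

    static int example_ahb_probe(struct platform_device *pdev)
    {
            struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);

            if (!bcfg) {
                    dev_err(&pdev->dev, "no platform data specified\n");
                    return -EINVAL;
            }

            /* bcfg->radio, bcfg->config and bcfg->devid are used as before */
            return 0;
    }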
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index 98a886154d9c..05debf700a84 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -22,8 +22,7 @@
22 22
23#define ATH6KL_MAX_IE 256 23#define ATH6KL_MAX_IE 256
24 24
25extern __printf(2, 3) 25__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
26int ath6kl_printk(const char *level, const char *fmt, ...);
27 26
28/* 27/*
29 * Reflects the version of binary interface exposed by ATH6KL target 28 * Reflects the version of binary interface exposed by ATH6KL target
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 74369de00fb5..ca9ba005f287 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -50,11 +50,10 @@ enum ATH6K_DEBUG_MASK {
50}; 50};
51 51
52extern unsigned int debug_mask; 52extern unsigned int debug_mask;
53extern __printf(2, 3) 53__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
54int ath6kl_printk(const char *level, const char *fmt, ...); 54__printf(1, 2) int ath6kl_info(const char *fmt, ...);
55extern __printf(1, 2) int ath6kl_info(const char *fmt, ...); 55__printf(1, 2) int ath6kl_err(const char *fmt, ...);
56extern __printf(1, 2) int ath6kl_err(const char *fmt, ...); 56__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
57extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
58 57
59enum ath6kl_war { 58enum ath6kl_war {
60 ATH6KL_WAR_INVALID_RATE, 59 ATH6KL_WAR_INVALID_RATE,
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 072e4b531067..2dff2765769b 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -54,7 +54,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
54 struct platform_device *pdev = to_platform_device(sc->dev); 54 struct platform_device *pdev = to_platform_device(sc->dev);
55 struct ath9k_platform_data *pdata; 55 struct ath9k_platform_data *pdata;
56 56
57 pdata = (struct ath9k_platform_data *) pdev->dev.platform_data; 57 pdata = dev_get_platdata(&pdev->dev);
58 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) { 58 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
59 ath_err(common, 59 ath_err(common,
60 "%s: flash read failed, offset %08x is out of range\n", 60 "%s: flash read failed, offset %08x is out of range\n",
@@ -84,7 +84,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
84 struct ath_hw *ah; 84 struct ath_hw *ah;
85 char hw_name[64]; 85 char hw_name[64];
86 86
87 if (!pdev->dev.platform_data) { 87 if (!dev_get_platdata(&pdev->dev)) {
88 dev_err(&pdev->dev, "no platform data specified\n"); 88 dev_err(&pdev->dev, "no platform data specified\n");
89 return -EINVAL; 89 return -EINVAL;
90 } 90 }
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index be466b0ef7a7..d28923b7435b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -338,10 +338,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
338 aniState->cckNoiseImmunityLevel != 338 aniState->cckNoiseImmunityLevel !=
339 ATH9K_ANI_CCK_DEF_LEVEL) { 339 ATH9K_ANI_CCK_DEF_LEVEL) {
340 ath_dbg(common, ANI, 340 ath_dbg(common, ANI,
341 "Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n", 341 "Restore defaults: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
342 ah->opmode, 342 ah->opmode,
343 chan->channel, 343 chan->channel,
344 chan->channelFlags,
345 is_scanning, 344 is_scanning,
346 aniState->ofdmNoiseImmunityLevel, 345 aniState->ofdmNoiseImmunityLevel,
347 aniState->cckNoiseImmunityLevel); 346 aniState->cckNoiseImmunityLevel);
@@ -354,10 +353,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
354 * restore historical levels for this channel 353 * restore historical levels for this channel
355 */ 354 */
356 ath_dbg(common, ANI, 355 ath_dbg(common, ANI,
357 "Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n", 356 "Restore history: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
358 ah->opmode, 357 ah->opmode,
359 chan->channel, 358 chan->channel,
360 chan->channelFlags,
361 is_scanning, 359 is_scanning,
362 aniState->ofdmNoiseImmunityLevel, 360 aniState->ofdmNoiseImmunityLevel,
363 aniState->cckNoiseImmunityLevel); 361 aniState->cckNoiseImmunityLevel);
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index dd1cc73d7946..bd048cc69a33 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -332,7 +332,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
332 } 332 }
333 333
334 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 + 334 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
335 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA) 335 div_ant_conf->lna1_lna2_switch_delta)
336 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2; 336 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
337 else 337 else
338 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1; 338 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
@@ -554,42 +554,22 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
554 ant_conf->fast_div_bias = 0x1; 554 ant_conf->fast_div_bias = 0x1;
555 break; 555 break;
556 case 0x10: /* LNA2 A-B */ 556 case 0x10: /* LNA2 A-B */
557 if ((antcomb->scan == 0) && 557 ant_conf->fast_div_bias = 0x2;
558 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
559 ant_conf->fast_div_bias = 0x3f;
560 } else {
561 ant_conf->fast_div_bias = 0x1;
562 }
563 break; 558 break;
564 case 0x12: /* LNA2 LNA1 */ 559 case 0x12: /* LNA2 LNA1 */
565 ant_conf->fast_div_bias = 0x39; 560 ant_conf->fast_div_bias = 0x3f;
566 break; 561 break;
567 case 0x13: /* LNA2 A+B */ 562 case 0x13: /* LNA2 A+B */
568 if ((antcomb->scan == 0) && 563 ant_conf->fast_div_bias = 0x2;
569 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
570 ant_conf->fast_div_bias = 0x3f;
571 } else {
572 ant_conf->fast_div_bias = 0x1;
573 }
574 break; 564 break;
575 case 0x20: /* LNA1 A-B */ 565 case 0x20: /* LNA1 A-B */
576 if ((antcomb->scan == 0) && 566 ant_conf->fast_div_bias = 0x3;
577 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
578 ant_conf->fast_div_bias = 0x3f;
579 } else {
580 ant_conf->fast_div_bias = 0x4;
581 }
582 break; 567 break;
583 case 0x21: /* LNA1 LNA2 */ 568 case 0x21: /* LNA1 LNA2 */
584 ant_conf->fast_div_bias = 0x6; 569 ant_conf->fast_div_bias = 0x3;
585 break; 570 break;
586 case 0x23: /* LNA1 A+B */ 571 case 0x23: /* LNA1 A+B */
587 if ((antcomb->scan == 0) && 572 ant_conf->fast_div_bias = 0x3;
588 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
589 ant_conf->fast_div_bias = 0x3f;
590 } else {
591 ant_conf->fast_div_bias = 0x6;
592 }
593 break; 573 break;
594 case 0x30: /* A+B A-B */ 574 case 0x30: /* A+B A-B */
595 ant_conf->fast_div_bias = 0x1; 575 ant_conf->fast_div_bias = 0x1;
@@ -638,7 +618,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
638 antcomb->rssi_sub = alt_rssi_avg; 618 antcomb->rssi_sub = alt_rssi_avg;
639 antcomb->scan = false; 619 antcomb->scan = false;
640 if (antcomb->rssi_lna2 > 620 if (antcomb->rssi_lna2 >
641 (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) { 621 (antcomb->rssi_lna1 + conf->lna1_lna2_switch_delta)) {
642 /* use LNA2 as main LNA */ 622 /* use LNA2 as main LNA */
643 if ((antcomb->rssi_add > antcomb->rssi_lna1) && 623 if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
644 (antcomb->rssi_add > antcomb->rssi_sub)) { 624 (antcomb->rssi_add > antcomb->rssi_sub)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 08656473c63e..ff415e863ee9 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -626,12 +626,11 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
626 if (AR_SREV_9287_11_OR_LATER(ah)) 626 if (AR_SREV_9287_11_OR_LATER(ah))
627 val = val & (~AR_PCU_MISC_MODE2_HWWAR2); 627 val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
628 628
629 val |= AR_PCU_MISC_MODE2_CFP_IGNORE;
630
629 REG_WRITE(ah, AR_PCU_MISC_MODE2, val); 631 REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
630 } 632 }
631 633
632 REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
633 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
634
635 if (AR_SREV_9280_20_OR_LATER(ah)) 634 if (AR_SREV_9280_20_OR_LATER(ah))
636 return; 635 return;
637 /* 636 /*
@@ -667,14 +666,13 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
667 if (IS_CHAN_HT40(chan)) { 666 if (IS_CHAN_HT40(chan)) {
668 phymode |= AR_PHY_FC_DYN2040_EN; 667 phymode |= AR_PHY_FC_DYN2040_EN;
669 668
670 if ((chan->chanmode == CHANNEL_A_HT40PLUS) || 669 if (IS_CHAN_HT40PLUS(chan))
671 (chan->chanmode == CHANNEL_G_HT40PLUS))
672 phymode |= AR_PHY_FC_DYN2040_PRI_CH; 670 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
673 671
674 } 672 }
675 REG_WRITE(ah, AR_PHY_TURBO, phymode); 673 REG_WRITE(ah, AR_PHY_TURBO, phymode);
676 674
677 ath9k_hw_set11nmac2040(ah); 675 ath9k_hw_set11nmac2040(ah, chan);
678 676
679 ENABLE_REGWRITE_BUFFER(ah); 677 ENABLE_REGWRITE_BUFFER(ah);
680 678
@@ -692,31 +690,12 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
692 int i, regWrites = 0; 690 int i, regWrites = 0;
693 u32 modesIndex, freqIndex; 691 u32 modesIndex, freqIndex;
694 692
695 switch (chan->chanmode) { 693 if (IS_CHAN_5GHZ(chan)) {
696 case CHANNEL_A:
697 case CHANNEL_A_HT20:
698 modesIndex = 1;
699 freqIndex = 1;
700 break;
701 case CHANNEL_A_HT40PLUS:
702 case CHANNEL_A_HT40MINUS:
703 modesIndex = 2;
704 freqIndex = 1; 694 freqIndex = 1;
705 break; 695 modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
706 case CHANNEL_G: 696 } else {
707 case CHANNEL_G_HT20:
708 case CHANNEL_B:
709 modesIndex = 4;
710 freqIndex = 2;
711 break;
712 case CHANNEL_G_HT40PLUS:
713 case CHANNEL_G_HT40MINUS:
714 modesIndex = 3;
715 freqIndex = 2; 697 freqIndex = 2;
716 break; 698 modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
717
718 default:
719 return -EINVAL;
720 } 699 }
721 700
722 /* 701 /*
@@ -815,8 +794,10 @@ static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
815 if (chan == NULL) 794 if (chan == NULL)
816 return; 795 return;
817 796
818 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan)) 797 if (IS_CHAN_2GHZ(chan))
819 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM; 798 rfMode |= AR_PHY_MODE_DYNAMIC;
799 else
800 rfMode |= AR_PHY_MODE_OFDM;
820 801
821 if (!AR_SREV_9280_20_OR_LATER(ah)) 802 if (!AR_SREV_9280_20_OR_LATER(ah))
822 rfMode |= (IS_CHAN_5GHZ(chan)) ? 803 rfMode |= (IS_CHAN_5GHZ(chan)) ?
@@ -1219,12 +1200,11 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
1219 1200
1220 iniDef = &aniState->iniDef; 1201 iniDef = &aniState->iniDef;
1221 1202
1222 ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n", 1203 ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
1223 ah->hw_version.macVersion, 1204 ah->hw_version.macVersion,
1224 ah->hw_version.macRev, 1205 ah->hw_version.macRev,
1225 ah->opmode, 1206 ah->opmode,
1226 chan->channel, 1207 chan->channel);
1227 chan->channelFlags);
1228 1208
1229 val = REG_READ(ah, AR_PHY_SFCORR); 1209 val = REG_READ(ah, AR_PHY_SFCORR);
1230 iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH); 1210 iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 9f589744a9f9..cdc74005650c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -33,15 +33,12 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
33 bool supported = false; 33 bool supported = false;
34 switch (ah->supp_cals & cal_type) { 34 switch (ah->supp_cals & cal_type) {
35 case IQ_MISMATCH_CAL: 35 case IQ_MISMATCH_CAL:
36 /* Run IQ Mismatch for non-CCK only */ 36 supported = true;
37 if (!IS_CHAN_B(chan))
38 supported = true;
39 break; 37 break;
40 case ADC_GAIN_CAL: 38 case ADC_GAIN_CAL:
41 case ADC_DC_CAL: 39 case ADC_DC_CAL:
42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ 40 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
43 if (!IS_CHAN_B(chan) && 41 if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
44 !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
45 IS_CHAN_HT20(chan))) 42 IS_CHAN_HT20(chan)))
46 supported = true; 43 supported = true;
47 break; 44 break;
@@ -671,7 +668,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
671 668
672 nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF); 669 nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
673 if (ah->caldata) 670 if (ah->caldata)
674 nfcal_pending = ah->caldata->nfcal_pending; 671 nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
675 672
676 if (currCal && !nfcal && 673 if (currCal && !nfcal &&
677 (currCal->calState == CAL_RUNNING || 674 (currCal->calState == CAL_RUNNING ||
@@ -861,7 +858,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
861 ar9002_hw_pa_cal(ah, true); 858 ar9002_hw_pa_cal(ah, true);
862 859
863 if (ah->caldata) 860 if (ah->caldata)
864 ah->caldata->nfcal_pending = true; 861 set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
865 862
866 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; 863 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
867 864
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index fb61b081d172..5c95fd9e9c9e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -419,28 +419,10 @@ void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
419 u32 modesIndex; 419 u32 modesIndex;
420 int i; 420 int i;
421 421
422 switch (chan->chanmode) { 422 if (IS_CHAN_5GHZ(chan))
423 case CHANNEL_A: 423 modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
424 case CHANNEL_A_HT20: 424 else
425 modesIndex = 1; 425 modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
426 break;
427 case CHANNEL_A_HT40PLUS:
428 case CHANNEL_A_HT40MINUS:
429 modesIndex = 2;
430 break;
431 case CHANNEL_G:
432 case CHANNEL_G_HT20:
433 case CHANNEL_B:
434 modesIndex = 4;
435 break;
436 case CHANNEL_G_HT40PLUS:
437 case CHANNEL_G_HT40MINUS:
438 modesIndex = 3;
439 break;
440
441 default:
442 return;
443 }
444 426
445 ENABLE_REGWRITE_BUFFER(ah); 427 ENABLE_REGWRITE_BUFFER(ah);
446 428
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 1fc1fa955d44..17970d49d858 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -485,7 +485,7 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
485 if (IS_CHAN_HT40(ah->curchan)) 485 if (IS_CHAN_HT40(ah->curchan))
486 nfarray[3] = sign_extend32(nf, 8); 486 nfarray[3] = sign_extend32(nf, 8);
487 487
488 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) 488 if (!(ah->rxchainmask & BIT(1)))
489 return; 489 return;
490 490
491 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR); 491 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
@@ -532,6 +532,7 @@ static void ar9002_hw_antdiv_comb_conf_get(struct ath_hw *ah,
532 AR_PHY_9285_ANT_DIV_ALT_LNACONF_S; 532 AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
533 antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >> 533 antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
534 AR_PHY_9285_FAST_DIV_BIAS_S; 534 AR_PHY_9285_FAST_DIV_BIAS_S;
535 antconf->lna1_lna2_switch_delta = -1;
535 antconf->lna1_lna2_delta = -3; 536 antconf->lna1_lna2_delta = -3;
536 antconf->div_group = 0; 537 antconf->div_group = 0;
537} 538}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 6988e1d081f2..22934d3ca544 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -727,8 +727,12 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
727 REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0, 727 REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
728 AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1); 728 AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
729 729
730 if (caldata) 730 if (caldata) {
731 caldata->done_txiqcal_once = is_reusable; 731 if (is_reusable)
732 set_bit(TXIQCAL_DONE, &caldata->cal_flags);
733 else
734 clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
735 }
732 736
733 return; 737 return;
734} 738}
@@ -961,18 +965,44 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
961} 965}
962 966
963static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah, 967static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
964 struct ath9k_channel *chan) 968 struct ath9k_channel *chan,
969 bool run_rtt_cal)
965{ 970{
971 struct ath9k_hw_cal_data *caldata = ah->caldata;
966 int i; 972 int i;
967 973
968 if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah)) 974 if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
969 return; 975 return;
970 976
977 if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
978 return;
979
971 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 980 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
972 if (!(ah->rxchainmask & (1 << i))) 981 if (!(ah->rxchainmask & (1 << i)))
973 continue; 982 continue;
974 ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan)); 983 ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
975 } 984 }
985
986 if (caldata)
987 set_bit(SW_PKDET_DONE, &caldata->cal_flags);
988
989 if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && caldata) {
990 if (IS_CHAN_2GHZ(chan)){
991 caldata->caldac[0] = REG_READ_FIELD(ah,
992 AR_PHY_65NM_RXRF_AGC(0),
993 AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
994 caldata->caldac[1] = REG_READ_FIELD(ah,
995 AR_PHY_65NM_RXRF_AGC(1),
996 AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
997 } else {
998 caldata->caldac[0] = REG_READ_FIELD(ah,
999 AR_PHY_65NM_RXRF_AGC(0),
1000 AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
1001 caldata->caldac[1] = REG_READ_FIELD(ah,
1002 AR_PHY_65NM_RXRF_AGC(1),
1003 AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
1004 }
1005 }
976} 1006}
977 1007
978static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable) 1008static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
@@ -990,7 +1020,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
990 txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & 1020 txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
991 AR_PHY_AGC_CONTROL_CLC_SUCCESS); 1021 AR_PHY_AGC_CONTROL_CLC_SUCCESS);
992 1022
993 if (caldata->done_txclcal_once) { 1023 if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
994 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 1024 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
995 if (!(ah->txchainmask & (1 << i))) 1025 if (!(ah->txchainmask & (1 << i)))
996 continue; 1026 continue;
@@ -1006,7 +1036,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
1006 caldata->tx_clcal[i][j] = 1036 caldata->tx_clcal[i][j] =
1007 REG_READ(ah, CL_TAB_ENTRY(cl_idx[i])); 1037 REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
1008 } 1038 }
1009 caldata->done_txclcal_once = true; 1039 set_bit(TXCLCAL_DONE, &caldata->cal_flags);
1010 } 1040 }
1011} 1041}
1012 1042
@@ -1019,6 +1049,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
1019 bool is_reusable = true, status = true; 1049 bool is_reusable = true, status = true;
1020 bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false; 1050 bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
1021 bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT); 1051 bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
1052 u32 rx_delay = 0;
1022 u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL | 1053 u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
1023 AR_PHY_AGC_CONTROL_FLTR_CAL | 1054 AR_PHY_AGC_CONTROL_FLTR_CAL |
1024 AR_PHY_AGC_CONTROL_PKDET_CAL; 1055 AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -1042,17 +1073,22 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
1042 ar9003_hw_rtt_clear_hist(ah); 1073 ar9003_hw_rtt_clear_hist(ah);
1043 } 1074 }
1044 1075
1045 if (rtt && !run_rtt_cal) { 1076 if (rtt) {
1046 agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL); 1077 if (!run_rtt_cal) {
1047 agc_supp_cals &= agc_ctrl; 1078 agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
1048 agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL | 1079 agc_supp_cals &= agc_ctrl;
1049 AR_PHY_AGC_CONTROL_FLTR_CAL | 1080 agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
1050 AR_PHY_AGC_CONTROL_PKDET_CAL); 1081 AR_PHY_AGC_CONTROL_FLTR_CAL |
1051 REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl); 1082 AR_PHY_AGC_CONTROL_PKDET_CAL);
1083 REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
1084 } else {
1085 if (ah->ah_flags & AH_FASTCC)
1086 run_agc_cal = true;
1087 }
1052 } 1088 }
1053 1089
1054 if (ah->enabled_cals & TX_CL_CAL) { 1090 if (ah->enabled_cals & TX_CL_CAL) {
1055 if (caldata && caldata->done_txclcal_once) 1091 if (caldata && test_bit(TXCLCAL_DONE, &caldata->cal_flags))
1056 REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, 1092 REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
1057 AR_PHY_CL_CAL_ENABLE); 1093 AR_PHY_CL_CAL_ENABLE);
1058 else { 1094 else {
@@ -1076,14 +1112,14 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
1076 * AGC calibration 1112 * AGC calibration
1077 */ 1113 */
1078 if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) { 1114 if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
1079 if (caldata && !caldata->done_txiqcal_once) 1115 if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
1080 REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0, 1116 REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
1081 AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL); 1117 AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
1082 else 1118 else
1083 REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0, 1119 REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
1084 AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL); 1120 AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
1085 txiqcal_done = run_agc_cal = true; 1121 txiqcal_done = run_agc_cal = true;
1086 } else if (caldata && !caldata->done_txiqcal_once) { 1122 } else if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) {
1087 run_agc_cal = true; 1123 run_agc_cal = true;
1088 sep_iq_cal = true; 1124 sep_iq_cal = true;
1089 } 1125 }
@@ -1099,6 +1135,15 @@ skip_tx_iqcal:
1099 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); 1135 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
1100 } 1136 }
1101 1137
1138 if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
1139 rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
1140 /* Disable BB_active */
1141 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
1142 udelay(5);
1143 REG_WRITE(ah, AR_PHY_RX_DELAY, AR_PHY_RX_DELAY_DELAY);
1144 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
1145 }
1146
1102 if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) { 1147 if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
1103 /* Calibrate the AGC */ 1148 /* Calibrate the AGC */
1104 REG_WRITE(ah, AR_PHY_AGC_CONTROL, 1149 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
@@ -1110,7 +1155,12 @@ skip_tx_iqcal:
1110 AR_PHY_AGC_CONTROL_CAL, 1155 AR_PHY_AGC_CONTROL_CAL,
1111 0, AH_WAIT_TIMEOUT); 1156 0, AH_WAIT_TIMEOUT);
1112 1157
1113 ar9003_hw_do_manual_peak_cal(ah, chan); 1158 ar9003_hw_do_manual_peak_cal(ah, chan, run_rtt_cal);
1159 }
1160
1161 if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
1162 REG_WRITE(ah, AR_PHY_RX_DELAY, rx_delay);
1163 udelay(5);
1114 } 1164 }
1115 1165
1116 if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal) 1166 if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
@@ -1133,19 +1183,23 @@ skip_tx_iqcal:
1133 1183
1134 if (txiqcal_done) 1184 if (txiqcal_done)
1135 ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable); 1185 ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
1136 else if (caldata && caldata->done_txiqcal_once) 1186 else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
1137 ar9003_hw_tx_iq_cal_reload(ah); 1187 ar9003_hw_tx_iq_cal_reload(ah);
1138 1188
1139 ar9003_hw_cl_cal_post_proc(ah, is_reusable); 1189 ar9003_hw_cl_cal_post_proc(ah, is_reusable);
1140 1190
1141 if (run_rtt_cal && caldata) { 1191 if (run_rtt_cal && caldata) {
1142 if (is_reusable) { 1192 if (is_reusable) {
1143 if (!ath9k_hw_rfbus_req(ah)) 1193 if (!ath9k_hw_rfbus_req(ah)) {
1144 ath_err(ath9k_hw_common(ah), 1194 ath_err(ath9k_hw_common(ah),
1145 "Could not stop baseband\n"); 1195 "Could not stop baseband\n");
1146 else 1196 } else {
1147 ar9003_hw_rtt_fill_hist(ah); 1197 ar9003_hw_rtt_fill_hist(ah);
1148 1198
1199 if (test_bit(SW_PKDET_DONE, &caldata->cal_flags))
1200 ar9003_hw_rtt_load_hist(ah);
1201 }
1202
1149 ath9k_hw_rfbus_done(ah); 1203 ath9k_hw_rfbus_done(ah);
1150 } 1204 }
1151 1205
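Together with the nfcal_pending change in ar9002_calib.c above, the per-calibration booleans (done_txiqcal_once, done_txclcal_once) are folded into a single cal_flags bitmap handled with the standard bitops, and SW_PKDET_DONE is a new flag recording that the manual peak calibration ran. A minimal sketch of the idiom, assuming cal_flags is an unsigned long in struct ath9k_hw_cal_data:

    static void example_record_txiqcal(struct ath9k_hw_cal_data *caldata,
                                       bool is_reusable)
    {
            if (is_reusable)
                    set_bit(TXIQCAL_DONE, &caldata->cal_flags);
            else
                    clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
    }

    static bool example_txiqcal_done(struct ath9k_hw_cal_data *caldata)
    {
            return test_bit(TXIQCAL_DONE, &caldata->cal_flags);
    }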
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index f4864807e15b..1ec52356b5a1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -2991,7 +2991,10 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
2991 case EEP_CHAIN_MASK_REDUCE: 2991 case EEP_CHAIN_MASK_REDUCE:
2992 return (pBase->miscConfiguration >> 0x3) & 0x1; 2992 return (pBase->miscConfiguration >> 0x3) & 0x1;
2993 case EEP_ANT_DIV_CTL1: 2993 case EEP_ANT_DIV_CTL1:
2994 return eep->base_ext1.ant_div_control; 2994 if (AR_SREV_9565(ah))
2995 return AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE;
2996 else
2997 return eep->base_ext1.ant_div_control;
2995 case EEP_ANTENNA_GAIN_5G: 2998 case EEP_ANTENNA_GAIN_5G:
2996 return eep->modalHeader5G.antennaGain; 2999 return eep->modalHeader5G.antennaGain;
2997 case EEP_ANTENNA_GAIN_2G: 3000 case EEP_ANTENNA_GAIN_2G:
@@ -3424,12 +3427,12 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
3424 struct ar9300_base_eep_hdr *pBase; 3427 struct ar9300_base_eep_hdr *pBase;
3425 3428
3426 if (!dump_base_hdr) { 3429 if (!dump_base_hdr) {
3427 len += snprintf(buf + len, size - len, 3430 len += scnprintf(buf + len, size - len,
3428 "%20s :\n", "2GHz modal Header"); 3431 "%20s :\n", "2GHz modal Header");
3429 len = ar9003_dump_modal_eeprom(buf, len, size, 3432 len = ar9003_dump_modal_eeprom(buf, len, size,
3430 &eep->modalHeader2G); 3433 &eep->modalHeader2G);
3431 len += snprintf(buf + len, size - len, 3434 len += scnprintf(buf + len, size - len,
3432 "%20s :\n", "5GHz modal Header"); 3435 "%20s :\n", "5GHz modal Header");
3433 len = ar9003_dump_modal_eeprom(buf, len, size, 3436 len = ar9003_dump_modal_eeprom(buf, len, size,
3434 &eep->modalHeader5G); 3437 &eep->modalHeader5G);
3435 goto out; 3438 goto out;
@@ -3479,8 +3482,8 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
3479 PR_EEP("Rx Gain", pBase->txrxgain & 0xf); 3482 PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
3480 PR_EEP("SW Reg", le32_to_cpu(pBase->swreg)); 3483 PR_EEP("SW Reg", le32_to_cpu(pBase->swreg));
3481 3484
3482 len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", 3485 len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
3483 ah->eeprom.ar9300_eep.macAddr); 3486 ah->eeprom.ar9300_eep.macAddr);
3484out: 3487out:
3485 if (len > size) 3488 if (len > size)
3486 len = size; 3489 len = size;
@@ -3656,9 +3659,23 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3656 if (AR_SREV_9565(ah)) { 3659 if (AR_SREV_9565(ah)) {
3657 if (common->bt_ant_diversity) { 3660 if (common->bt_ant_diversity) {
3658 regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S); 3661 regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
3662
3663 REG_SET_BIT(ah, AR_PHY_RESTART,
3664 AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
3665
3666 /* Force WLAN LNA diversity ON */
3667 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
3668 AR_BTCOEX_WL_LNADIV_FORCE_ON);
3659 } else { 3669 } else {
3660 regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S); 3670 regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
3661 regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S); 3671 regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
3672
3673 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
3674 (1 << AR_PHY_ANT_SW_RX_PROT_S));
3675
3676 /* Force WLAN LNA diversity OFF */
3677 REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
3678 AR_BTCOEX_WL_LNADIV_FORCE_ON);
3662 } 3679 }
3663 } 3680 }
3664 3681
@@ -3669,7 +3686,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3669 regval &= (~AR_FAST_DIV_ENABLE); 3686 regval &= (~AR_FAST_DIV_ENABLE);
3670 regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S; 3687 regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
3671 3688
3672 if (AR_SREV_9485(ah) && common->bt_ant_diversity) 3689 if ((AR_SREV_9485(ah) || AR_SREV_9565(ah))
3690 && common->bt_ant_diversity)
3673 regval |= AR_FAST_DIV_ENABLE; 3691 regval |= AR_FAST_DIV_ENABLE;
3674 3692
3675 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); 3693 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
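[editor's note] The dump_eeprom hunks above replace snprintf() with scnprintf() in the "len +=" accumulation. The difference is only in the return value: snprintf() reports the length the output would have had, so a truncated write pushes len past the buffer and makes the next "size - len" underflow, while scnprintf() reports only what was actually stored. Below is a minimal userspace sketch of that behaviour; my_scnprintf() is a hypothetical stand-in for the kernel helper, not its real implementation.

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical userspace stand-in for the kernel's scnprintf(): returns
 * the number of characters actually stored (at most size - 1), instead
 * of the "would have been" length snprintf() reports on truncation. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int ret;

	if (size == 0)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(buf, size, fmt, ap);
	va_end(ap);

	if (ret < 0)
		return 0;
	return (size_t)ret >= size ? (int)(size - 1) : ret;
}

int main(void)
{
	char buf[16];
	size_t size = sizeof(buf), len = 0;

	/* With plain snprintf(), the first (truncated) call would return 23,
	 * push len past size, and make "size - len" underflow below. */
	len += my_scnprintf(buf + len, size - len, "%20s :\n", "2GHz modal Header");
	len += my_scnprintf(buf + len, size - len, "%20s :\n", "5GHz modal Header");

	printf("stored %zu of %zu bytes: \"%s\"\n", len, size, buf);
	return 0;
}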
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 75d4fb41962f..0e5daa58a4fc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -52,6 +52,8 @@
52#define AR9300_PAPRD_SCALE_2 0x70000000 52#define AR9300_PAPRD_SCALE_2 0x70000000
53#define AR9300_PAPRD_SCALE_2_S 28 53#define AR9300_PAPRD_SCALE_2_S 28
54 54
55#define AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE 0xc9
56
55/* Delta from which to start power to pdadc table */ 57/* Delta from which to start power to pdadc table */
56/* This offset is used in both open loop and closed loop power control 58/* This offset is used in both open loop and closed loop power control
57 * schemes. In open loop power control, it is not really needed, but for 59 * schemes. In open loop power control, it is not really needed, but for
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 608bb4824e2a..b07f164d65cf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -364,6 +364,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
364 364
365 INIT_INI_ARRAY(&ah->iniModesFastClock, 365 INIT_INI_ARRAY(&ah->iniModesFastClock,
366 ar9565_1p0_modes_fast_clock); 366 ar9565_1p0_modes_fast_clock);
367 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
368 ar9565_1p0_baseband_core_txfir_coeff_japan_2484);
367 } else { 369 } else {
368 /* mac */ 370 /* mac */
369 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 371 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -628,6 +630,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
628 else if (AR_SREV_9462_20(ah)) 630 else if (AR_SREV_9462_20(ah))
629 INIT_INI_ARRAY(&ah->iniModesRxGain, 631 INIT_INI_ARRAY(&ah->iniModesRxGain,
630 ar9462_common_rx_gain_table_2p0); 632 ar9462_common_rx_gain_table_2p0);
633 else if (AR_SREV_9565(ah))
634 INIT_INI_ARRAY(&ah->iniModesRxGain,
635 ar9565_1p0_Common_rx_gain_table);
631 else 636 else
632 INIT_INI_ARRAY(&ah->iniModesRxGain, 637 INIT_INI_ARRAY(&ah->iniModesRxGain,
633 ar9300Common_rx_gain_table_2p2); 638 ar9300Common_rx_gain_table_2p2);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 8dd069259e7b..7b94a6c7db3d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -753,9 +753,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
753 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT); 753 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
754 754
755 if (caldata) { 755 if (caldata) {
756 caldata->done_txiqcal_once = false; 756 clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
757 caldata->done_txclcal_once = false; 757 clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
758 caldata->rtt_done = false; 758 clear_bit(RTT_DONE, &caldata->cal_flags);
759 } 759 }
760 760
761 if (!ath9k_hw_init_cal(ah, chan)) 761 if (!ath9k_hw_init_cal(ah, chan))
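[editor's note] This hunk, like the later ones in ar9003_calib.c, ar9003_rtt.c and calib.c, folds the per-field booleans (done_txiqcal_once, done_txclcal_once, rtt_done, nfcal_pending, nfcal_interference) into a single cal_flags word driven by set_bit()/clear_bit()/test_bit(). The kernel bitops are atomic and take a bit index plus an unsigned long pointer; the sketch below is only a simplified, non-atomic userspace analogue of the same pattern, with flag names mirroring the diff.

#include <stdbool.h>
#include <stdio.h>

/* Bit indices, mirroring the cal_flags names used in the patch. */
enum cal_flag_bits {
	TXIQCAL_DONE,
	TXCLCAL_DONE,
	RTT_DONE,
	SW_PKDET_DONE,
	NFCAL_PENDING,
	NFCAL_INTF,
};

/* Simplified, non-atomic stand-ins for set_bit(), clear_bit(), test_bit(). */
static void flag_set(int nr, unsigned long *addr)   { *addr |=  (1UL << nr); }
static void flag_clear(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static bool flag_test(int nr, const unsigned long *addr) { return (*addr >> nr) & 1UL; }

struct caldata { unsigned long cal_flags; };

int main(void)
{
	struct caldata cal = { 0 };

	flag_set(TXIQCAL_DONE, &cal.cal_flags);
	flag_set(RTT_DONE, &cal.cal_flags);

	/* What ar9003_mci_end_reset() does before re-running the init cal. */
	flag_clear(TXIQCAL_DONE, &cal.cal_flags);
	flag_clear(TXCLCAL_DONE, &cal.cal_flags);
	flag_clear(RTT_DONE, &cal.cal_flags);

	printf("TXIQCAL_DONE=%d RTT_DONE=%d\n",
	       flag_test(TXIQCAL_DONE, &cal.cal_flags),
	       flag_test(RTT_DONE, &cal.cal_flags));
	return 0;
}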
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e897648d3233..f3adafd33704 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -551,8 +551,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
551 if (IS_CHAN_HT40(chan)) { 551 if (IS_CHAN_HT40(chan)) {
552 phymode |= AR_PHY_GC_DYN2040_EN; 552 phymode |= AR_PHY_GC_DYN2040_EN;
553 /* Configure control (primary) channel at +-10MHz */ 553 /* Configure control (primary) channel at +-10MHz */
554 if ((chan->chanmode == CHANNEL_A_HT40PLUS) || 554 if (IS_CHAN_HT40PLUS(chan))
555 (chan->chanmode == CHANNEL_G_HT40PLUS))
556 phymode |= AR_PHY_GC_DYN2040_PRI_CH; 555 phymode |= AR_PHY_GC_DYN2040_PRI_CH;
557 556
558 } 557 }
@@ -565,7 +564,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
565 REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode); 564 REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
566 565
567 /* Configure MAC for 20/40 operation */ 566 /* Configure MAC for 20/40 operation */
568 ath9k_hw_set11nmac2040(ah); 567 ath9k_hw_set11nmac2040(ah, chan);
569 568
570 /* global transmit timeout (25 TUs default)*/ 569 /* global transmit timeout (25 TUs default)*/
571 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S); 570 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
@@ -627,11 +626,10 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
627 * MAC addr only will fail. 626 * MAC addr only will fail.
628 */ 627 */
629 val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE); 628 val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
630 REG_WRITE(ah, AR_PCU_MISC_MODE2, 629 val |= AR_AGG_WEP_ENABLE_FIX |
631 val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE); 630 AR_AGG_WEP_ENABLE |
632 631 AR_PCU_MISC_MODE2_CFP_IGNORE;
633 REG_SET_BIT(ah, AR_PHY_CCK_DETECT, 632 REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
634 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
635 633
636 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { 634 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
637 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE, 635 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
@@ -683,41 +681,22 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
683{ 681{
684 int ret; 682 int ret;
685 683
686 switch (chan->chanmode) { 684 if (IS_CHAN_2GHZ(chan)) {
687 case CHANNEL_A: 685 if (IS_CHAN_HT40(chan))
688 case CHANNEL_A_HT20: 686 return 7;
689 if (chan->channel <= 5350)
690 ret = 1;
691 else if ((chan->channel > 5350) && (chan->channel <= 5600))
692 ret = 3;
693 else 687 else
694 ret = 5; 688 return 8;
695 break; 689 }
696
697 case CHANNEL_A_HT40PLUS:
698 case CHANNEL_A_HT40MINUS:
699 if (chan->channel <= 5350)
700 ret = 2;
701 else if ((chan->channel > 5350) && (chan->channel <= 5600))
702 ret = 4;
703 else
704 ret = 6;
705 break;
706
707 case CHANNEL_G:
708 case CHANNEL_G_HT20:
709 case CHANNEL_B:
710 ret = 8;
711 break;
712 690
713 case CHANNEL_G_HT40PLUS: 691 if (chan->channel <= 5350)
714 case CHANNEL_G_HT40MINUS: 692 ret = 1;
715 ret = 7; 693 else if ((chan->channel > 5350) && (chan->channel <= 5600))
716 break; 694 ret = 3;
695 else
696 ret = 5;
717 697
718 default: 698 if (IS_CHAN_HT40(chan))
719 ret = -EINVAL; 699 ret++;
720 }
721 700
722 return ret; 701 return ret;
723} 702}
@@ -728,28 +707,10 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
728 unsigned int regWrites = 0, i; 707 unsigned int regWrites = 0, i;
729 u32 modesIndex; 708 u32 modesIndex;
730 709
731 switch (chan->chanmode) { 710 if (IS_CHAN_5GHZ(chan))
732 case CHANNEL_A: 711 modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
733 case CHANNEL_A_HT20: 712 else
734 modesIndex = 1; 713 modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
735 break;
736 case CHANNEL_A_HT40PLUS:
737 case CHANNEL_A_HT40MINUS:
738 modesIndex = 2;
739 break;
740 case CHANNEL_G:
741 case CHANNEL_G_HT20:
742 case CHANNEL_B:
743 modesIndex = 4;
744 break;
745 case CHANNEL_G_HT40PLUS:
746 case CHANNEL_G_HT40MINUS:
747 modesIndex = 3;
748 break;
749
750 default:
751 return -EINVAL;
752 }
753 714
754 /* 715 /*
755 * SOC, MAC, BB, RADIO initvals. 716 * SOC, MAC, BB, RADIO initvals.
@@ -847,8 +808,10 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
847 if (chan == NULL) 808 if (chan == NULL)
848 return; 809 return;
849 810
850 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan)) 811 if (IS_CHAN_2GHZ(chan))
851 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM; 812 rfMode |= AR_PHY_MODE_DYNAMIC;
813 else
814 rfMode |= AR_PHY_MODE_OFDM;
852 815
853 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 816 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
854 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE); 817 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
@@ -1274,12 +1237,11 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
1274 aniState = &ah->ani; 1237 aniState = &ah->ani;
1275 iniDef = &aniState->iniDef; 1238 iniDef = &aniState->iniDef;
1276 1239
1277 ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n", 1240 ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
1278 ah->hw_version.macVersion, 1241 ah->hw_version.macVersion,
1279 ah->hw_version.macRev, 1242 ah->hw_version.macRev,
1280 ah->opmode, 1243 ah->opmode,
1281 chan->channel, 1244 chan->channel);
1282 chan->channelFlags);
1283 1245
1284 val = REG_READ(ah, AR_PHY_SFCORR); 1246 val = REG_READ(ah, AR_PHY_SFCORR);
1285 iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH); 1247 iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
@@ -1375,15 +1337,19 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
1375 AR_PHY_ANT_FAST_DIV_BIAS_S; 1337 AR_PHY_ANT_FAST_DIV_BIAS_S;
1376 1338
1377 if (AR_SREV_9330_11(ah)) { 1339 if (AR_SREV_9330_11(ah)) {
1340 antconf->lna1_lna2_switch_delta = -1;
1378 antconf->lna1_lna2_delta = -9; 1341 antconf->lna1_lna2_delta = -9;
1379 antconf->div_group = 1; 1342 antconf->div_group = 1;
1380 } else if (AR_SREV_9485(ah)) { 1343 } else if (AR_SREV_9485(ah)) {
1344 antconf->lna1_lna2_switch_delta = -1;
1381 antconf->lna1_lna2_delta = -9; 1345 antconf->lna1_lna2_delta = -9;
1382 antconf->div_group = 2; 1346 antconf->div_group = 2;
1383 } else if (AR_SREV_9565(ah)) { 1347 } else if (AR_SREV_9565(ah)) {
1384 antconf->lna1_lna2_delta = -3; 1348 antconf->lna1_lna2_switch_delta = 3;
1349 antconf->lna1_lna2_delta = -9;
1385 antconf->div_group = 3; 1350 antconf->div_group = 3;
1386 } else { 1351 } else {
1352 antconf->lna1_lna2_switch_delta = -1;
1387 antconf->lna1_lna2_delta = -3; 1353 antconf->lna1_lna2_delta = -3;
1388 antconf->div_group = 0; 1354 antconf->div_group = 0;
1389 } 1355 }
@@ -1489,17 +1455,24 @@ static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
1489 } else if (AR_SREV_9565(ah)) { 1455 } else if (AR_SREV_9565(ah)) {
1490 if (enable) { 1456 if (enable) {
1491 REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL, 1457 REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1458 AR_ANT_DIV_ENABLE);
1459 REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1492 (1 << AR_PHY_ANT_SW_RX_PROT_S)); 1460 (1 << AR_PHY_ANT_SW_RX_PROT_S));
1493 if (ah->curchan && IS_CHAN_2GHZ(ah->curchan)) 1461 REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
1494 REG_SET_BIT(ah, AR_PHY_RESTART, 1462 AR_FAST_DIV_ENABLE);
1495 AR_PHY_RESTART_ENABLE_DIV_M2FLAG); 1463 REG_SET_BIT(ah, AR_PHY_RESTART,
1464 AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
1496 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, 1465 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
1497 AR_BTCOEX_WL_LNADIV_FORCE_ON); 1466 AR_BTCOEX_WL_LNADIV_FORCE_ON);
1498 } else { 1467 } else {
1499 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE); 1468 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1469 AR_ANT_DIV_ENABLE);
1500 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, 1470 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1501 (1 << AR_PHY_ANT_SW_RX_PROT_S)); 1471 (1 << AR_PHY_ANT_SW_RX_PROT_S));
1502 REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE); 1472 REG_CLR_BIT(ah, AR_PHY_CCK_DETECT,
1473 AR_FAST_DIV_ENABLE);
1474 REG_CLR_BIT(ah, AR_PHY_RESTART,
1475 AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
1503 REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV, 1476 REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
1504 AR_BTCOEX_WL_LNADIV_FORCE_ON); 1477 AR_BTCOEX_WL_LNADIV_FORCE_ON);
1505 1478
@@ -1526,28 +1499,10 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
1526 unsigned int regWrites = 0; 1499 unsigned int regWrites = 0;
1527 u32 modesIndex; 1500 u32 modesIndex;
1528 1501
1529 switch (chan->chanmode) { 1502 if (IS_CHAN_5GHZ(chan))
1530 case CHANNEL_A: 1503 modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
1531 case CHANNEL_A_HT20: 1504 else
1532 modesIndex = 1; 1505 modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
1533 break;
1534 case CHANNEL_A_HT40PLUS:
1535 case CHANNEL_A_HT40MINUS:
1536 modesIndex = 2;
1537 break;
1538 case CHANNEL_G:
1539 case CHANNEL_G_HT20:
1540 case CHANNEL_B:
1541 modesIndex = 4;
1542 break;
1543 case CHANNEL_G_HT40PLUS:
1544 case CHANNEL_G_HT40MINUS:
1545 modesIndex = 3;
1546 break;
1547
1548 default:
1549 return -EINVAL;
1550 }
1551 1506
1552 if (modesIndex == ah->modes_index) { 1507 if (modesIndex == ah->modes_index) {
1553 *ini_reloaded = false; 1508 *ini_reloaded = false;
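[editor's note] Both ar9003_hw_process_ini() and ar9003_hw_fast_chan_change() above drop the switch over chanmode in favour of two band/width predicates; the resulting table (5 GHz HT40 -> 2, other 5 GHz -> 1, 2 GHz HT40 -> 3, other 2 GHz -> 4) is exactly what the removed switch encoded. Here is a standalone sketch of that mapping; the struct and helper are hypothetical stand-ins for ath9k_channel and the IS_CHAN_* macros.

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for the bits of ath9k_channel this mapping needs;
 * the real driver derives them from the channel flags. */
struct chan {
	bool is_5ghz;
	bool is_ht40;
};

/* Same table the removed switch over chanmode encoded:
 * 5G HT20/legacy -> 1, 5G HT40 -> 2, 2G HT40 -> 3, 2G HT20/legacy -> 4. */
static unsigned int modes_index(const struct chan *c)
{
	if (c->is_5ghz)
		return c->is_ht40 ? 2 : 1;
	return c->is_ht40 ? 3 : 4;
}

int main(void)
{
	const struct chan tests[] = {
		{ .is_5ghz = true,  .is_ht40 = false }, /* CHANNEL_A / A_HT20    -> 1 */
		{ .is_5ghz = true,  .is_ht40 = true  }, /* CHANNEL_A_HT40+/-     -> 2 */
		{ .is_5ghz = false, .is_ht40 = true  }, /* CHANNEL_G_HT40+/-     -> 3 */
		{ .is_5ghz = false, .is_ht40 = false }, /* CHANNEL_B/G/G_HT20    -> 4 */
	};

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("5ghz=%d ht40=%d -> modesIndex %u\n",
		       tests[i].is_5ghz, tests[i].is_ht40, modes_index(&tests[i]));
	return 0;
}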
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 6fd752321e36..fca624322dc8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -343,8 +343,12 @@
343 343
344#define AR_PHY_CCA_NOM_VAL_9462_2GHZ -127 344#define AR_PHY_CCA_NOM_VAL_9462_2GHZ -127
345#define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ -127 345#define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ -127
346#define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ -60
347#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ -95
346#define AR_PHY_CCA_NOM_VAL_9462_5GHZ -127 348#define AR_PHY_CCA_NOM_VAL_9462_5GHZ -127
347#define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ -127 349#define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ -127
350#define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ -60
351#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ -100
348 352
349#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118 353#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
350 354
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 74de3539c2c8..934418872e8e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -118,6 +118,27 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
118 } 118 }
119} 119}
120 120
121static void ar9003_hw_patch_rtt(struct ath_hw *ah, int index, int chain)
122{
123 int agc, caldac;
124
125 if (!test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
126 return;
127
128 if ((index != 5) || (chain >= 2))
129 return;
130
131 agc = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
132 AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE);
133 if (!agc)
134 return;
135
136 caldac = ah->caldata->caldac[chain];
137 ah->caldata->rtt_table[chain][index] &= 0xFFFF05FF;
138 caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7);
139 ah->caldata->rtt_table[chain][index] |= (caldac << 4);
140}
141
121static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index) 142static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
122{ 143{
123 u32 val; 144 u32 val;
@@ -155,13 +176,16 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
155 for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) { 176 for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
156 ah->caldata->rtt_table[chain][i] = 177 ah->caldata->rtt_table[chain][i] =
157 ar9003_hw_rtt_fill_hist_entry(ah, chain, i); 178 ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
179
180 ar9003_hw_patch_rtt(ah, i, chain);
181
158 ath_dbg(ath9k_hw_common(ah), CALIBRATE, 182 ath_dbg(ath9k_hw_common(ah), CALIBRATE,
159 "RTT value at idx %d, chain %d is: 0x%x\n", 183 "RTT value at idx %d, chain %d is: 0x%x\n",
160 i, chain, ah->caldata->rtt_table[chain][i]); 184 i, chain, ah->caldata->rtt_table[chain][i]);
161 } 185 }
162 } 186 }
163 187
164 ah->caldata->rtt_done = true; 188 set_bit(RTT_DONE, &ah->caldata->cal_flags);
165} 189}
166 190
167void ar9003_hw_rtt_clear_hist(struct ath_hw *ah) 191void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
@@ -176,7 +200,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
176 } 200 }
177 201
178 if (ah->caldata) 202 if (ah->caldata)
179 ah->caldata->rtt_done = false; 203 clear_bit(RTT_DONE, &ah->caldata->cal_flags);
180} 204}
181 205
182bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan) 206bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
@@ -186,11 +210,37 @@ bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
186 if (!ah->caldata) 210 if (!ah->caldata)
187 return false; 211 return false;
188 212
189 if (!ah->caldata->rtt_done) 213 if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) {
 214		if (IS_CHAN_2GHZ(chan)) {
215 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
216 AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
217 ah->caldata->caldac[0]);
218 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
219 AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
220 ah->caldata->caldac[1]);
221 } else {
222 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
223 AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
224 ah->caldata->caldac[0]);
225 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
226 AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
227 ah->caldata->caldac[1]);
228 }
229 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
230 AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
231 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
232 AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
233 }
234
235 if (!test_bit(RTT_DONE, &ah->caldata->cal_flags))
190 return false; 236 return false;
191 237
192 ar9003_hw_rtt_enable(ah); 238 ar9003_hw_rtt_enable(ah);
193 ar9003_hw_rtt_set_mask(ah, 0x10); 239
240 if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
241 ar9003_hw_rtt_set_mask(ah, 0x30);
242 else
243 ar9003_hw_rtt_set_mask(ah, 0x10);
194 244
195 if (!ath9k_hw_rfbus_req(ah)) { 245 if (!ath9k_hw_rfbus_req(ah)) {
196 ath_err(ath9k_hw_common(ah), "Could not stop baseband\n"); 246 ath_err(ath9k_hw_common(ah), "Could not stop baseband\n");
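[editor's note] The new ar9003_hw_patch_rtt() above rewrites part of RTT history entry 5 with the stored CALDAC value: after the 0xFFFF05FF mask clears bit 9 and bits 15:11 of the table word, bit 5 of the 6-bit caldac lands in bit 9 and bits 4:0 land in bits 15:11. Because the shifting is easy to misread, here is that packing step in isolation, using the same masks and shifts as the hunk; the sample values are made up for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Repack a 6-bit CALDAC value into an RTT history word, as
 * ar9003_hw_patch_rtt() does: caldac bit 5 -> entry bit 9,
 * caldac bits 4:0 -> entry bits 15:11; all other bits kept. */
static uint32_t patch_rtt_entry(uint32_t entry, uint32_t caldac)
{
	entry &= 0xFFFF05FF;                              /* clear bits 9 and 15:11 */
	caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7);
	return entry | (caldac << 4);
}

int main(void)
{
	uint32_t entry = 0x12345678;                      /* made-up history entry */
	uint32_t caldac = 0x2B;                           /* 6-bit value: 0b101011 */

	printf("entry 0x%08" PRIx32 " + caldac 0x%02" PRIx32 " -> 0x%08" PRIx32 "\n",
	       entry, caldac, patch_rtt_entry(entry, caldac));
	return 0;
}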
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 88ff1d7b53ab..6f899c692647 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -20,7 +20,17 @@
20 20
21/* AR9485 1.1 */ 21/* AR9485 1.1 */
22 22
23#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble 23static const u32 ar9485_1_1_mac_postamble[][5] = {
24 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
25 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
26 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
27 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
28 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
29 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
30 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
31 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
32 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
33};
24 34
25static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = { 35static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
26 /* Addr allmodes */ 36 /* Addr allmodes */
@@ -34,6 +44,7 @@ static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
34 {0x00009e00, 0x037216a0}, 44 {0x00009e00, 0x037216a0},
35 {0x00009e04, 0x00182020}, 45 {0x00009e04, 0x00182020},
36 {0x00009e18, 0x00000000}, 46 {0x00009e18, 0x00000000},
47 {0x00009e20, 0x000003a8},
37 {0x00009e2c, 0x00004121}, 48 {0x00009e2c, 0x00004121},
38 {0x00009e44, 0x02282324}, 49 {0x00009e44, 0x02282324},
39 {0x0000a000, 0x00060005}, 50 {0x0000a000, 0x00060005},
@@ -174,7 +185,7 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
174 {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552}, 185 {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
175 {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552}, 186 {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
176 {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552}, 187 {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
177 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, 188 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
178 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 189 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
179 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000}, 190 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
180 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002}, 191 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -200,14 +211,14 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
200 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, 211 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
201 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb}, 212 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
202 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, 213 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
203 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, 214 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
204 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, 215 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
205 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 216 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
206 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 217 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
207 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 218 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
208 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 219 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
209 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 220 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
210 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 221 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
211 {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 222 {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
212 {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 223 {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
213 {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 224 {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -263,6 +274,11 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
263static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = { 274static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
264 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 275 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
265 {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003}, 276 {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
277 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
278 {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
279 {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
280 {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
281 {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
266 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, 282 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
267 {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000}, 283 {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
268 {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006}, 284 {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
@@ -297,6 +313,22 @@ static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
297 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 313 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
298 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 314 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
299 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 315 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
316 {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
317 {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
318 {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
319 {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
320 {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
321 {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
322 {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
323 {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
324 {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
325 {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
326 {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
327 {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
328 {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
329 {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
330 {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
331 {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
300 {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, 332 {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
301 {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, 333 {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
302 {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, 334 {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -341,6 +373,100 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
341 {0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84}, 373 {0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
342 {0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000}, 374 {0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
343 {0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000}, 375 {0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
376 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
377 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
378 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
379 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
380 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
381 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
382 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
383 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
384 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
385 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
386 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
387 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
388 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
389 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
390 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
391 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
392 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
393 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
394 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
395 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
396 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
397 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
398 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
399 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
400 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
401 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
402 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
403 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
404 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
405 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
406 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
407 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
408 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
409 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
410 {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
411 {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
412 {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
413 {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
414 {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
415 {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
416 {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
417 {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
418 {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
419 {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
420 {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
421 {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
422 {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
423 {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
424 {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
425 {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
426 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
427 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
428 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
429 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
430 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
431 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
432 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
433 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
434 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
435 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
436 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
437 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
438 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
439 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
440 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
441 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
442 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
443 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
444 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
445 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
446 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
447 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
448 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
449 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
450 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
451 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
452 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
453 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
454 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
455 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
456 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
457 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
458 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
459 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
460};
461
462static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
463 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
464 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
465 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
466 {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
467 {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
468 {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
469 {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
344 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, 470 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
345 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 471 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
346 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000}, 472 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
@@ -427,7 +553,7 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
427 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, 553 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
428}; 554};
429 555
430static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = { 556static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
431 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 557 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
432 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002}, 558 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
433 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a}, 559 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
@@ -521,12 +647,15 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
521 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, 647 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
522}; 648};
523 649
524#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
525
526static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = { 650static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
527 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 651 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
528 {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003}, 652 {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
529 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, 653 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
654 {0x0000a2dc, 0x00000000, 0x00000000, 0xffad452a, 0xffad452a},
655 {0x0000a2e0, 0x00000000, 0x00000000, 0xffc98634, 0xffc98634},
656 {0x0000a2e4, 0x00000000, 0x00000000, 0xfff60780, 0xfff60780},
657 {0x0000a2e8, 0x00000000, 0x00000000, 0xfffff800, 0xfffff800},
658 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
530 {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000}, 659 {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
531 {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006}, 660 {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
532 {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201}, 661 {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
@@ -543,23 +672,39 @@ static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
543 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0}, 672 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0},
544 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0}, 673 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0},
545 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3}, 674 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3},
546 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x410008e5, 0x410008e5}, 675 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x430008e6, 0x430008e6},
547 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x430008e6, 0x430008e6}, 676 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4a0008ec, 0x4a0008ec},
548 {0x0000a544, 0x6502feca, 0x6502feca, 0x4a0008ec, 0x4a0008ec}, 677 {0x0000a544, 0x6502feca, 0x6502feca, 0x4e0008f1, 0x4e0008f1},
549 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4e0008f1, 0x4e0008f1}, 678 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x520008f3, 0x520008f3},
550 {0x0000a54c, 0x7203feca, 0x7203feca, 0x520008f3, 0x520008f3}, 679 {0x0000a54c, 0x7203feca, 0x7203feca, 0x54000eed, 0x54000eed},
551 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x54000eed, 0x54000eed}, 680 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x58000ef1, 0x58000ef1},
552 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x58000ef1, 0x58000ef1}, 681 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5c000ef3, 0x5c000ef3},
553 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5c000ef3, 0x5c000ef3}, 682 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x62000ef6, 0x62000ef6},
554 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x60000ef5, 0x60000ef5}, 683 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001ff0, 0x66001ff0},
555 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62000ef6, 0x62000ef6}, 684 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x68001ff6, 0x68001ff6},
556 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x62000ef6, 0x62000ef6}, 685 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x68001ff6, 0x68001ff6},
557 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, 686 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
558 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, 687 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
559 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, 688 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
560 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, 689 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
561 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, 690 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
562 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, 691 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
692 {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
693 {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
694 {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
695 {0x0000a58c, 0x00000000, 0x00000000, 0x01804000, 0x01804000},
696 {0x0000a590, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
697 {0x0000a594, 0x00000000, 0x00000000, 0x0340ca02, 0x0340ca02},
698 {0x0000a598, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
699 {0x0000a59c, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
700 {0x0000a5a0, 0x00000000, 0x00000000, 0x06415304, 0x06415304},
701 {0x0000a5a4, 0x00000000, 0x00000000, 0x04c11905, 0x04c11905},
702 {0x0000a5a8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
703 {0x0000a5ac, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
704 {0x0000a5b0, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
705 {0x0000a5b4, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
706 {0x0000a5b8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
707 {0x0000a5bc, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
563 {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, 708 {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
564 {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, 709 {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
565 {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, 710 {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -823,6 +968,7 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
823 {0x00009e00, 0x03721b20}, 968 {0x00009e00, 0x03721b20},
824 {0x00009e04, 0x00082020}, 969 {0x00009e04, 0x00082020},
825 {0x00009e18, 0x0300501e}, 970 {0x00009e18, 0x0300501e},
971 {0x00009e20, 0x000003ba},
826 {0x00009e2c, 0x00002e21}, 972 {0x00009e2c, 0x00002e21},
827 {0x00009e44, 0x02182324}, 973 {0x00009e44, 0x02182324},
828 {0x0000a000, 0x00060005}, 974 {0x0000a000, 0x00060005},
@@ -1001,7 +1147,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
1001 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e}, 1147 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
1002 {0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53}, 1148 {0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
1003 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 1149 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
1004 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
1005 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, 1150 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
1006 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010}, 1151 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
1007 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 1152 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1020,7 +1165,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
1020 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0}, 1165 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
1021 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1166 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1022 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1167 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1023 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18}, 1168 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
1024 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982}, 1169 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
1025 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 1170 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
1026 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1171 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1206,6 +1351,11 @@ static const u32 ar9485_1_1_mac_core[][2] = {
1206 {0x000083d0, 0x000301ff}, 1351 {0x000083d0, 0x000301ff},
1207}; 1352};
1208 1353
1209#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484 1354static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
1355 /* Addr allmodes */
1356 {0x0000a398, 0x00000000},
1357 {0x0000a39c, 0x6f7f0301},
1358 {0x0000a3a0, 0xca9228ee},
1359};
1210 1360
1211#endif /* INITVALS_9485_H */ 1361#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index e85a8b076c22..a8c757b6124f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -272,9 +272,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
272 {0x0000a398, 0x001f0e0f}, 272 {0x0000a398, 0x001f0e0f},
273 {0x0000a39c, 0x0075393f}, 273 {0x0000a39c, 0x0075393f},
274 {0x0000a3a0, 0xb79f6427}, 274 {0x0000a3a0, 0xb79f6427},
275 {0x0000a3a4, 0x00000000}, 275 {0x0000a3a4, 0x00000011},
276 {0x0000a3a8, 0xaaaaaaaa}, 276 {0x0000a3a8, 0xaaaaaa6e},
277 {0x0000a3ac, 0x3c466478}, 277 {0x0000a3ac, 0x3c466455},
278 {0x0000a3c0, 0x20202020}, 278 {0x0000a3c0, 0x20202020},
279 {0x0000a3c4, 0x22222220}, 279 {0x0000a3c4, 0x22222220},
280 {0x0000a3c8, 0x20200020}, 280 {0x0000a3c8, 0x20200020},
@@ -295,11 +295,11 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
295 {0x0000a404, 0x00000000}, 295 {0x0000a404, 0x00000000},
296 {0x0000a408, 0x0e79e5c6}, 296 {0x0000a408, 0x0e79e5c6},
297 {0x0000a40c, 0x00820820}, 297 {0x0000a40c, 0x00820820},
298 {0x0000a414, 0x1ce739ce}, 298 {0x0000a414, 0x1ce739c5},
299 {0x0000a418, 0x2d001dce}, 299 {0x0000a418, 0x2d001dce},
300 {0x0000a41c, 0x1ce739ce}, 300 {0x0000a41c, 0x1ce739c5},
301 {0x0000a420, 0x000001ce}, 301 {0x0000a420, 0x000001ce},
302 {0x0000a424, 0x1ce739ce}, 302 {0x0000a424, 0x1ce739c5},
303 {0x0000a428, 0x000001ce}, 303 {0x0000a428, 0x000001ce},
304 {0x0000a42c, 0x1ce739ce}, 304 {0x0000a42c, 0x1ce739ce},
305 {0x0000a430, 0x1ce739ce}, 305 {0x0000a430, 0x1ce739ce},
@@ -351,9 +351,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
351 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e}, 351 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
352 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 352 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
353 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 353 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
354 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 354 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
355 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 355 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
356 {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222}, 356 {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
357 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, 357 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
358 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 358 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
359 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 359 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -452,6 +452,7 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
452 /* Addr allmodes */ 452 /* Addr allmodes */
453 {0x00004050, 0x00300300}, 453 {0x00004050, 0x00300300},
454 {0x0000406c, 0x00100000}, 454 {0x0000406c, 0x00100000},
455 {0x00009e20, 0x000003b6},
455 {0x0000a000, 0x00010000}, 456 {0x0000a000, 0x00010000},
456 {0x0000a004, 0x00030002}, 457 {0x0000a004, 0x00030002},
457 {0x0000a008, 0x00050004}, 458 {0x0000a008, 0x00050004},
@@ -1230,4 +1231,11 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
1230 {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1231 {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1231}; 1232};
1232 1233
1234static const u32 ar9565_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
1235 /* Addr allmodes */
1236 {0x0000a398, 0x00000000},
1237 {0x0000a39c, 0x6f7f0301},
1238 {0x0000a3a0, 0xca9228ee},
1239};
1240
1233#endif /* INITVALS_9565_1P0_H */ 1241#endif /* INITVALS_9565_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 2ee35f677c0e..14ff7e9dde4c 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -64,7 +64,6 @@ struct ath_node;
64 64
65struct ath_config { 65struct ath_config {
66 u16 txpowlimit; 66 u16 txpowlimit;
67 u8 cabqReadytime;
68}; 67};
69 68
70/*************************/ 69/*************************/
@@ -207,6 +206,14 @@ struct ath_frame_info {
207 u8 baw_tracked : 1; 206 u8 baw_tracked : 1;
208}; 207};
209 208
209struct ath_rxbuf {
210 struct list_head list;
211 struct sk_buff *bf_mpdu;
212 void *bf_desc;
213 dma_addr_t bf_daddr;
214 dma_addr_t bf_buf_addr;
215};
216
210struct ath_buf_state { 217struct ath_buf_state {
211 u8 bf_type; 218 u8 bf_type;
212 u8 bfs_paprd; 219 u8 bfs_paprd;
@@ -307,7 +314,7 @@ struct ath_rx {
307 struct ath_descdma rxdma; 314 struct ath_descdma rxdma;
308 struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX]; 315 struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
309 316
310 struct ath_buf *buf_hold; 317 struct ath_rxbuf *buf_hold;
311 struct sk_buff *frag; 318 struct sk_buff *frag;
312 319
313 u32 ampdu_ref; 320 u32 ampdu_ref;
@@ -459,8 +466,8 @@ void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
459 466
460#define ATH_DUMP_BTCOEX(_s, _val) \ 467#define ATH_DUMP_BTCOEX(_s, _val) \
461 do { \ 468 do { \
462 len += snprintf(buf + len, size - len, \ 469 len += scnprintf(buf + len, size - len, \
463 "%20s : %10d\n", _s, (_val)); \ 470 "%20s : %10d\n", _s, (_val)); \
464 } while (0) 471 } while (0)
465 472
466enum bt_op_flags { 473enum bt_op_flags {
@@ -581,7 +588,6 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
581#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50 588#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
582#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50 589#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
583 590
584#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
585#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4 591#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
586#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2 592#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
587#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2 593#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
@@ -626,12 +632,15 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
626/* Main driver core */ 632/* Main driver core */
627/********************/ 633/********************/
628 634
629#define ATH9K_PCI_CUS198 0x0001 635#define ATH9K_PCI_CUS198 0x0001
630#define ATH9K_PCI_CUS230 0x0002 636#define ATH9K_PCI_CUS230 0x0002
631#define ATH9K_PCI_CUS217 0x0004 637#define ATH9K_PCI_CUS217 0x0004
632#define ATH9K_PCI_WOW 0x0008 638#define ATH9K_PCI_CUS252 0x0008
633#define ATH9K_PCI_BT_ANT_DIV 0x0010 639#define ATH9K_PCI_WOW 0x0010
634#define ATH9K_PCI_D3_L1_WAR 0x0020 640#define ATH9K_PCI_BT_ANT_DIV 0x0020
641#define ATH9K_PCI_D3_L1_WAR 0x0040
642#define ATH9K_PCI_AR9565_1ANT 0x0080
643#define ATH9K_PCI_AR9565_2ANT 0x0100
635 644
636/* 645/*
637 * Default cache line size, in bytes. 646 * Default cache line size, in bytes.
@@ -924,7 +933,6 @@ void ath9k_deinit_device(struct ath_softc *sc);
924void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); 933void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
925void ath9k_reload_chainmask_settings(struct ath_softc *sc); 934void ath9k_reload_chainmask_settings(struct ath_softc *sc);
926 935
927bool ath9k_uses_beacons(int type);
928void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw); 936void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
929int ath9k_spectral_scan_config(struct ieee80211_hw *hw, 937int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
930 enum spectral_mode spectral_mode); 938 enum spectral_mode spectral_mode);
@@ -952,7 +960,7 @@ void ath9k_ps_restore(struct ath_softc *sc);
952u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate); 960u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
953 961
954void ath_start_rfkill_poll(struct ath_softc *sc); 962void ath_start_rfkill_poll(struct ath_softc *sc);
955extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw); 963void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
956void ath9k_calculate_iter_data(struct ieee80211_hw *hw, 964void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
957 struct ieee80211_vif *vif, 965 struct ieee80211_vif *vif,
958 struct ath9k_vif_iter_data *iter_data); 966 struct ath9k_vif_iter_data *iter_data);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b5c16b3a37b9..17be35392bb4 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -334,6 +334,8 @@ void ath9k_beacon_tasklet(unsigned long data)
334 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) { 334 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
335 sc->beacon.bmisscnt++; 335 sc->beacon.bmisscnt++;
336 336
337 ath9k_hw_check_nav(ah);
338
337 if (!ath9k_hw_check_alive(ah)) 339 if (!ath9k_hw_check_alive(ah))
338 ieee80211_queue_work(sc->hw, &sc->hw_check_work); 340 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
339 341
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 5e8219a91e25..d8db74b0ef66 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -119,7 +119,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
119 ath_dbg(common, CALIBRATE, 119 ath_dbg(common, CALIBRATE,
120 "NFmid[%d] (%d) > MAX (%d), %s\n", 120 "NFmid[%d] (%d) > MAX (%d), %s\n",
121 i, h[i].privNF, limit->max, 121 i, h[i].privNF, limit->max,
122 (cal->nfcal_interference ? 122 (test_bit(NFCAL_INTF, &cal->cal_flags) ?
123 "not corrected (due to interference)" : 123 "not corrected (due to interference)" :
124 "correcting to MAX")); 124 "correcting to MAX"));
125 125
@@ -130,7 +130,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
130 * we bypass this limit here in order to better deal 130 * we bypass this limit here in order to better deal
131 * with our environment. 131 * with our environment.
132 */ 132 */
133 if (!cal->nfcal_interference) 133 if (!test_bit(NFCAL_INTF, &cal->cal_flags))
134 h[i].privNF = limit->max; 134 h[i].privNF = limit->max;
135 } 135 }
136 } 136 }
@@ -141,7 +141,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
141 * Re-enable the enforcement of the NF maximum again. 141 * Re-enable the enforcement of the NF maximum again.
142 */ 142 */
143 if (!high_nf_mid) 143 if (!high_nf_mid)
144 cal->nfcal_interference = false; 144 clear_bit(NFCAL_INTF, &cal->cal_flags);
145} 145}
146 146
147static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah, 147static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
@@ -186,7 +186,6 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
186bool ath9k_hw_reset_calvalid(struct ath_hw *ah) 186bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
187{ 187{
188 struct ath_common *common = ath9k_hw_common(ah); 188 struct ath_common *common = ath9k_hw_common(ah);
189 struct ieee80211_conf *conf = &common->hw->conf;
190 struct ath9k_cal_list *currCal = ah->cal_list_curr; 189 struct ath9k_cal_list *currCal = ah->cal_list_curr;
191 190
192 if (!ah->caldata) 191 if (!ah->caldata)
@@ -208,7 +207,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
208 return true; 207 return true;
209 208
210 ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n", 209 ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
211 currCal->calData->calType, conf->chandef.chan->center_freq); 210 currCal->calData->calType, ah->curchan->chan->center_freq);
212 211
213 ah->caldata->CalValid &= ~currCal->calData->calType; 212 ah->caldata->CalValid &= ~currCal->calData->calType;
214 currCal->calState = CAL_WAITING; 213 currCal->calState = CAL_WAITING;
@@ -220,7 +219,7 @@ EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
220void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update) 219void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
221{ 220{
222 if (ah->caldata) 221 if (ah->caldata)
223 ah->caldata->nfcal_pending = true; 222 set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
224 223
225 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 224 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
226 AR_PHY_AGC_CONTROL_ENABLE_NF); 225 AR_PHY_AGC_CONTROL_ENABLE_NF);
@@ -242,7 +241,6 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
242 int32_t val; 241 int32_t val;
243 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; 242 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
244 struct ath_common *common = ath9k_hw_common(ah); 243 struct ath_common *common = ath9k_hw_common(ah);
245 struct ieee80211_conf *conf = &common->hw->conf;
246 s16 default_nf = ath9k_hw_get_default_nf(ah, chan); 244 s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
247 245
248 if (ah->caldata) 246 if (ah->caldata)
@@ -252,7 +250,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
252 if (chainmask & (1 << i)) { 250 if (chainmask & (1 << i)) {
253 s16 nfval; 251 s16 nfval;
254 252
255 if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)) 253 if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
256 continue; 254 continue;
257 255
258 if (h) 256 if (h)
@@ -314,7 +312,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
314 ENABLE_REGWRITE_BUFFER(ah); 312 ENABLE_REGWRITE_BUFFER(ah);
315 for (i = 0; i < NUM_NF_READINGS; i++) { 313 for (i = 0; i < NUM_NF_READINGS; i++) {
316 if (chainmask & (1 << i)) { 314 if (chainmask & (1 << i)) {
317 if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)) 315 if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
318 continue; 316 continue;
319 317
320 val = REG_READ(ah, ah->nf_regs[i]); 318 val = REG_READ(ah, ah->nf_regs[i]);
@@ -391,7 +389,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
391 } 389 }
392 390
393 h = caldata->nfCalHist; 391 h = caldata->nfCalHist;
394 caldata->nfcal_pending = false; 392 clear_bit(NFCAL_PENDING, &caldata->cal_flags);
395 ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray); 393 ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
396 chan->noisefloor = h[0].privNF; 394 chan->noisefloor = h[0].privNF;
397 ah->noise = ath9k_hw_getchan_noise(ah, chan); 395 ah->noise = ath9k_hw_getchan_noise(ah, chan);
@@ -408,7 +406,6 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
408 406
409 ah->caldata->channel = chan->channel; 407 ah->caldata->channel = chan->channel;
410 ah->caldata->channelFlags = chan->channelFlags; 408 ah->caldata->channelFlags = chan->channelFlags;
411 ah->caldata->chanmode = chan->chanmode;
412 h = ah->caldata->nfCalHist; 409 h = ah->caldata->nfCalHist;
413 default_nf = ath9k_hw_get_default_nf(ah, chan); 410 default_nf = ath9k_hw_get_default_nf(ah, chan);
414 for (i = 0; i < NUM_NF_READINGS; i++) { 411 for (i = 0; i < NUM_NF_READINGS; i++) {
@@ -437,12 +434,12 @@ void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
437 * the baseband update the internal NF value itself, similar to 434 * the baseband update the internal NF value itself, similar to
438 * what is being done after a full reset. 435 * what is being done after a full reset.
439 */ 436 */
440 if (!caldata->nfcal_pending) 437 if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
441 ath9k_hw_start_nfcal(ah, true); 438 ath9k_hw_start_nfcal(ah, true);
442 else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF)) 439 else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
443 ath9k_hw_getnf(ah, ah->curchan); 440 ath9k_hw_getnf(ah, ah->curchan);
444 441
445 caldata->nfcal_interference = true; 442 set_bit(NFCAL_INTF, &caldata->cal_flags);
446} 443}
447EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal); 444EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
448 445
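The calib.c changes above fold the two standalone booleans nfcal_pending and nfcal_interference into single bits (NFCAL_PENDING, NFCAL_INTF) of a shared cal_flags word manipulated with set_bit()/clear_bit()/test_bit(), and drop the dependence on hw->conf in favour of the channel the hardware is actually on (ah->curchan, IS_CHAN_HT40(chan)). A minimal user-space sketch of the same flag pattern, using C11 atomics in place of the kernel-only bitops; the flag names mirror the patch, everything else is illustrative:

        /* Sketch of the bool -> bit-flag conversion shown above. */
        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        enum cal_flag_bits {
                NFCAL_PENDING,  /* NF calibration started, result not read yet */
                NFCAL_INTF,     /* NF readings look interference-affected */
        };

        struct caldata {
                atomic_ulong cal_flags;         /* one bit per enum value */
        };

        static void set_flag(struct caldata *c, int bit)
        {
                atomic_fetch_or(&c->cal_flags, 1UL << bit);
        }

        static void clear_flag(struct caldata *c, int bit)
        {
                atomic_fetch_and(&c->cal_flags, ~(1UL << bit));
        }

        static bool test_flag(struct caldata *c, int bit)
        {
                return atomic_load(&c->cal_flags) & (1UL << bit);
        }

        int main(void)
        {
                struct caldata cal = { 0 };

                set_flag(&cal, NFCAL_PENDING);          /* cf. ath9k_hw_start_nfcal() */
                printf("pending=%d intf=%d\n",
                       test_flag(&cal, NFCAL_PENDING), test_flag(&cal, NFCAL_INTF));
                clear_flag(&cal, NFCAL_PENDING);        /* cf. ath9k_hw_getnf() */
                return 0;
        }

Packing related state into one flags word keeps the calibration data compact and lets further flags be added later without growing the structure.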
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index d3063c21e16c..a7e5a05b2eff 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -49,103 +49,64 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
49} 49}
50EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype); 50EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
51 51
52static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
53{
54 u32 chanmode = 0;
55
56 switch (chandef->chan->band) {
57 case IEEE80211_BAND_2GHZ:
58 switch (chandef->width) {
59 case NL80211_CHAN_WIDTH_20_NOHT:
60 case NL80211_CHAN_WIDTH_20:
61 chanmode = CHANNEL_G_HT20;
62 break;
63 case NL80211_CHAN_WIDTH_40:
64 if (chandef->center_freq1 > chandef->chan->center_freq)
65 chanmode = CHANNEL_G_HT40PLUS;
66 else
67 chanmode = CHANNEL_G_HT40MINUS;
68 break;
69 default:
70 break;
71 }
72 break;
73 case IEEE80211_BAND_5GHZ:
74 switch (chandef->width) {
75 case NL80211_CHAN_WIDTH_20_NOHT:
76 case NL80211_CHAN_WIDTH_20:
77 chanmode = CHANNEL_A_HT20;
78 break;
79 case NL80211_CHAN_WIDTH_40:
80 if (chandef->center_freq1 > chandef->chan->center_freq)
81 chanmode = CHANNEL_A_HT40PLUS;
82 else
83 chanmode = CHANNEL_A_HT40MINUS;
84 break;
85 default:
86 break;
87 }
88 break;
89 default:
90 break;
91 }
92
93 return chanmode;
94}
95
96/* 52/*
97 * Update internal channel flags. 53 * Update internal channel flags.
98 */ 54 */
99void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan, 55static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
100 struct cfg80211_chan_def *chandef) 56 struct cfg80211_chan_def *chandef)
101{ 57{
102 ichan->channel = chandef->chan->center_freq; 58 struct ieee80211_channel *chan = chandef->chan;
103 ichan->chan = chandef->chan; 59 u16 flags = 0;
104 60
105 if (chandef->chan->band == IEEE80211_BAND_2GHZ) { 61 ichan->channel = chan->center_freq;
106 ichan->chanmode = CHANNEL_G; 62 ichan->chan = chan;
107 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM; 63
108 } else { 64 if (chan->band == IEEE80211_BAND_5GHZ)
109 ichan->chanmode = CHANNEL_A; 65 flags |= CHANNEL_5GHZ;
110 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
111 }
112 66
113 switch (chandef->width) { 67 switch (chandef->width) {
114 case NL80211_CHAN_WIDTH_5: 68 case NL80211_CHAN_WIDTH_5:
115 ichan->channelFlags |= CHANNEL_QUARTER; 69 flags |= CHANNEL_QUARTER;
116 break; 70 break;
117 case NL80211_CHAN_WIDTH_10: 71 case NL80211_CHAN_WIDTH_10:
118 ichan->channelFlags |= CHANNEL_HALF; 72 flags |= CHANNEL_HALF;
119 break; 73 break;
120 case NL80211_CHAN_WIDTH_20_NOHT: 74 case NL80211_CHAN_WIDTH_20_NOHT:
121 break; 75 break;
122 case NL80211_CHAN_WIDTH_20: 76 case NL80211_CHAN_WIDTH_20:
77 flags |= CHANNEL_HT;
78 break;
123 case NL80211_CHAN_WIDTH_40: 79 case NL80211_CHAN_WIDTH_40:
124 ichan->chanmode = ath9k_get_extchanmode(chandef); 80 if (chandef->center_freq1 > chandef->chan->center_freq)
81 flags |= CHANNEL_HT40PLUS | CHANNEL_HT;
82 else
83 flags |= CHANNEL_HT40MINUS | CHANNEL_HT;
125 break; 84 break;
126 default: 85 default:
127 WARN_ON(1); 86 WARN_ON(1);
128 } 87 }
88
89 ichan->channelFlags = flags;
129} 90}
130EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
131 91
132/* 92/*
133 * Get the internal channel reference. 93 * Get the internal channel reference.
134 */ 94 */
135struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw, 95struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
136 struct ath_hw *ah) 96 struct ath_hw *ah,
97 struct cfg80211_chan_def *chandef)
137{ 98{
138 struct ieee80211_channel *curchan = hw->conf.chandef.chan; 99 struct ieee80211_channel *curchan = chandef->chan;
139 struct ath9k_channel *channel; 100 struct ath9k_channel *channel;
140 u8 chan_idx; 101 u8 chan_idx;
141 102
142 chan_idx = curchan->hw_value; 103 chan_idx = curchan->hw_value;
143 channel = &ah->channels[chan_idx]; 104 channel = &ah->channels[chan_idx];
144 ath9k_cmn_update_ichannel(channel, &hw->conf.chandef); 105 ath9k_cmn_update_ichannel(channel, chandef);
145 106
146 return channel; 107 return channel;
147} 108}
148EXPORT_SYMBOL(ath9k_cmn_get_curchannel); 109EXPORT_SYMBOL(ath9k_cmn_get_channel);
149 110
150int ath9k_cmn_count_streams(unsigned int chainmask, int max) 111int ath9k_cmn_count_streams(unsigned int chainmask, int max)
151{ 112{
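The rewritten ath9k_cmn_update_ichannel() above replaces the old per-band ath9k_get_extchanmode() switch with a single flags computation: the band contributes CHANNEL_5GHZ, the width contributes QUARTER/HALF/HT, and for 40 MHz the HT40PLUS/HT40MINUS choice reduces to comparing chandef->center_freq1 with the control channel's center_freq. A self-contained sketch of that mapping, with made-up flag values standing in for the driver's CHANNEL_* bits:

        /* Sketch of mapping a mac80211-style channel definition onto an
         * internal flag word, as in ath9k_cmn_update_ichannel() above.
         * Flag values are illustrative, not the real CHANNEL_* bits. */
        #include <stdint.h>
        #include <stdio.h>

        #define CH_5GHZ       0x0001
        #define CH_HALF       0x0002
        #define CH_QUARTER    0x0004
        #define CH_HT         0x0008
        #define CH_HT40PLUS   0x0010
        #define CH_HT40MINUS  0x0020

        enum width { W_5, W_10, W_20_NOHT, W_20, W_40 };

        struct chan_def {
                uint16_t ctl_freq;              /* control channel centre, MHz */
                uint16_t center_freq1;          /* centre of the 40 MHz block */
                enum width width;
                int is_5ghz;
        };

        static uint16_t chan_flags(const struct chan_def *d)
        {
                uint16_t flags = d->is_5ghz ? CH_5GHZ : 0;

                switch (d->width) {
                case W_5:       flags |= CH_QUARTER; break;
                case W_10:      flags |= CH_HALF;    break;
                case W_20_NOHT:                      break;
                case W_20:      flags |= CH_HT;      break;
                case W_40:
                        /* extension channel above or below the control channel */
                        if (d->center_freq1 > d->ctl_freq)
                                flags |= CH_HT | CH_HT40PLUS;
                        else
                                flags |= CH_HT | CH_HT40MINUS;
                        break;
                }
                return flags;
        }

        int main(void)
        {
                /* channel 36 (5180 MHz) with a 40 MHz block centred at 5190 MHz -> HT40+ */
                struct chan_def d = { .ctl_freq = 5180, .center_freq1 = 5190,
                                      .width = W_40, .is_5ghz = 1 };
                printf("flags=0x%04x\n", (unsigned)chan_flags(&d));
                return 0;
        }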
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index e039bcbfbd79..eb85e1bdca88 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -43,10 +43,9 @@
43 (((x) + ((mul)/2)) / (mul)) 43 (((x) + ((mul)/2)) / (mul))
44 44
45int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); 45int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
46void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan, 46struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
47 struct cfg80211_chan_def *chandef); 47 struct ath_hw *ah,
48struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw, 48 struct cfg80211_chan_def *chandef);
49 struct ath_hw *ah);
50int ath9k_cmn_count_streams(unsigned int chainmask, int max); 49int ath9k_cmn_count_streams(unsigned int chainmask, int max);
51void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common, 50void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
52 enum ath_stomp_type stomp_type); 51 enum ath_stomp_type stomp_type);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c088744a6bfb..1be2c787aac9 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -104,37 +104,37 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
104 return -ENOMEM; 104 return -ENOMEM;
105 105
106 if (common->disable_ani) { 106 if (common->disable_ani) {
107 len += snprintf(buf + len, size - len, "%s: %s\n", 107 len += scnprintf(buf + len, size - len, "%s: %s\n",
108 "ANI", "DISABLED"); 108 "ANI", "DISABLED");
109 goto exit; 109 goto exit;
110 } 110 }
111 111
112 len += snprintf(buf + len, size - len, "%15s: %s\n", 112 len += scnprintf(buf + len, size - len, "%15s: %s\n",
113 "ANI", "ENABLED"); 113 "ANI", "ENABLED");
114 len += snprintf(buf + len, size - len, "%15s: %u\n", 114 len += scnprintf(buf + len, size - len, "%15s: %u\n",
115 "ANI RESET", ah->stats.ast_ani_reset); 115 "ANI RESET", ah->stats.ast_ani_reset);
116 len += snprintf(buf + len, size - len, "%15s: %u\n", 116 len += scnprintf(buf + len, size - len, "%15s: %u\n",
117 "SPUR UP", ah->stats.ast_ani_spurup); 117 "SPUR UP", ah->stats.ast_ani_spurup);
118 len += snprintf(buf + len, size - len, "%15s: %u\n", 118 len += scnprintf(buf + len, size - len, "%15s: %u\n",
119 "SPUR DOWN", ah->stats.ast_ani_spurup); 119 "SPUR DOWN", ah->stats.ast_ani_spurup);
120 len += snprintf(buf + len, size - len, "%15s: %u\n", 120 len += scnprintf(buf + len, size - len, "%15s: %u\n",
121 "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon); 121 "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
122 len += snprintf(buf + len, size - len, "%15s: %u\n", 122 len += scnprintf(buf + len, size - len, "%15s: %u\n",
123 "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff); 123 "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
124 len += snprintf(buf + len, size - len, "%15s: %u\n", 124 len += scnprintf(buf + len, size - len, "%15s: %u\n",
125 "MRC-CCK ON", ah->stats.ast_ani_ccklow); 125 "MRC-CCK ON", ah->stats.ast_ani_ccklow);
126 len += snprintf(buf + len, size - len, "%15s: %u\n", 126 len += scnprintf(buf + len, size - len, "%15s: %u\n",
127 "MRC-CCK OFF", ah->stats.ast_ani_cckhigh); 127 "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
128 len += snprintf(buf + len, size - len, "%15s: %u\n", 128 len += scnprintf(buf + len, size - len, "%15s: %u\n",
129 "FIR-STEP UP", ah->stats.ast_ani_stepup); 129 "FIR-STEP UP", ah->stats.ast_ani_stepup);
130 len += snprintf(buf + len, size - len, "%15s: %u\n", 130 len += scnprintf(buf + len, size - len, "%15s: %u\n",
131 "FIR-STEP DOWN", ah->stats.ast_ani_stepdown); 131 "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
132 len += snprintf(buf + len, size - len, "%15s: %u\n", 132 len += scnprintf(buf + len, size - len, "%15s: %u\n",
133 "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero); 133 "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
134 len += snprintf(buf + len, size - len, "%15s: %u\n", 134 len += scnprintf(buf + len, size - len, "%15s: %u\n",
135 "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs); 135 "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
136 len += snprintf(buf + len, size - len, "%15s: %u\n", 136 len += scnprintf(buf + len, size - len, "%15s: %u\n",
137 "CCK ERRORS", ah->stats.ast_ani_cckerrs); 137 "CCK ERRORS", ah->stats.ast_ani_cckerrs);
138exit: 138exit:
139 if (len > size) 139 if (len > size)
140 len = size; 140 len = size;
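Most of the remaining hunks in this patch are the same mechanical conversion from snprintf() to scnprintf() in debugfs read handlers. The difference is only the return value: snprintf() returns the length the output would have had, so once the buffer fills up len keeps growing past size and the next size - len underflows to a huge unsigned value, which the following call then treats as available space; scnprintf() returns the number of characters actually stored, so len can never exceed size and the trailing "if (len > size) len = size;" clamps become effectively redundant. A small user-space demonstration (scnprintf() is kernel-only, so it is re-implemented here on top of vsnprintf()):

        /* Why the patch prefers scnprintf() for accumulating output. */
        #include <stdarg.h>
        #include <stdio.h>

        static int scnprintf(char *buf, size_t size, const char *fmt, ...)
        {
                va_list ap;
                int ret;

                if (size == 0)
                        return 0;
                va_start(ap, fmt);
                ret = vsnprintf(buf, size, fmt, ap);
                va_end(ap);
                if (ret < 0)
                        return 0;
                /* clamp to the number of characters actually stored */
                return (size_t)ret >= size ? (int)(size - 1) : ret;
        }

        int main(void)
        {
                char buf[16];
                size_t size = sizeof(buf), len = 0;

                len += snprintf(buf + len, size - len, "0123456789");
                len += snprintf(buf + len, size - len, "0123456789");
                /* len is now 20 > size; size - len wraps around, so a further
                 * snprintf() call would be told the buffer is nearly SIZE_MAX
                 * bytes long and could write past the end of buf. */
                printf("snprintf  len=%zu (buffer is %zu)\n", len, size);

                len = 0;
                len += scnprintf(buf + len, size - len, "0123456789");
                len += scnprintf(buf + len, size - len, "0123456789");
                printf("scnprintf len=%zu \"%s\"\n", len, buf);  /* len stays < size */
                return 0;
        }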
@@ -280,70 +280,70 @@ static ssize_t read_file_antenna_diversity(struct file *file,
280 return -ENOMEM; 280 return -ENOMEM;
281 281
282 if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) { 282 if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
283 len += snprintf(buf + len, size - len, "%s\n", 283 len += scnprintf(buf + len, size - len, "%s\n",
284 "Antenna Diversity Combining is disabled"); 284 "Antenna Diversity Combining is disabled");
285 goto exit; 285 goto exit;
286 } 286 }
287 287
288 ath9k_ps_wakeup(sc); 288 ath9k_ps_wakeup(sc);
289 ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf); 289 ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
290 len += snprintf(buf + len, size - len, "Current MAIN config : %s\n", 290 len += scnprintf(buf + len, size - len, "Current MAIN config : %s\n",
291 lna_conf_str[div_ant_conf.main_lna_conf]); 291 lna_conf_str[div_ant_conf.main_lna_conf]);
292 len += snprintf(buf + len, size - len, "Current ALT config : %s\n", 292 len += scnprintf(buf + len, size - len, "Current ALT config : %s\n",
293 lna_conf_str[div_ant_conf.alt_lna_conf]); 293 lna_conf_str[div_ant_conf.alt_lna_conf]);
294 len += snprintf(buf + len, size - len, "Average MAIN RSSI : %d\n", 294 len += scnprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
295 as_main->rssi_avg); 295 as_main->rssi_avg);
296 len += snprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n", 296 len += scnprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
297 as_alt->rssi_avg); 297 as_alt->rssi_avg);
298 ath9k_ps_restore(sc); 298 ath9k_ps_restore(sc);
299 299
300 len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n"); 300 len += scnprintf(buf + len, size - len, "Packet Receive Cnt:\n");
301 len += snprintf(buf + len, size - len, "-------------------\n"); 301 len += scnprintf(buf + len, size - len, "-------------------\n");
302 302
303 len += snprintf(buf + len, size - len, "%30s%15s\n", 303 len += scnprintf(buf + len, size - len, "%30s%15s\n",
304 "MAIN", "ALT"); 304 "MAIN", "ALT");
305 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", 305 len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
306 "TOTAL COUNT", 306 "TOTAL COUNT",
307 as_main->recv_cnt, 307 as_main->recv_cnt,
308 as_alt->recv_cnt); 308 as_alt->recv_cnt);
309 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", 309 len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
310 "LNA1", 310 "LNA1",
311 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1], 311 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
312 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]); 312 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
313 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", 313 len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
314 "LNA2", 314 "LNA2",
315 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2], 315 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
316 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]); 316 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
317 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", 317 len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
318 "LNA1 + LNA2", 318 "LNA1 + LNA2",
319 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2], 319 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
320 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]); 320 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
321 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", 321 len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
322 "LNA1 - LNA2", 322 "LNA1 - LNA2",
323 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2], 323 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
324 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]); 324 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
325 325
326 len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n"); 326 len += scnprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
327 len += snprintf(buf + len, size - len, "--------------------\n"); 327 len += scnprintf(buf + len, size - len, "--------------------\n");
328 328
329 len += snprintf(buf + len, size - len, "%30s%15s\n", 329 len += scnprintf(buf + len, size - len, "%30s%15s\n",
330 "MAIN", "ALT"); 330 "MAIN", "ALT");
331 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", 331 len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
332 "LNA1", 332 "LNA1",
333 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1], 333 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
334 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]); 334 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
335 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", 335 len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
336 "LNA2", 336 "LNA2",
337 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2], 337 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
338 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]); 338 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
339 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", 339 len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
340 "LNA1 + LNA2", 340 "LNA1 + LNA2",
341 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2], 341 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
342 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]); 342 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
343 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", 343 len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
344 "LNA1 - LNA2", 344 "LNA1 - LNA2",
345 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2], 345 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
346 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]); 346 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
347 347
348exit: 348exit:
349 if (len > size) 349 if (len > size)
@@ -385,21 +385,21 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
385 (AR_MACMISC_MISC_OBS_BUS_1 << 385 (AR_MACMISC_MISC_OBS_BUS_1 <<
386 AR_MACMISC_MISC_OBS_BUS_MSB_S))); 386 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
387 387
388 len += snprintf(buf + len, DMA_BUF_LEN - len, 388 len += scnprintf(buf + len, DMA_BUF_LEN - len,
389 "Raw DMA Debug values:\n"); 389 "Raw DMA Debug values:\n");
390 390
391 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) { 391 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
392 if (i % 4 == 0) 392 if (i % 4 == 0)
393 len += snprintf(buf + len, DMA_BUF_LEN - len, "\n"); 393 len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
394 394
395 val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32))); 395 val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
396 len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ", 396 len += scnprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
397 i, val[i]); 397 i, val[i]);
398 } 398 }
399 399
400 len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n"); 400 len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
401 len += snprintf(buf + len, DMA_BUF_LEN - len, 401 len += scnprintf(buf + len, DMA_BUF_LEN - len,
402 "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n"); 402 "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
403 403
404 for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) { 404 for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
405 if (i == 8) { 405 if (i == 8) {
@@ -412,39 +412,39 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
412 dcuBase++; 412 dcuBase++;
413 } 413 }
414 414
415 len += snprintf(buf + len, DMA_BUF_LEN - len, 415 len += scnprintf(buf + len, DMA_BUF_LEN - len,
416 "%2d %2x %1x %2x %2x\n", 416 "%2d %2x %1x %2x %2x\n",
417 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset, 417 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
418 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3), 418 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
419 val[2] & (0x7 << (i * 3)) >> (i * 3), 419 val[2] & (0x7 << (i * 3)) >> (i * 3),
420 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset); 420 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
421 } 421 }
422 422
423 len += snprintf(buf + len, DMA_BUF_LEN - len, "\n"); 423 len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
424 424
425 len += snprintf(buf + len, DMA_BUF_LEN - len, 425 len += scnprintf(buf + len, DMA_BUF_LEN - len,
426 "qcu_stitch state: %2x qcu_fetch state: %2x\n", 426 "qcu_stitch state: %2x qcu_fetch state: %2x\n",
427 (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22); 427 (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
428 len += snprintf(buf + len, DMA_BUF_LEN - len, 428 len += scnprintf(buf + len, DMA_BUF_LEN - len,
429 "qcu_complete state: %2x dcu_complete state: %2x\n", 429 "qcu_complete state: %2x dcu_complete state: %2x\n",
430 (val[3] & 0x1c000000) >> 26, (val[6] & 0x3)); 430 (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
431 len += snprintf(buf + len, DMA_BUF_LEN - len, 431 len += scnprintf(buf + len, DMA_BUF_LEN - len,
432 "dcu_arb state: %2x dcu_fp state: %2x\n", 432 "dcu_arb state: %2x dcu_fp state: %2x\n",
433 (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27); 433 (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
434 len += snprintf(buf + len, DMA_BUF_LEN - len, 434 len += scnprintf(buf + len, DMA_BUF_LEN - len,
435 "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n", 435 "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
436 (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10); 436 (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
437 len += snprintf(buf + len, DMA_BUF_LEN - len, 437 len += scnprintf(buf + len, DMA_BUF_LEN - len,
438 "txfifo_valid_0: %1d txfifo_valid_1: %1d\n", 438 "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
439 (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12); 439 (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
440 len += snprintf(buf + len, DMA_BUF_LEN - len, 440 len += scnprintf(buf + len, DMA_BUF_LEN - len,
441 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n", 441 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
442 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17); 442 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
443 443
444 len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n", 444 len += scnprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
445 REG_READ_D(ah, AR_OBS_BUS_1)); 445 REG_READ_D(ah, AR_OBS_BUS_1));
446 len += snprintf(buf + len, DMA_BUF_LEN - len, 446 len += scnprintf(buf + len, DMA_BUF_LEN - len,
447 "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR)); 447 "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
448 448
449 ath9k_ps_restore(sc); 449 ath9k_ps_restore(sc);
450 450
@@ -530,9 +530,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
530 530
531#define PR_IS(a, s) \ 531#define PR_IS(a, s) \
532 do { \ 532 do { \
533 len += snprintf(buf + len, mxlen - len, \ 533 len += scnprintf(buf + len, mxlen - len, \
534 "%21s: %10u\n", a, \ 534 "%21s: %10u\n", a, \
535 sc->debug.stats.istats.s); \ 535 sc->debug.stats.istats.s); \
536 } while (0) 536 } while (0)
537 537
538 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 538 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@@ -563,8 +563,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
563 PR_IS("GENTIMER", gen_timer); 563 PR_IS("GENTIMER", gen_timer);
564 PR_IS("TOTAL", total); 564 PR_IS("TOTAL", total);
565 565
566 len += snprintf(buf + len, mxlen - len, 566 len += scnprintf(buf + len, mxlen - len,
567 "SYNC_CAUSE stats:\n"); 567 "SYNC_CAUSE stats:\n");
568 568
569 PR_IS("Sync-All", sync_cause_all); 569 PR_IS("Sync-All", sync_cause_all);
570 PR_IS("RTC-IRQ", sync_rtc_irq); 570 PR_IS("RTC-IRQ", sync_rtc_irq);
@@ -655,16 +655,16 @@ static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
655 655
656 ath_txq_lock(sc, txq); 656 ath_txq_lock(sc, txq);
657 657
658 len += snprintf(buf + len, size - len, "%s: %d ", 658 len += scnprintf(buf + len, size - len, "%s: %d ",
659 "qnum", txq->axq_qnum); 659 "qnum", txq->axq_qnum);
660 len += snprintf(buf + len, size - len, "%s: %2d ", 660 len += scnprintf(buf + len, size - len, "%s: %2d ",
661 "qdepth", txq->axq_depth); 661 "qdepth", txq->axq_depth);
662 len += snprintf(buf + len, size - len, "%s: %2d ", 662 len += scnprintf(buf + len, size - len, "%s: %2d ",
663 "ampdu-depth", txq->axq_ampdu_depth); 663 "ampdu-depth", txq->axq_ampdu_depth);
664 len += snprintf(buf + len, size - len, "%s: %3d ", 664 len += scnprintf(buf + len, size - len, "%s: %3d ",
665 "pending", txq->pending_frames); 665 "pending", txq->pending_frames);
666 len += snprintf(buf + len, size - len, "%s: %d\n", 666 len += scnprintf(buf + len, size - len, "%s: %d\n",
667 "stopped", txq->stopped); 667 "stopped", txq->stopped);
668 668
669 ath_txq_unlock(sc, txq); 669 ath_txq_unlock(sc, txq);
670 return len; 670 return len;
@@ -687,11 +687,11 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
687 687
688 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 688 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
689 txq = sc->tx.txq_map[i]; 689 txq = sc->tx.txq_map[i];
690 len += snprintf(buf + len, size - len, "(%s): ", qname[i]); 690 len += scnprintf(buf + len, size - len, "(%s): ", qname[i]);
691 len += print_queue(sc, txq, buf + len, size - len); 691 len += print_queue(sc, txq, buf + len, size - len);
692 } 692 }
693 693
694 len += snprintf(buf + len, size - len, "(CAB): "); 694 len += scnprintf(buf + len, size - len, "(CAB): ");
695 len += print_queue(sc, sc->beacon.cabq, buf + len, size - len); 695 len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
696 696
697 if (len > size) 697 if (len > size)
@@ -716,80 +716,82 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
716 unsigned int reg; 716 unsigned int reg;
717 u32 rxfilter; 717 u32 rxfilter;
718 718
719 len += snprintf(buf + len, sizeof(buf) - len, 719 len += scnprintf(buf + len, sizeof(buf) - len,
720 "BSSID: %pM\n", common->curbssid); 720 "BSSID: %pM\n", common->curbssid);
721 len += snprintf(buf + len, sizeof(buf) - len, 721 len += scnprintf(buf + len, sizeof(buf) - len,
722 "BSSID-MASK: %pM\n", common->bssidmask); 722 "BSSID-MASK: %pM\n", common->bssidmask);
723 len += snprintf(buf + len, sizeof(buf) - len, 723 len += scnprintf(buf + len, sizeof(buf) - len,
724 "OPMODE: %s\n", ath_opmode_to_string(sc->sc_ah->opmode)); 724 "OPMODE: %s\n",
725 ath_opmode_to_string(sc->sc_ah->opmode));
725 726
726 ath9k_ps_wakeup(sc); 727 ath9k_ps_wakeup(sc);
727 rxfilter = ath9k_hw_getrxfilter(sc->sc_ah); 728 rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
728 ath9k_ps_restore(sc); 729 ath9k_ps_restore(sc);
729 730
730 len += snprintf(buf + len, sizeof(buf) - len, 731 len += scnprintf(buf + len, sizeof(buf) - len,
731 "RXFILTER: 0x%x", rxfilter); 732 "RXFILTER: 0x%x", rxfilter);
732 733
733 if (rxfilter & ATH9K_RX_FILTER_UCAST) 734 if (rxfilter & ATH9K_RX_FILTER_UCAST)
734 len += snprintf(buf + len, sizeof(buf) - len, " UCAST"); 735 len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
735 if (rxfilter & ATH9K_RX_FILTER_MCAST) 736 if (rxfilter & ATH9K_RX_FILTER_MCAST)
736 len += snprintf(buf + len, sizeof(buf) - len, " MCAST"); 737 len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
737 if (rxfilter & ATH9K_RX_FILTER_BCAST) 738 if (rxfilter & ATH9K_RX_FILTER_BCAST)
738 len += snprintf(buf + len, sizeof(buf) - len, " BCAST"); 739 len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
739 if (rxfilter & ATH9K_RX_FILTER_CONTROL) 740 if (rxfilter & ATH9K_RX_FILTER_CONTROL)
740 len += snprintf(buf + len, sizeof(buf) - len, " CONTROL"); 741 len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
741 if (rxfilter & ATH9K_RX_FILTER_BEACON) 742 if (rxfilter & ATH9K_RX_FILTER_BEACON)
742 len += snprintf(buf + len, sizeof(buf) - len, " BEACON"); 743 len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
743 if (rxfilter & ATH9K_RX_FILTER_PROM) 744 if (rxfilter & ATH9K_RX_FILTER_PROM)
744 len += snprintf(buf + len, sizeof(buf) - len, " PROM"); 745 len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
745 if (rxfilter & ATH9K_RX_FILTER_PROBEREQ) 746 if (rxfilter & ATH9K_RX_FILTER_PROBEREQ)
746 len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ"); 747 len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
747 if (rxfilter & ATH9K_RX_FILTER_PHYERR) 748 if (rxfilter & ATH9K_RX_FILTER_PHYERR)
748 len += snprintf(buf + len, sizeof(buf) - len, " PHYERR"); 749 len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR");
749 if (rxfilter & ATH9K_RX_FILTER_MYBEACON) 750 if (rxfilter & ATH9K_RX_FILTER_MYBEACON)
750 len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON"); 751 len += scnprintf(buf + len, sizeof(buf) - len, " MYBEACON");
751 if (rxfilter & ATH9K_RX_FILTER_COMP_BAR) 752 if (rxfilter & ATH9K_RX_FILTER_COMP_BAR)
752 len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR"); 753 len += scnprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
753 if (rxfilter & ATH9K_RX_FILTER_PSPOLL) 754 if (rxfilter & ATH9K_RX_FILTER_PSPOLL)
754 len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL"); 755 len += scnprintf(buf + len, sizeof(buf) - len, " PSPOLL");
755 if (rxfilter & ATH9K_RX_FILTER_PHYRADAR) 756 if (rxfilter & ATH9K_RX_FILTER_PHYRADAR)
756 len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR"); 757 len += scnprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
757 if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL) 758 if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
758 len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL"); 759 len += scnprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
759 if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER) 760 if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER)
760 len += snprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER"); 761 len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
761 762
762 len += snprintf(buf + len, sizeof(buf) - len, "\n"); 763 len += scnprintf(buf + len, sizeof(buf) - len, "\n");
763 764
764 reg = sc->sc_ah->imask; 765 reg = sc->sc_ah->imask;
765 766
766 len += snprintf(buf + len, sizeof(buf) - len, "INTERRUPT-MASK: 0x%x", reg); 767 len += scnprintf(buf + len, sizeof(buf) - len,
768 "INTERRUPT-MASK: 0x%x", reg);
767 769
768 if (reg & ATH9K_INT_SWBA) 770 if (reg & ATH9K_INT_SWBA)
769 len += snprintf(buf + len, sizeof(buf) - len, " SWBA"); 771 len += scnprintf(buf + len, sizeof(buf) - len, " SWBA");
770 if (reg & ATH9K_INT_BMISS) 772 if (reg & ATH9K_INT_BMISS)
771 len += snprintf(buf + len, sizeof(buf) - len, " BMISS"); 773 len += scnprintf(buf + len, sizeof(buf) - len, " BMISS");
772 if (reg & ATH9K_INT_CST) 774 if (reg & ATH9K_INT_CST)
773 len += snprintf(buf + len, sizeof(buf) - len, " CST"); 775 len += scnprintf(buf + len, sizeof(buf) - len, " CST");
774 if (reg & ATH9K_INT_RX) 776 if (reg & ATH9K_INT_RX)
775 len += snprintf(buf + len, sizeof(buf) - len, " RX"); 777 len += scnprintf(buf + len, sizeof(buf) - len, " RX");
776 if (reg & ATH9K_INT_RXHP) 778 if (reg & ATH9K_INT_RXHP)
777 len += snprintf(buf + len, sizeof(buf) - len, " RXHP"); 779 len += scnprintf(buf + len, sizeof(buf) - len, " RXHP");
778 if (reg & ATH9K_INT_RXLP) 780 if (reg & ATH9K_INT_RXLP)
779 len += snprintf(buf + len, sizeof(buf) - len, " RXLP"); 781 len += scnprintf(buf + len, sizeof(buf) - len, " RXLP");
780 if (reg & ATH9K_INT_BB_WATCHDOG) 782 if (reg & ATH9K_INT_BB_WATCHDOG)
781 len += snprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG"); 783 len += scnprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
782 784
783 len += snprintf(buf + len, sizeof(buf) - len, "\n"); 785 len += scnprintf(buf + len, sizeof(buf) - len, "\n");
784 786
785 ath9k_calculate_iter_data(hw, NULL, &iter_data); 787 ath9k_calculate_iter_data(hw, NULL, &iter_data);
786 788
787 len += snprintf(buf + len, sizeof(buf) - len, 789 len += scnprintf(buf + len, sizeof(buf) - len,
788 "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i" 790 "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
789 " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n", 791 " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
790 iter_data.naps, iter_data.nstations, iter_data.nmeshes, 792 iter_data.naps, iter_data.nstations, iter_data.nmeshes,
791 iter_data.nwds, iter_data.nadhocs, 793 iter_data.nwds, iter_data.nadhocs,
792 sc->nvifs, sc->nbcnvifs); 794 sc->nvifs, sc->nbcnvifs);
793 795
794 if (len > sizeof(buf)) 796 if (len > sizeof(buf))
795 len = sizeof(buf); 797 len = sizeof(buf);
@@ -805,27 +807,27 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
805 char buf[512]; 807 char buf[512];
806 unsigned int len = 0; 808 unsigned int len = 0;
807 809
808 len += snprintf(buf + len, sizeof(buf) - len, 810 len += scnprintf(buf + len, sizeof(buf) - len,
809 "%17s: %2d\n", "Baseband Hang", 811 "%17s: %2d\n", "Baseband Hang",
810 sc->debug.stats.reset[RESET_TYPE_BB_HANG]); 812 sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
811 len += snprintf(buf + len, sizeof(buf) - len, 813 len += scnprintf(buf + len, sizeof(buf) - len,
812 "%17s: %2d\n", "Baseband Watchdog", 814 "%17s: %2d\n", "Baseband Watchdog",
813 sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]); 815 sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
814 len += snprintf(buf + len, sizeof(buf) - len, 816 len += scnprintf(buf + len, sizeof(buf) - len,
815 "%17s: %2d\n", "Fatal HW Error", 817 "%17s: %2d\n", "Fatal HW Error",
816 sc->debug.stats.reset[RESET_TYPE_FATAL_INT]); 818 sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
817 len += snprintf(buf + len, sizeof(buf) - len, 819 len += scnprintf(buf + len, sizeof(buf) - len,
818 "%17s: %2d\n", "TX HW error", 820 "%17s: %2d\n", "TX HW error",
819 sc->debug.stats.reset[RESET_TYPE_TX_ERROR]); 821 sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
820 len += snprintf(buf + len, sizeof(buf) - len, 822 len += scnprintf(buf + len, sizeof(buf) - len,
821 "%17s: %2d\n", "TX Path Hang", 823 "%17s: %2d\n", "TX Path Hang",
822 sc->debug.stats.reset[RESET_TYPE_TX_HANG]); 824 sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
823 len += snprintf(buf + len, sizeof(buf) - len, 825 len += scnprintf(buf + len, sizeof(buf) - len,
824 "%17s: %2d\n", "PLL RX Hang", 826 "%17s: %2d\n", "PLL RX Hang",
825 sc->debug.stats.reset[RESET_TYPE_PLL_HANG]); 827 sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
826 len += snprintf(buf + len, sizeof(buf) - len, 828 len += scnprintf(buf + len, sizeof(buf) - len,
827 "%17s: %2d\n", "MCI Reset", 829 "%17s: %2d\n", "MCI Reset",
828 sc->debug.stats.reset[RESET_TYPE_MCI]); 830 sc->debug.stats.reset[RESET_TYPE_MCI]);
829 831
830 if (len > sizeof(buf)) 832 if (len > sizeof(buf))
831 len = sizeof(buf); 833 len = sizeof(buf);
@@ -902,14 +904,14 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
902 size_t count, loff_t *ppos) 904 size_t count, loff_t *ppos)
903{ 905{
904#define PHY_ERR(s, p) \ 906#define PHY_ERR(s, p) \
905 len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \ 907 len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
906 sc->debug.stats.rxstats.phy_err_stats[p]); 908 sc->debug.stats.rxstats.phy_err_stats[p]);
907 909
908#define RXS_ERR(s, e) \ 910#define RXS_ERR(s, e) \
909 do { \ 911 do { \
910 len += snprintf(buf + len, size - len, \ 912 len += scnprintf(buf + len, size - len, \
911 "%22s : %10u\n", s, \ 913 "%22s : %10u\n", s, \
912 sc->debug.stats.rxstats.e); \ 914 sc->debug.stats.rxstats.e);\
913 } while (0) 915 } while (0)
914 916
915 struct ath_softc *sc = file->private_data; 917 struct ath_softc *sc = file->private_data;
@@ -1439,22 +1441,22 @@ static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf,
1439 if (!buf) 1441 if (!buf)
1440 return -ENOMEM; 1442 return -ENOMEM;
1441 1443
1442 len += snprintf(buf + len, size - len, 1444 len += scnprintf(buf + len, size - len,
1443 "Channel Noise Floor : %d\n", ah->noise); 1445 "Channel Noise Floor : %d\n", ah->noise);
1444 len += snprintf(buf + len, size - len, 1446 len += scnprintf(buf + len, size - len,
1445 "Chain | privNF | # Readings | NF Readings\n"); 1447 "Chain | privNF | # Readings | NF Readings\n");
1446 for (i = 0; i < NUM_NF_READINGS; i++) { 1448 for (i = 0; i < NUM_NF_READINGS; i++) {
1447 if (!(chainmask & (1 << i)) || 1449 if (!(chainmask & (1 << i)) ||
1448 ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))) 1450 ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
1449 continue; 1451 continue;
1450 1452
1451 nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount; 1453 nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
1452 len += snprintf(buf + len, size - len, " %d\t %d\t %d\t\t", 1454 len += scnprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
1453 i, h[i].privNF, nread); 1455 i, h[i].privNF, nread);
1454 for (j = 0; j < nread; j++) 1456 for (j = 0; j < nread; j++)
1455 len += snprintf(buf + len, size - len, 1457 len += scnprintf(buf + len, size - len,
1456 " %d", h[i].nfCalBuffer[j]); 1458 " %d", h[i].nfCalBuffer[j]);
1457 len += snprintf(buf + len, size - len, "\n"); 1459 len += scnprintf(buf + len, size - len, "\n");
1458 } 1460 }
1459 1461
1460 if (len > size) 1462 if (len > size)
@@ -1543,8 +1545,8 @@ static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
1543 return -ENOMEM; 1545 return -ENOMEM;
1544 1546
1545 if (!sc->sc_ah->common.btcoex_enabled) { 1547 if (!sc->sc_ah->common.btcoex_enabled) {
1546 len = snprintf(buf, size, "%s\n", 1548 len = scnprintf(buf, size, "%s\n",
1547 "BTCOEX is disabled"); 1549 "BTCOEX is disabled");
1548 goto exit; 1550 goto exit;
1549 } 1551 }
1550 1552
@@ -1582,43 +1584,43 @@ static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
1582 return -ENOMEM; 1584 return -ENOMEM;
1583 1585
1584 if (!an->sta->ht_cap.ht_supported) { 1586 if (!an->sta->ht_cap.ht_supported) {
1585 len = snprintf(buf, size, "%s\n", 1587 len = scnprintf(buf, size, "%s\n",
1586 "HT not supported"); 1588 "HT not supported");
1587 goto exit; 1589 goto exit;
1588 } 1590 }
1589 1591
1590 len = snprintf(buf, size, "Max-AMPDU: %d\n", 1592 len = scnprintf(buf, size, "Max-AMPDU: %d\n",
1591 an->maxampdu); 1593 an->maxampdu);
1592 len += snprintf(buf + len, size - len, "MPDU Density: %d\n\n", 1594 len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
1593 an->mpdudensity); 1595 an->mpdudensity);
1594 1596
1595 len += snprintf(buf + len, size - len, 1597 len += scnprintf(buf + len, size - len,
1596 "%2s%7s\n", "AC", "SCHED"); 1598 "%2s%7s\n", "AC", "SCHED");
1597 1599
1598 for (acno = 0, ac = &an->ac[acno]; 1600 for (acno = 0, ac = &an->ac[acno];
1599 acno < IEEE80211_NUM_ACS; acno++, ac++) { 1601 acno < IEEE80211_NUM_ACS; acno++, ac++) {
1600 txq = ac->txq; 1602 txq = ac->txq;
1601 ath_txq_lock(sc, txq); 1603 ath_txq_lock(sc, txq);
1602 len += snprintf(buf + len, size - len, 1604 len += scnprintf(buf + len, size - len,
1603 "%2d%7d\n", 1605 "%2d%7d\n",
1604 acno, ac->sched); 1606 acno, ac->sched);
1605 ath_txq_unlock(sc, txq); 1607 ath_txq_unlock(sc, txq);
1606 } 1608 }
1607 1609
1608 len += snprintf(buf + len, size - len, 1610 len += scnprintf(buf + len, size - len,
1609 "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n", 1611 "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
1610 "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE", 1612 "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
1611 "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED"); 1613 "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
1612 1614
1613 for (tidno = 0, tid = &an->tid[tidno]; 1615 for (tidno = 0, tid = &an->tid[tidno];
1614 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { 1616 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
1615 txq = tid->ac->txq; 1617 txq = tid->ac->txq;
1616 ath_txq_lock(sc, txq); 1618 ath_txq_lock(sc, txq);
1617 len += snprintf(buf + len, size - len, 1619 len += scnprintf(buf + len, size - len,
1618 "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n", 1620 "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
1619 tid->tidno, tid->seq_start, tid->seq_next, 1621 tid->tidno, tid->seq_start, tid->seq_next,
1620 tid->baw_size, tid->baw_head, tid->baw_tail, 1622 tid->baw_size, tid->baw_head, tid->baw_tail,
1621 tid->bar_index, tid->sched, tid->paused); 1623 tid->bar_index, tid->sched, tid->paused);
1622 ath_txq_unlock(sc, txq); 1624 ath_txq_unlock(sc, txq);
1623 } 1625 }
1624exit: 1626exit:
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 6e1556fa2f3e..d6e3fa4299a4 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -193,12 +193,12 @@ struct ath_tx_stats {
193#define TXSTATS sc->debug.stats.txstats 193#define TXSTATS sc->debug.stats.txstats
194#define PR(str, elem) \ 194#define PR(str, elem) \
195 do { \ 195 do { \
196 len += snprintf(buf + len, size - len, \ 196 len += scnprintf(buf + len, size - len, \
197 "%s%13u%11u%10u%10u\n", str, \ 197 "%s%13u%11u%10u%10u\n", str, \
198 TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem, \ 198 TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,\
199 TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem, \ 199 TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,\
200 TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem, \ 200 TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,\
201 TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \ 201 TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
202 } while(0) 202 } while(0)
203 203
204#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++) 204#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
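The PR() macro above (like PR_IS() and RXS_ERR() earlier in the patch) wraps its statement in do { ... } while (0) so that the macro expands to exactly one statement and composes safely with if/else. A generic, standalone illustration of why that wrapper matters (not driver code):

        /* Why multi-statement macros are wrapped in do { ... } while (0). */
        #include <stdio.h>

        #define LOG_BAD(tag, val)   printf("%s: ", tag); printf("%d\n", val)

        #define LOG_GOOD(tag, val)                      \
                do {                                    \
                        printf("%s: ", tag);            \
                        printf("%d\n", val);            \
                } while (0)

        int main(void)
        {
                int err = 0;

                if (err)
                        LOG_GOOD("error", err);         /* expands to one statement */
                else
                        printf("ok\n");

                /* With LOG_BAD the same construct would not compile: only the
                 * first printf() would belong to the if, and the stray second
                 * statement would leave the else without a matching if. */
                return 0;
        }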
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 3c6e4138a95d..821599135d8a 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -25,11 +25,11 @@
25struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 }; 25struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
26 26
27#define ATH9K_DFS_STAT(s, p) \ 27#define ATH9K_DFS_STAT(s, p) \
28 len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \ 28 len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
29 sc->debug.stats.dfs_stats.p); 29 sc->debug.stats.dfs_stats.p);
30#define ATH9K_DFS_POOL_STAT(s, p) \ 30#define ATH9K_DFS_POOL_STAT(s, p) \
31 len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \ 31 len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
32 global_dfs_pool_stats.p); 32 global_dfs_pool_stats.p);
33 33
34static ssize_t read_file_dfs(struct file *file, char __user *user_buf, 34static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos) 35 size_t count, loff_t *ppos)
@@ -44,12 +44,12 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
44 if (buf == NULL) 44 if (buf == NULL)
45 return -ENOMEM; 45 return -ENOMEM;
46 46
47 len += snprintf(buf + len, size - len, "DFS support for " 47 len += scnprintf(buf + len, size - len, "DFS support for "
48 "macVersion = 0x%x, macRev = 0x%x: %s\n", 48 "macVersion = 0x%x, macRev = 0x%x: %s\n",
49 hw_ver->macVersion, hw_ver->macRev, 49 hw_ver->macVersion, hw_ver->macRev,
50 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? 50 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
51 "enabled" : "disabled"); 51 "enabled" : "disabled");
52 len += snprintf(buf + len, size - len, "Pulse detector statistics:\n"); 52 len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
53 ATH9K_DFS_STAT("pulse events reported ", pulses_total); 53 ATH9K_DFS_STAT("pulse events reported ", pulses_total);
54 ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs); 54 ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs);
55 ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected); 55 ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
@@ -59,11 +59,12 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
59 ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors); 59 ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
60 ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors); 60 ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
61 ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors); 61 ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
62 len += snprintf(buf + len, size - len, "Radar detector statistics " 62 len += scnprintf(buf + len, size - len, "Radar detector statistics "
63 "(current DFS region: %d)\n", sc->dfs_detector->region); 63 "(current DFS region: %d)\n",
64 sc->dfs_detector->region);
64 ATH9K_DFS_STAT("Pulse events processed ", pulses_processed); 65 ATH9K_DFS_STAT("Pulse events processed ", pulses_processed);
65 ATH9K_DFS_STAT("Radars detected ", radar_detected); 66 ATH9K_DFS_STAT("Radars detected ", radar_detected);
66 len += snprintf(buf + len, size - len, "Global Pool statistics:\n"); 67 len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
67 ATH9K_DFS_POOL_STAT("Pool references ", pool_reference); 68 ATH9K_DFS_POOL_STAT("Pool references ", pool_reference);
68 ATH9K_DFS_POOL_STAT("Pulses allocated ", pulse_allocated); 69 ATH9K_DFS_POOL_STAT("Pulses allocated ", pulse_allocated);
69 ATH9K_DFS_POOL_STAT("Pulses alloc error ", pulse_alloc_error); 70 ATH9K_DFS_POOL_STAT("Pulses alloc error ", pulse_alloc_error);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
index 5ba4b6fe37c0..c718fc379a10 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
@@ -392,7 +392,7 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
392 392
393 if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) { 393 if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
394 pri_detector_reset(de, ts); 394 pri_detector_reset(de, ts);
395 return false; 395 return NULL;
396 } 396 }
397 397
398 ps = pseq_handler_check_detection(de); 398 ps = pseq_handler_check_detection(de);
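The one-line dfs_pri_detector.c change replaces "return false;" with "return NULL;" in pri_detector_add_pulse(), which returns a struct pri_sequence pointer. Returning false from a pointer-valued function happens to compile in C (false expands to 0, an integer constant that converts to a null pointer) but it obscures the intent and trips sparse/static-checker warnings; the fix states the intent explicitly. A tiny illustration of the pattern, with hypothetical names rather than the driver's:

        /* Illustration of the return-type fix above; struct and function
         * names are placeholders. */
        #include <stddef.h>
        #include <stdlib.h>

        struct sequence { int count; };

        static struct sequence *find_sequence(int have_match)
        {
                if (!have_match)
                        return NULL;    /* was the moral equivalent of "return false;" -
                                         * same value (0), wrong type for a pointer */
                return calloc(1, sizeof(struct sequence));
        }

        int main(void)
        {
                struct sequence *s = find_sequence(0);
                int not_found = (s == NULL);

                free(s);                /* free(NULL) is a no-op */
                return not_found ? 0 : 1;
        }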
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9ea8e4b779c9..b4091716e9b3 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -129,10 +129,10 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
129 struct base_eep_header_4k *pBase = &eep->baseEepHeader; 129 struct base_eep_header_4k *pBase = &eep->baseEepHeader;
130 130
131 if (!dump_base_hdr) { 131 if (!dump_base_hdr) {
132 len += snprintf(buf + len, size - len, 132 len += scnprintf(buf + len, size - len,
133 "%20s :\n", "2GHz modal Header"); 133 "%20s :\n", "2GHz modal Header");
134 len = ath9k_dump_4k_modal_eeprom(buf, len, size, 134 len = ath9k_dump_4k_modal_eeprom(buf, len, size,
135 &eep->modalHeader); 135 &eep->modalHeader);
136 goto out; 136 goto out;
137 } 137 }
138 138
@@ -160,8 +160,8 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
160 PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); 160 PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
161 PR_EEP("TX Gain type", pBase->txGainType); 161 PR_EEP("TX Gain type", pBase->txGainType);
162 162
163 len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", 163 len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
164 pBase->macAddr); 164 pBase->macAddr);
165 165
166out: 166out:
167 if (len > size) 167 if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 3ae1f3df0637..e1d0c217c104 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -125,8 +125,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
125 struct base_eep_ar9287_header *pBase = &eep->baseEepHeader; 125 struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
126 126
127 if (!dump_base_hdr) { 127 if (!dump_base_hdr) {
128 len += snprintf(buf + len, size - len, 128 len += scnprintf(buf + len, size - len,
129 "%20s :\n", "2GHz modal Header"); 129 "%20s :\n", "2GHz modal Header");
130 len = ar9287_dump_modal_eeprom(buf, len, size, 130 len = ar9287_dump_modal_eeprom(buf, len, size,
131 &eep->modalHeader); 131 &eep->modalHeader);
132 goto out; 132 goto out;
@@ -157,8 +157,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
157 PR_EEP("Power Table Offset", pBase->pwrTableOffset); 157 PR_EEP("Power Table Offset", pBase->pwrTableOffset);
158 PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl); 158 PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
159 159
160 len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", 160 len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
161 pBase->macAddr); 161 pBase->macAddr);
162 162
163out: 163out:
164 if (len > size) 164 if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 1c25368b3836..39107e31e79a 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -205,12 +205,12 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
205 struct base_eep_header *pBase = &eep->baseEepHeader; 205 struct base_eep_header *pBase = &eep->baseEepHeader;
206 206
207 if (!dump_base_hdr) { 207 if (!dump_base_hdr) {
208 len += snprintf(buf + len, size - len, 208 len += scnprintf(buf + len, size - len,
209 "%20s :\n", "2GHz modal Header"); 209 "%20s :\n", "2GHz modal Header");
210 len = ath9k_def_dump_modal_eeprom(buf, len, size, 210 len = ath9k_def_dump_modal_eeprom(buf, len, size,
211 &eep->modalHeader[0]); 211 &eep->modalHeader[0]);
212 len += snprintf(buf + len, size - len, 212 len += scnprintf(buf + len, size - len,
213 "%20s :\n", "5GHz modal Header"); 213 "%20s :\n", "5GHz modal Header");
214 len = ath9k_def_dump_modal_eeprom(buf, len, size, 214 len = ath9k_def_dump_modal_eeprom(buf, len, size,
215 &eep->modalHeader[1]); 215 &eep->modalHeader[1]);
216 goto out; 216 goto out;
@@ -240,8 +240,8 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
240 PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); 240 PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
241 PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl); 241 PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
242 242
243 len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", 243 len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
244 pBase->macAddr); 244 pBase->macAddr);
245 245
246out: 246out:
247 if (len > size) 247 if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 4b412aaf4f36..c34f21241da9 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -522,22 +522,22 @@ static int ath9k_dump_mci_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
522 ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx); 522 ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
523 ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count); 523 ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
524 524
525 len += snprintf(buf + len, size - len, "BT Weights: "); 525 len += scnprintf(buf + len, size - len, "BT Weights: ");
526 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) 526 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
527 len += snprintf(buf + len, size - len, "%08x ", 527 len += scnprintf(buf + len, size - len, "%08x ",
528 btcoex_hw->bt_weight[i]); 528 btcoex_hw->bt_weight[i]);
529 len += snprintf(buf + len, size - len, "\n"); 529 len += scnprintf(buf + len, size - len, "\n");
530 len += snprintf(buf + len, size - len, "WLAN Weights: "); 530 len += scnprintf(buf + len, size - len, "WLAN Weights: ");
531 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) 531 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
532 len += snprintf(buf + len, size - len, "%08x ", 532 len += scnprintf(buf + len, size - len, "%08x ",
533 btcoex_hw->wlan_weight[i]); 533 btcoex_hw->wlan_weight[i]);
534 len += snprintf(buf + len, size - len, "\n"); 534 len += scnprintf(buf + len, size - len, "\n");
535 len += snprintf(buf + len, size - len, "Tx Priorities: "); 535 len += scnprintf(buf + len, size - len, "Tx Priorities: ");
536 for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++) 536 for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
537 len += snprintf(buf + len, size - len, "%08x ", 537 len += scnprintf(buf + len, size - len, "%08x ",
538 btcoex_hw->tx_prio[i]); 538 btcoex_hw->tx_prio[i]);
539 539
540 len += snprintf(buf + len, size - len, "\n"); 540 len += scnprintf(buf + len, size - len, "\n");
541 541
542 return len; 542 return len;
543} 543}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index c1b45e2f8481..fb071ee4fcfb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -37,29 +37,29 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
37 37
38 ath9k_htc_ps_restore(priv); 38 ath9k_htc_ps_restore(priv);
39 39
40 len += snprintf(buf + len, sizeof(buf) - len, 40 len += scnprintf(buf + len, sizeof(buf) - len,
41 "%20s : %10u\n", "RX", 41 "%20s : %10u\n", "RX",
42 be32_to_cpu(cmd_rsp.rx)); 42 be32_to_cpu(cmd_rsp.rx));
43 43
44 len += snprintf(buf + len, sizeof(buf) - len, 44 len += scnprintf(buf + len, sizeof(buf) - len,
45 "%20s : %10u\n", "RXORN", 45 "%20s : %10u\n", "RXORN",
46 be32_to_cpu(cmd_rsp.rxorn)); 46 be32_to_cpu(cmd_rsp.rxorn));
47 47
48 len += snprintf(buf + len, sizeof(buf) - len, 48 len += scnprintf(buf + len, sizeof(buf) - len,
49 "%20s : %10u\n", "RXEOL", 49 "%20s : %10u\n", "RXEOL",
50 be32_to_cpu(cmd_rsp.rxeol)); 50 be32_to_cpu(cmd_rsp.rxeol));
51 51
52 len += snprintf(buf + len, sizeof(buf) - len, 52 len += scnprintf(buf + len, sizeof(buf) - len,
53 "%20s : %10u\n", "TXURN", 53 "%20s : %10u\n", "TXURN",
54 be32_to_cpu(cmd_rsp.txurn)); 54 be32_to_cpu(cmd_rsp.txurn));
55 55
56 len += snprintf(buf + len, sizeof(buf) - len, 56 len += scnprintf(buf + len, sizeof(buf) - len,
57 "%20s : %10u\n", "TXTO", 57 "%20s : %10u\n", "TXTO",
58 be32_to_cpu(cmd_rsp.txto)); 58 be32_to_cpu(cmd_rsp.txto));
59 59
60 len += snprintf(buf + len, sizeof(buf) - len, 60 len += scnprintf(buf + len, sizeof(buf) - len,
61 "%20s : %10u\n", "CST", 61 "%20s : %10u\n", "CST",
62 be32_to_cpu(cmd_rsp.cst)); 62 be32_to_cpu(cmd_rsp.cst));
63 63
64 if (len > sizeof(buf)) 64 if (len > sizeof(buf))
65 len = sizeof(buf); 65 len = sizeof(buf);
@@ -95,41 +95,41 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
95 95
96 ath9k_htc_ps_restore(priv); 96 ath9k_htc_ps_restore(priv);
97 97
98 len += snprintf(buf + len, sizeof(buf) - len, 98 len += scnprintf(buf + len, sizeof(buf) - len,
99 "%20s : %10u\n", "Xretries", 99 "%20s : %10u\n", "Xretries",
100 be32_to_cpu(cmd_rsp.xretries)); 100 be32_to_cpu(cmd_rsp.xretries));
101 101
102 len += snprintf(buf + len, sizeof(buf) - len, 102 len += scnprintf(buf + len, sizeof(buf) - len,
103 "%20s : %10u\n", "FifoErr", 103 "%20s : %10u\n", "FifoErr",
104 be32_to_cpu(cmd_rsp.fifoerr)); 104 be32_to_cpu(cmd_rsp.fifoerr));
105 105
106 len += snprintf(buf + len, sizeof(buf) - len, 106 len += scnprintf(buf + len, sizeof(buf) - len,
107 "%20s : %10u\n", "Filtered", 107 "%20s : %10u\n", "Filtered",
108 be32_to_cpu(cmd_rsp.filtered)); 108 be32_to_cpu(cmd_rsp.filtered));
109 109
110 len += snprintf(buf + len, sizeof(buf) - len, 110 len += scnprintf(buf + len, sizeof(buf) - len,
111 "%20s : %10u\n", "TimerExp", 111 "%20s : %10u\n", "TimerExp",
112 be32_to_cpu(cmd_rsp.timer_exp)); 112 be32_to_cpu(cmd_rsp.timer_exp));
113 113
114 len += snprintf(buf + len, sizeof(buf) - len, 114 len += scnprintf(buf + len, sizeof(buf) - len,
115 "%20s : %10u\n", "ShortRetries", 115 "%20s : %10u\n", "ShortRetries",
116 be32_to_cpu(cmd_rsp.shortretries)); 116 be32_to_cpu(cmd_rsp.shortretries));
117 117
118 len += snprintf(buf + len, sizeof(buf) - len, 118 len += scnprintf(buf + len, sizeof(buf) - len,
119 "%20s : %10u\n", "LongRetries", 119 "%20s : %10u\n", "LongRetries",
120 be32_to_cpu(cmd_rsp.longretries)); 120 be32_to_cpu(cmd_rsp.longretries));
121 121
122 len += snprintf(buf + len, sizeof(buf) - len, 122 len += scnprintf(buf + len, sizeof(buf) - len,
123 "%20s : %10u\n", "QueueNull", 123 "%20s : %10u\n", "QueueNull",
124 be32_to_cpu(cmd_rsp.qnull)); 124 be32_to_cpu(cmd_rsp.qnull));
125 125
126 len += snprintf(buf + len, sizeof(buf) - len, 126 len += scnprintf(buf + len, sizeof(buf) - len,
127 "%20s : %10u\n", "EncapFail", 127 "%20s : %10u\n", "EncapFail",
128 be32_to_cpu(cmd_rsp.encap_fail)); 128 be32_to_cpu(cmd_rsp.encap_fail));
129 129
130 len += snprintf(buf + len, sizeof(buf) - len, 130 len += scnprintf(buf + len, sizeof(buf) - len,
131 "%20s : %10u\n", "NoBuf", 131 "%20s : %10u\n", "NoBuf",
132 be32_to_cpu(cmd_rsp.nobuf)); 132 be32_to_cpu(cmd_rsp.nobuf));
133 133
134 if (len > sizeof(buf)) 134 if (len > sizeof(buf))
135 len = sizeof(buf); 135 len = sizeof(buf);
@@ -165,17 +165,17 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
165 165
166 ath9k_htc_ps_restore(priv); 166 ath9k_htc_ps_restore(priv);
167 167
168 len += snprintf(buf + len, sizeof(buf) - len, 168 len += scnprintf(buf + len, sizeof(buf) - len,
169 "%20s : %10u\n", "NoBuf", 169 "%20s : %10u\n", "NoBuf",
170 be32_to_cpu(cmd_rsp.nobuf)); 170 be32_to_cpu(cmd_rsp.nobuf));
171 171
172 len += snprintf(buf + len, sizeof(buf) - len, 172 len += scnprintf(buf + len, sizeof(buf) - len,
173 "%20s : %10u\n", "HostSend", 173 "%20s : %10u\n", "HostSend",
174 be32_to_cpu(cmd_rsp.host_send)); 174 be32_to_cpu(cmd_rsp.host_send));
175 175
176 len += snprintf(buf + len, sizeof(buf) - len, 176 len += scnprintf(buf + len, sizeof(buf) - len,
177 "%20s : %10u\n", "HostDone", 177 "%20s : %10u\n", "HostDone",
178 be32_to_cpu(cmd_rsp.host_done)); 178 be32_to_cpu(cmd_rsp.host_done));
179 179
180 if (len > sizeof(buf)) 180 if (len > sizeof(buf))
181 len = sizeof(buf); 181 len = sizeof(buf);
@@ -197,37 +197,37 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
197 char buf[512]; 197 char buf[512];
198 unsigned int len = 0; 198 unsigned int len = 0;
199 199
200 len += snprintf(buf + len, sizeof(buf) - len, 200 len += scnprintf(buf + len, sizeof(buf) - len,
201 "%20s : %10u\n", "Buffers queued", 201 "%20s : %10u\n", "Buffers queued",
202 priv->debug.tx_stats.buf_queued); 202 priv->debug.tx_stats.buf_queued);
203 len += snprintf(buf + len, sizeof(buf) - len, 203 len += scnprintf(buf + len, sizeof(buf) - len,
204 "%20s : %10u\n", "Buffers completed", 204 "%20s : %10u\n", "Buffers completed",
205 priv->debug.tx_stats.buf_completed); 205 priv->debug.tx_stats.buf_completed);
206 len += snprintf(buf + len, sizeof(buf) - len, 206 len += scnprintf(buf + len, sizeof(buf) - len,
207 "%20s : %10u\n", "SKBs queued", 207 "%20s : %10u\n", "SKBs queued",
208 priv->debug.tx_stats.skb_queued); 208 priv->debug.tx_stats.skb_queued);
209 len += snprintf(buf + len, sizeof(buf) - len, 209 len += scnprintf(buf + len, sizeof(buf) - len,
210 "%20s : %10u\n", "SKBs success", 210 "%20s : %10u\n", "SKBs success",
211 priv->debug.tx_stats.skb_success); 211 priv->debug.tx_stats.skb_success);
212 len += snprintf(buf + len, sizeof(buf) - len, 212 len += scnprintf(buf + len, sizeof(buf) - len,
213 "%20s : %10u\n", "SKBs failed", 213 "%20s : %10u\n", "SKBs failed",
214 priv->debug.tx_stats.skb_failed); 214 priv->debug.tx_stats.skb_failed);
215 len += snprintf(buf + len, sizeof(buf) - len, 215 len += scnprintf(buf + len, sizeof(buf) - len,
216 "%20s : %10u\n", "CAB queued", 216 "%20s : %10u\n", "CAB queued",
217 priv->debug.tx_stats.cab_queued); 217 priv->debug.tx_stats.cab_queued);
218 218
219 len += snprintf(buf + len, sizeof(buf) - len, 219 len += scnprintf(buf + len, sizeof(buf) - len,
220 "%20s : %10u\n", "BE queued", 220 "%20s : %10u\n", "BE queued",
221 priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]); 221 priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
222 len += snprintf(buf + len, sizeof(buf) - len, 222 len += scnprintf(buf + len, sizeof(buf) - len,
223 "%20s : %10u\n", "BK queued", 223 "%20s : %10u\n", "BK queued",
224 priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]); 224 priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
225 len += snprintf(buf + len, sizeof(buf) - len, 225 len += scnprintf(buf + len, sizeof(buf) - len,
226 "%20s : %10u\n", "VI queued", 226 "%20s : %10u\n", "VI queued",
227 priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]); 227 priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
228 len += snprintf(buf + len, sizeof(buf) - len, 228 len += scnprintf(buf + len, sizeof(buf) - len,
229 "%20s : %10u\n", "VO queued", 229 "%20s : %10u\n", "VO queued",
230 priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]); 230 priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
231 231
232 if (len > sizeof(buf)) 232 if (len > sizeof(buf))
233 len = sizeof(buf); 233 len = sizeof(buf);
@@ -273,8 +273,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
273 size_t count, loff_t *ppos) 273 size_t count, loff_t *ppos)
274{ 274{
275#define PHY_ERR(s, p) \ 275#define PHY_ERR(s, p) \
276 len += snprintf(buf + len, size - len, "%20s : %10u\n", s, \ 276 len += scnprintf(buf + len, size - len, "%20s : %10u\n", s, \
277 priv->debug.rx_stats.err_phy_stats[p]); 277 priv->debug.rx_stats.err_phy_stats[p]);
278 278
279 struct ath9k_htc_priv *priv = file->private_data; 279 struct ath9k_htc_priv *priv = file->private_data;
280 char *buf; 280 char *buf;
@@ -285,37 +285,37 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
285 if (buf == NULL) 285 if (buf == NULL)
286 return -ENOMEM; 286 return -ENOMEM;
287 287
288 len += snprintf(buf + len, size - len, 288 len += scnprintf(buf + len, size - len,
289 "%20s : %10u\n", "SKBs allocated", 289 "%20s : %10u\n", "SKBs allocated",
290 priv->debug.rx_stats.skb_allocated); 290 priv->debug.rx_stats.skb_allocated);
291 len += snprintf(buf + len, size - len, 291 len += scnprintf(buf + len, size - len,
292 "%20s : %10u\n", "SKBs completed", 292 "%20s : %10u\n", "SKBs completed",
293 priv->debug.rx_stats.skb_completed); 293 priv->debug.rx_stats.skb_completed);
294 len += snprintf(buf + len, size - len, 294 len += scnprintf(buf + len, size - len,
295 "%20s : %10u\n", "SKBs Dropped", 295 "%20s : %10u\n", "SKBs Dropped",
296 priv->debug.rx_stats.skb_dropped); 296 priv->debug.rx_stats.skb_dropped);
297 297
298 len += snprintf(buf + len, size - len, 298 len += scnprintf(buf + len, size - len,
299 "%20s : %10u\n", "CRC ERR", 299 "%20s : %10u\n", "CRC ERR",
300 priv->debug.rx_stats.err_crc); 300 priv->debug.rx_stats.err_crc);
301 len += snprintf(buf + len, size - len, 301 len += scnprintf(buf + len, size - len,
302 "%20s : %10u\n", "DECRYPT CRC ERR", 302 "%20s : %10u\n", "DECRYPT CRC ERR",
303 priv->debug.rx_stats.err_decrypt_crc); 303 priv->debug.rx_stats.err_decrypt_crc);
304 len += snprintf(buf + len, size - len, 304 len += scnprintf(buf + len, size - len,
305 "%20s : %10u\n", "MIC ERR", 305 "%20s : %10u\n", "MIC ERR",
306 priv->debug.rx_stats.err_mic); 306 priv->debug.rx_stats.err_mic);
307 len += snprintf(buf + len, size - len, 307 len += scnprintf(buf + len, size - len,
308 "%20s : %10u\n", "PRE-DELIM CRC ERR", 308 "%20s : %10u\n", "PRE-DELIM CRC ERR",
309 priv->debug.rx_stats.err_pre_delim); 309 priv->debug.rx_stats.err_pre_delim);
310 len += snprintf(buf + len, size - len, 310 len += scnprintf(buf + len, size - len,
311 "%20s : %10u\n", "POST-DELIM CRC ERR", 311 "%20s : %10u\n", "POST-DELIM CRC ERR",
312 priv->debug.rx_stats.err_post_delim); 312 priv->debug.rx_stats.err_post_delim);
313 len += snprintf(buf + len, size - len, 313 len += scnprintf(buf + len, size - len,
314 "%20s : %10u\n", "DECRYPT BUSY ERR", 314 "%20s : %10u\n", "DECRYPT BUSY ERR",
315 priv->debug.rx_stats.err_decrypt_busy); 315 priv->debug.rx_stats.err_decrypt_busy);
316 len += snprintf(buf + len, size - len, 316 len += scnprintf(buf + len, size - len,
317 "%20s : %10u\n", "TOTAL PHY ERR", 317 "%20s : %10u\n", "TOTAL PHY ERR",
318 priv->debug.rx_stats.err_phy); 318 priv->debug.rx_stats.err_phy);
319 319
320 320
321 PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN); 321 PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
@@ -372,16 +372,16 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
372 372
373 spin_lock_bh(&priv->tx.tx_lock); 373 spin_lock_bh(&priv->tx.tx_lock);
374 374
375 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : "); 375 len += scnprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
376 376
377 len += bitmap_scnprintf(buf + len, sizeof(buf) - len, 377 len += bitmap_scnprintf(buf + len, sizeof(buf) - len,
378 priv->tx.tx_slot, MAX_TX_BUF_NUM); 378 priv->tx.tx_slot, MAX_TX_BUF_NUM);
379 379
380 len += snprintf(buf + len, sizeof(buf) - len, "\n"); 380 len += scnprintf(buf + len, sizeof(buf) - len, "\n");
381 381
382 len += snprintf(buf + len, sizeof(buf) - len, 382 len += scnprintf(buf + len, sizeof(buf) - len,
383 "Used slots : %d\n", 383 "Used slots : %d\n",
384 bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM)); 384 bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
385 385
386 spin_unlock_bh(&priv->tx.tx_lock); 386 spin_unlock_bh(&priv->tx.tx_lock);
387 387
@@ -405,30 +405,30 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
405 char buf[512]; 405 char buf[512];
406 unsigned int len = 0; 406 unsigned int len = 0;
407 407
408 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", 408 len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
409 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue)); 409 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
410 410
411 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", 411 len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
412 "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue)); 412 "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
413 413
414 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", 414 len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
415 "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue)); 415 "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
416 416
417 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", 417 len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
418 "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue)); 418 "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
419 419
420 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", 420 len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
421 "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue)); 421 "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
422 422
423 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", 423 len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
424 "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue)); 424 "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
425 425
426 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", 426 len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
427 "Failed queue", skb_queue_len(&priv->tx.tx_failed)); 427 "Failed queue", skb_queue_len(&priv->tx.tx_failed));
428 428
429 spin_lock_bh(&priv->tx.tx_lock); 429 spin_lock_bh(&priv->tx.tx_lock);
430 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", 430 len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
431 "Queued count", priv->tx.queued_cnt); 431 "Queued count", priv->tx.queued_cnt);
432 spin_unlock_bh(&priv->tx.tx_lock); 432 spin_unlock_bh(&priv->tx.tx_lock);
433 433
434 if (len > sizeof(buf)) 434 if (len > sizeof(buf))
@@ -507,70 +507,70 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
507 if (buf == NULL) 507 if (buf == NULL)
508 return -ENOMEM; 508 return -ENOMEM;
509 509
510 len += snprintf(buf + len, size - len, 510 len += scnprintf(buf + len, size - len,
511 "%20s : %10d\n", "Major Version", 511 "%20s : %10d\n", "Major Version",
512 pBase->version >> 12); 512 pBase->version >> 12);
513 len += snprintf(buf + len, size - len, 513 len += scnprintf(buf + len, size - len,
514 "%20s : %10d\n", "Minor Version", 514 "%20s : %10d\n", "Minor Version",
515 pBase->version & 0xFFF); 515 pBase->version & 0xFFF);
516 len += snprintf(buf + len, size - len, 516 len += scnprintf(buf + len, size - len,
517 "%20s : %10d\n", "Checksum", 517 "%20s : %10d\n", "Checksum",
518 pBase->checksum); 518 pBase->checksum);
519 len += snprintf(buf + len, size - len, 519 len += scnprintf(buf + len, size - len,
520 "%20s : %10d\n", "Length", 520 "%20s : %10d\n", "Length",
521 pBase->length); 521 pBase->length);
522 len += snprintf(buf + len, size - len, 522 len += scnprintf(buf + len, size - len,
523 "%20s : %10d\n", "RegDomain1", 523 "%20s : %10d\n", "RegDomain1",
524 pBase->regDmn[0]); 524 pBase->regDmn[0]);
525 len += snprintf(buf + len, size - len, 525 len += scnprintf(buf + len, size - len,
526 "%20s : %10d\n", "RegDomain2", 526 "%20s : %10d\n", "RegDomain2",
527 pBase->regDmn[1]); 527 pBase->regDmn[1]);
528 len += snprintf(buf + len, size - len, 528 len += scnprintf(buf + len, size - len,
529 "%20s : %10d\n", 529 "%20s : %10d\n",
530 "TX Mask", pBase->txMask); 530 "TX Mask", pBase->txMask);
531 len += snprintf(buf + len, size - len, 531 len += scnprintf(buf + len, size - len,
532 "%20s : %10d\n", 532 "%20s : %10d\n",
533 "RX Mask", pBase->rxMask); 533 "RX Mask", pBase->rxMask);
534 len += snprintf(buf + len, size - len, 534 len += scnprintf(buf + len, size - len,
535 "%20s : %10d\n", 535 "%20s : %10d\n",
536 "Allow 5GHz", 536 "Allow 5GHz",
537 !!(pBase->opCapFlags & AR5416_OPFLAGS_11A)); 537 !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
538 len += snprintf(buf + len, size - len, 538 len += scnprintf(buf + len, size - len,
539 "%20s : %10d\n", 539 "%20s : %10d\n",
540 "Allow 2GHz", 540 "Allow 2GHz",
541 !!(pBase->opCapFlags & AR5416_OPFLAGS_11G)); 541 !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
542 len += snprintf(buf + len, size - len, 542 len += scnprintf(buf + len, size - len,
543 "%20s : %10d\n", 543 "%20s : %10d\n",
544 "Disable 2GHz HT20", 544 "Disable 2GHz HT20",
545 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20)); 545 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
546 len += snprintf(buf + len, size - len, 546 len += scnprintf(buf + len, size - len,
547 "%20s : %10d\n", 547 "%20s : %10d\n",
548 "Disable 2GHz HT40", 548 "Disable 2GHz HT40",
549 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40)); 549 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
550 len += snprintf(buf + len, size - len, 550 len += scnprintf(buf + len, size - len,
551 "%20s : %10d\n", 551 "%20s : %10d\n",
552 "Disable 5Ghz HT20", 552 "Disable 5Ghz HT20",
553 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20)); 553 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
554 len += snprintf(buf + len, size - len, 554 len += scnprintf(buf + len, size - len,
555 "%20s : %10d\n", 555 "%20s : %10d\n",
556 "Disable 5Ghz HT40", 556 "Disable 5Ghz HT40",
557 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); 557 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
558 len += snprintf(buf + len, size - len, 558 len += scnprintf(buf + len, size - len,
559 "%20s : %10d\n", 559 "%20s : %10d\n",
560 "Big Endian", 560 "Big Endian",
561 !!(pBase->eepMisc & 0x01)); 561 !!(pBase->eepMisc & 0x01));
562 len += snprintf(buf + len, size - len, 562 len += scnprintf(buf + len, size - len,
563 "%20s : %10d\n", 563 "%20s : %10d\n",
564 "Cal Bin Major Ver", 564 "Cal Bin Major Ver",
565 (pBase->binBuildNumber >> 24) & 0xFF); 565 (pBase->binBuildNumber >> 24) & 0xFF);
566 len += snprintf(buf + len, size - len, 566 len += scnprintf(buf + len, size - len,
567 "%20s : %10d\n", 567 "%20s : %10d\n",
568 "Cal Bin Minor Ver", 568 "Cal Bin Minor Ver",
569 (pBase->binBuildNumber >> 16) & 0xFF); 569 (pBase->binBuildNumber >> 16) & 0xFF);
570 len += snprintf(buf + len, size - len, 570 len += scnprintf(buf + len, size - len,
571 "%20s : %10d\n", 571 "%20s : %10d\n",
572 "Cal Bin Build", 572 "Cal Bin Build",
573 (pBase->binBuildNumber >> 8) & 0xFF); 573 (pBase->binBuildNumber >> 8) & 0xFF);
574 574
575 /* 575 /*
576 * UB91 specific data. 576 * UB91 specific data.
@@ -579,10 +579,10 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
579 struct base_eep_header_4k *pBase4k = 579 struct base_eep_header_4k *pBase4k =
580 &priv->ah->eeprom.map4k.baseEepHeader; 580 &priv->ah->eeprom.map4k.baseEepHeader;
581 581
582 len += snprintf(buf + len, size - len, 582 len += scnprintf(buf + len, size - len,
583 "%20s : %10d\n", 583 "%20s : %10d\n",
584 "TX Gain type", 584 "TX Gain type",
585 pBase4k->txGainType); 585 pBase4k->txGainType);
586 } 586 }
587 587
588 /* 588 /*
@@ -592,19 +592,19 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
592 struct base_eep_ar9287_header *pBase9287 = 592 struct base_eep_ar9287_header *pBase9287 =
593 &priv->ah->eeprom.map9287.baseEepHeader; 593 &priv->ah->eeprom.map9287.baseEepHeader;
594 594
595 len += snprintf(buf + len, size - len, 595 len += scnprintf(buf + len, size - len,
596 "%20s : %10ddB\n", 596 "%20s : %10ddB\n",
597 "Power Table Offset", 597 "Power Table Offset",
598 pBase9287->pwrTableOffset); 598 pBase9287->pwrTableOffset);
599 599
600 len += snprintf(buf + len, size - len, 600 len += scnprintf(buf + len, size - len,
601 "%20s : %10d\n", 601 "%20s : %10d\n",
602 "OpenLoop Power Ctrl", 602 "OpenLoop Power Ctrl",
603 pBase9287->openLoopPwrCntl); 603 pBase9287->openLoopPwrCntl);
604 } 604 }
605 605
606 len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", 606 len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
607 pBase->macAddr); 607 pBase->macAddr);
608 if (len > size) 608 if (len > size)
609 len = size; 609 len = size;
610 610
@@ -627,8 +627,8 @@ static ssize_t read_4k_modal_eeprom(struct file *file,
627{ 627{
628#define PR_EEP(_s, _val) \ 628#define PR_EEP(_s, _val) \
629 do { \ 629 do { \
630 len += snprintf(buf + len, size - len, "%20s : %10d\n", \ 630 len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
631 _s, (_val)); \ 631 _s, (_val)); \
632 } while (0) 632 } while (0)
633 633
634 struct ath9k_htc_priv *priv = file->private_data; 634 struct ath9k_htc_priv *priv = file->private_data;
@@ -708,12 +708,12 @@ static ssize_t read_def_modal_eeprom(struct file *file,
708 do { \ 708 do { \
709 if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \ 709 if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \
710 pModal = &priv->ah->eeprom.def.modalHeader[1]; \ 710 pModal = &priv->ah->eeprom.def.modalHeader[1]; \
711 len += snprintf(buf + len, size - len, "%20s : %8d%7s", \ 711 len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
712 _s, (_val), "|"); \ 712 _s, (_val), "|"); \
713 } \ 713 } \
714 if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \ 714 if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \
715 pModal = &priv->ah->eeprom.def.modalHeader[0]; \ 715 pModal = &priv->ah->eeprom.def.modalHeader[0]; \
716 len += snprintf(buf + len, size - len, "%9d\n", \ 716 len += scnprintf(buf + len, size - len, "%9d\n",\
717 (_val)); \ 717 (_val)); \
718 } \ 718 } \
719 } while (0) 719 } while (0)
@@ -729,10 +729,10 @@ static ssize_t read_def_modal_eeprom(struct file *file,
729 if (buf == NULL) 729 if (buf == NULL)
730 return -ENOMEM; 730 return -ENOMEM;
731 731
732 len += snprintf(buf + len, size - len, 732 len += scnprintf(buf + len, size - len,
733 "%31s %15s\n", "2G", "5G"); 733 "%31s %15s\n", "2G", "5G");
734 len += snprintf(buf + len, size - len, 734 len += scnprintf(buf + len, size - len,
735 "%32s %16s\n", "====", "====\n"); 735 "%32s %16s\n", "====", "====\n");
736 736
737 PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]); 737 PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
738 PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]); 738 PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
@@ -814,8 +814,8 @@ static ssize_t read_9287_modal_eeprom(struct file *file,
814{ 814{
815#define PR_EEP(_s, _val) \ 815#define PR_EEP(_s, _val) \
816 do { \ 816 do { \
817 len += snprintf(buf + len, size - len, "%20s : %10d\n", \ 817 len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
818 _s, (_val)); \ 818 _s, (_val)); \
819 } while (0) 819 } while (0)
820 820
821 struct ath9k_htc_priv *priv = file->private_data; 821 struct ath9k_htc_priv *priv = file->private_data;
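Editor's note on the htc_drv_debug.c hunks above: the snprintf() -> scnprintf() conversion is mechanical, but the reason is worth spelling out. When a string is assembled piecewise, snprintf() returns the length the output would have had, so `len` can run past the buffer and the next `sizeof(buf) - len` argument wraps around as an unsigned value; scnprintf() returns the number of characters actually stored, so `len` stays bounded and the trailing `if (len > sizeof(buf))` clamp becomes effectively dead code. A minimal userspace sketch of the pattern; the scnprintf() stand-in here is an illustrative reimplementation, not the kernel's:

#include <stdarg.h>
#include <stdio.h>

/*
 * Illustrative stand-in for the kernel's scnprintf(): returns the number of
 * characters actually stored in buf (excluding the NUL), never more than
 * size - 1, and 0 when size is 0.  Not the kernel implementation.
 */
static int demo_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list ap;
        int ret;

        if (size == 0)
                return 0;

        va_start(ap, fmt);
        ret = vsnprintf(buf, size, fmt, ap);
        va_end(ap);

        return ret >= (int)size ? (int)size - 1 : ret;
}

int main(void)
{
        char buf[32];
        unsigned int len = 0;
        int i;

        /*
         * With plain snprintf() this loop could push len past sizeof(buf),
         * after which "sizeof(buf) - len" wraps around as an unsigned value.
         * With the scnprintf()-style helper, len never exceeds sizeof(buf) - 1.
         */
        for (i = 0; i < 10; i++)
                len += demo_scnprintf(buf + len, sizeof(buf) - len,
                                      "%20s : %10u\n", "counter", i);

        printf("len = %u (buffer is %zu bytes)\n%s", len, sizeof(buf), buf);
        return 0;
}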
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index d44258172c0f..9a2657fdd9cc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,30 +24,10 @@
24static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv, 24static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
25 struct ath9k_channel *ichan) 25 struct ath9k_channel *ichan)
26{ 26{
27 enum htc_phymode mode; 27 if (IS_CHAN_5GHZ(ichan))
28 28 return HTC_MODE_11NA;
29 mode = -EINVAL;
30
31 switch (ichan->chanmode) {
32 case CHANNEL_G:
33 case CHANNEL_G_HT20:
34 case CHANNEL_G_HT40PLUS:
35 case CHANNEL_G_HT40MINUS:
36 mode = HTC_MODE_11NG;
37 break;
38 case CHANNEL_A:
39 case CHANNEL_A_HT20:
40 case CHANNEL_A_HT40PLUS:
41 case CHANNEL_A_HT40MINUS:
42 mode = HTC_MODE_11NA;
43 break;
44 default:
45 break;
46 }
47 29
48 WARN_ON(mode < 0); 30 return HTC_MODE_11NG;
49
50 return mode;
51} 31}
52 32
53bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, 33bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
@@ -926,7 +906,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
926 WMI_CMD(WMI_FLUSH_RECV_CMDID); 906 WMI_CMD(WMI_FLUSH_RECV_CMDID);
927 907
928 /* setup initial channel */ 908 /* setup initial channel */
929 init_channel = ath9k_cmn_get_curchannel(hw, ah); 909 init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
930 910
931 ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false); 911 ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
932 if (ret) { 912 if (ret) {
@@ -1208,9 +1188,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1208 ath_dbg(common, CONFIG, "Set channel: %d MHz\n", 1188 ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
1209 curchan->center_freq); 1189 curchan->center_freq);
1210 1190
1211 ath9k_cmn_update_ichannel(&priv->ah->channels[pos], 1191 ath9k_cmn_get_channel(hw, priv->ah, &hw->conf.chandef);
1212 &hw->conf.chandef);
1213
1214 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) { 1192 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
1215 ath_err(common, "Unable to set channel\n"); 1193 ath_err(common, "Unable to set channel\n");
1216 ret = -EINVAL; 1194 ret = -EINVAL;
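Editor's note on the htc_drv_main.c hunk above: ath9k_htc_get_curmode() collapses from a chanmode switch into a single band test, which is possible because after this series a channel's mode is fully described by its band plus the HT flags. A trivial sketch of the resulting mapping; the enum values here are placeholders for illustration, not the firmware's HTC_MODE_* encoding:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder values; the real HTC_MODE_* constants come from the firmware ABI. */
enum htc_phymode {
        HTC_MODE_11NA,
        HTC_MODE_11NG,
};

/* With chanmode gone, the phymode follows the band alone. */
static enum htc_phymode get_curmode(bool is_chan_5ghz)
{
        return is_chan_5ghz ? HTC_MODE_11NA : HTC_MODE_11NG;
}

int main(void)
{
        printf("5 GHz -> %d, 2 GHz -> %d\n",
               get_curmode(true), get_curmode(false));
        return 0;
}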
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ecc6ec4a1edb..dcdbab48709e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -130,29 +130,29 @@ void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
130 130
131static void ath9k_hw_set_clockrate(struct ath_hw *ah) 131static void ath9k_hw_set_clockrate(struct ath_hw *ah)
132{ 132{
133 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
134 struct ath_common *common = ath9k_hw_common(ah); 133 struct ath_common *common = ath9k_hw_common(ah);
134 struct ath9k_channel *chan = ah->curchan;
135 unsigned int clockrate; 135 unsigned int clockrate;
136 136
137 /* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */ 137 /* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */
138 if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) 138 if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah))
139 clockrate = 117; 139 clockrate = 117;
140 else if (!ah->curchan) /* should really check for CCK instead */ 140 else if (!chan) /* should really check for CCK instead */
141 clockrate = ATH9K_CLOCK_RATE_CCK; 141 clockrate = ATH9K_CLOCK_RATE_CCK;
142 else if (conf->chandef.chan->band == IEEE80211_BAND_2GHZ) 142 else if (IS_CHAN_2GHZ(chan))
143 clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM; 143 clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
144 else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK) 144 else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
145 clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM; 145 clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
146 else 146 else
147 clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM; 147 clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
148 148
149 if (conf_is_ht40(conf)) 149 if (IS_CHAN_HT40(chan))
150 clockrate *= 2; 150 clockrate *= 2;
151 151
152 if (ah->curchan) { 152 if (ah->curchan) {
153 if (IS_CHAN_HALF_RATE(ah->curchan)) 153 if (IS_CHAN_HALF_RATE(chan))
154 clockrate /= 2; 154 clockrate /= 2;
155 if (IS_CHAN_QUARTER_RATE(ah->curchan)) 155 if (IS_CHAN_QUARTER_RATE(chan))
156 clockrate /= 4; 156 clockrate /= 4;
157 } 157 }
158 158
@@ -190,10 +190,7 @@ EXPORT_SYMBOL(ath9k_hw_wait);
190void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan, 190void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
191 int hw_delay) 191 int hw_delay)
192{ 192{
193 if (IS_CHAN_B(chan)) 193 hw_delay /= 10;
194 hw_delay = (4 * hw_delay) / 22;
195 else
196 hw_delay /= 10;
197 194
198 if (IS_CHAN_HALF_RATE(chan)) 195 if (IS_CHAN_HALF_RATE(chan))
199 hw_delay *= 2; 196 hw_delay *= 2;
@@ -294,8 +291,7 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
294 return; 291 return;
295 } 292 }
296 293
297 if ((chan->chanmode == CHANNEL_A_HT40PLUS) || 294 if (IS_CHAN_HT40PLUS(chan)) {
298 (chan->chanmode == CHANNEL_G_HT40PLUS)) {
299 centers->synth_center = 295 centers->synth_center =
300 chan->channel + HT40_CHANNEL_CENTER_SHIFT; 296 chan->channel + HT40_CHANNEL_CENTER_SHIFT;
301 extoff = 1; 297 extoff = 1;
@@ -549,6 +545,18 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
549 545
550 ath9k_hw_ani_init(ah); 546 ath9k_hw_ani_init(ah);
551 547
548 /*
549 * EEPROM needs to be initialized before we do this.
550 * This is required for regulatory compliance.
551 */
552 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
553 u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
554 if ((regdmn & 0xF0) == CTL_FCC) {
555 ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ;
556 ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ;
557 }
558 }
559
552 return 0; 560 return 0;
553} 561}
554 562
@@ -1030,7 +1038,6 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
1030void ath9k_hw_init_global_settings(struct ath_hw *ah) 1038void ath9k_hw_init_global_settings(struct ath_hw *ah)
1031{ 1039{
1032 struct ath_common *common = ath9k_hw_common(ah); 1040 struct ath_common *common = ath9k_hw_common(ah);
1033 struct ieee80211_conf *conf = &common->hw->conf;
1034 const struct ath9k_channel *chan = ah->curchan; 1041 const struct ath9k_channel *chan = ah->curchan;
1035 int acktimeout, ctstimeout, ack_offset = 0; 1042 int acktimeout, ctstimeout, ack_offset = 0;
1036 int slottime; 1043 int slottime;
@@ -1105,8 +1112,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1105 * BA frames in some implementations, but it has been found to fix ACK 1112 * BA frames in some implementations, but it has been found to fix ACK
1106 * timeout issues in other cases as well. 1113 * timeout issues in other cases as well.
1107 */ 1114 */
1108 if (conf->chandef.chan && 1115 if (IS_CHAN_2GHZ(chan) &&
1109 conf->chandef.chan->band == IEEE80211_BAND_2GHZ &&
1110 !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) { 1116 !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
1111 acktimeout += 64 - sifstime - ah->slottime; 1117 acktimeout += 64 - sifstime - ah->slottime;
1112 ctstimeout += 48 - sifstime - ah->slottime; 1118 ctstimeout += 48 - sifstime - ah->slottime;
@@ -1148,9 +1154,7 @@ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
1148{ 1154{
1149 u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band); 1155 u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
1150 1156
1151 if (IS_CHAN_B(chan)) 1157 if (IS_CHAN_2GHZ(chan))
1152 ctl |= CTL_11B;
1153 else if (IS_CHAN_G(chan))
1154 ctl |= CTL_11G; 1158 ctl |= CTL_11G;
1155 else 1159 else
1156 ctl |= CTL_11A; 1160 ctl |= CTL_11A;
@@ -1498,10 +1502,8 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1498 int r; 1502 int r;
1499 1503
1500 if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) { 1504 if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
1501 u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ); 1505 band_switch = IS_CHAN_5GHZ(ah->curchan) != IS_CHAN_5GHZ(chan);
1502 u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ); 1506 mode_diff = (chan->channelFlags != ah->curchan->channelFlags);
1503 band_switch = (cur != new);
1504 mode_diff = (chan->chanmode != ah->curchan->chanmode);
1505 } 1507 }
1506 1508
1507 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { 1509 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1540,9 +1542,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1540 ath9k_hw_set_clockrate(ah); 1542 ath9k_hw_set_clockrate(ah);
1541 ath9k_hw_apply_txpower(ah, chan, false); 1543 ath9k_hw_apply_txpower(ah, chan, false);
1542 1544
1543 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1545 ath9k_hw_set_delta_slope(ah, chan);
1544 ath9k_hw_set_delta_slope(ah, chan);
1545
1546 ath9k_hw_spur_mitigate_freq(ah, chan); 1546 ath9k_hw_spur_mitigate_freq(ah, chan);
1547 1547
1548 if (band_switch || ini_reloaded) 1548 if (band_switch || ini_reloaded)
@@ -1644,6 +1644,19 @@ hang_check_iter:
1644 return true; 1644 return true;
1645} 1645}
1646 1646
1647void ath9k_hw_check_nav(struct ath_hw *ah)
1648{
1649 struct ath_common *common = ath9k_hw_common(ah);
1650 u32 val;
1651
1652 val = REG_READ(ah, AR_NAV);
1653 if (val != 0xdeadbeef && val > 0x7fff) {
1654 ath_dbg(common, BSTUCK, "Abnormal NAV: 0x%x\n", val);
1655 REG_WRITE(ah, AR_NAV, 0);
1656 }
1657}
1658EXPORT_SYMBOL(ath9k_hw_check_nav);
1659
1647bool ath9k_hw_check_alive(struct ath_hw *ah) 1660bool ath9k_hw_check_alive(struct ath_hw *ah)
1648{ 1661{
1649 int count = 50; 1662 int count = 50;
@@ -1799,20 +1812,11 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
1799 goto fail; 1812 goto fail;
1800 1813
1801 /* 1814 /*
 1802 * If cross-band fcc is not supported, bail out if 1815 * If cross-band fcc is not supported, bail out if channelFlags differ.
1803 * either channelFlags or chanmode differ.
1804 *
1805 * chanmode will be different if the HT operating mode
1806 * changes because of CSA.
1807 */ 1816 */
1808 if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) { 1817 if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) &&
1809 if ((chan->channelFlags & CHANNEL_ALL) != 1818 chan->channelFlags != ah->curchan->channelFlags)
1810 (ah->curchan->channelFlags & CHANNEL_ALL)) 1819 goto fail;
1811 goto fail;
1812
1813 if (chan->chanmode != ah->curchan->chanmode)
1814 goto fail;
1815 }
1816 1820
1817 if (!ath9k_hw_check_alive(ah)) 1821 if (!ath9k_hw_check_alive(ah))
1818 goto fail; 1822 goto fail;
@@ -1822,9 +1826,9 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
1822 * re-using are present. 1826 * re-using are present.
1823 */ 1827 */
1824 if (AR_SREV_9462(ah) && (ah->caldata && 1828 if (AR_SREV_9462(ah) && (ah->caldata &&
1825 (!ah->caldata->done_txiqcal_once || 1829 (!test_bit(TXIQCAL_DONE, &ah->caldata->cal_flags) ||
1826 !ah->caldata->done_txclcal_once || 1830 !test_bit(TXCLCAL_DONE, &ah->caldata->cal_flags) ||
1827 !ah->caldata->rtt_done))) 1831 !test_bit(RTT_DONE, &ah->caldata->cal_flags))))
1828 goto fail; 1832 goto fail;
1829 1833
1830 ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n", 1834 ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
@@ -1874,13 +1878,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1874 1878
1875 ah->caldata = caldata; 1879 ah->caldata = caldata;
1876 if (caldata && (chan->channel != caldata->channel || 1880 if (caldata && (chan->channel != caldata->channel ||
1877 chan->channelFlags != caldata->channelFlags || 1881 chan->channelFlags != caldata->channelFlags)) {
1878 chan->chanmode != caldata->chanmode)) {
1879 /* Operating channel changed, reset channel calibration data */ 1882 /* Operating channel changed, reset channel calibration data */
1880 memset(caldata, 0, sizeof(*caldata)); 1883 memset(caldata, 0, sizeof(*caldata));
1881 ath9k_init_nfcal_hist_buffer(ah, chan); 1884 ath9k_init_nfcal_hist_buffer(ah, chan);
1882 } else if (caldata) { 1885 } else if (caldata) {
1883 caldata->paprd_packet_sent = false; 1886 clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags);
1884 } 1887 }
1885 ah->noise = ath9k_hw_getchan_noise(ah, chan); 1888 ah->noise = ath9k_hw_getchan_noise(ah, chan);
1886 1889
@@ -1964,9 +1967,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1964 1967
1965 ath9k_hw_init_mfp(ah); 1968 ath9k_hw_init_mfp(ah);
1966 1969
1967 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1970 ath9k_hw_set_delta_slope(ah, chan);
1968 ath9k_hw_set_delta_slope(ah, chan);
1969
1970 ath9k_hw_spur_mitigate_freq(ah, chan); 1971 ath9k_hw_spur_mitigate_freq(ah, chan);
1971 ah->eep_ops->set_board_values(ah, chan); 1972 ah->eep_ops->set_board_values(ah, chan);
1972 1973
@@ -2017,8 +2018,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2017 ath9k_hw_init_bb(ah, chan); 2018 ath9k_hw_init_bb(ah, chan);
2018 2019
2019 if (caldata) { 2020 if (caldata) {
2020 caldata->done_txiqcal_once = false; 2021 clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
2021 caldata->done_txclcal_once = false; 2022 clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
2022 } 2023 }
2023 if (!ath9k_hw_init_cal(ah, chan)) 2024 if (!ath9k_hw_init_cal(ah, chan))
2024 return -EIO; 2025 return -EIO;
@@ -2943,12 +2944,11 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set)
2943} 2944}
2944EXPORT_SYMBOL(ath9k_hw_set_tsfadjust); 2945EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
2945 2946
2946void ath9k_hw_set11nmac2040(struct ath_hw *ah) 2947void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan)
2947{ 2948{
2948 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
2949 u32 macmode; 2949 u32 macmode;
2950 2950
2951 if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca) 2951 if (IS_CHAN_HT40(chan) && !ah->config.cwm_ignore_extcca)
2952 macmode = AR_2040_JOINED_RX_CLEAR; 2952 macmode = AR_2040_JOINED_RX_CLEAR;
2953 else 2953 else
2954 macmode = 0; 2954 macmode = 0;
@@ -3240,19 +3240,19 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
3240 3240
3241 /* chipsets >= AR9280 are single-chip */ 3241 /* chipsets >= AR9280 are single-chip */
3242 if (AR_SREV_9280_20_OR_LATER(ah)) { 3242 if (AR_SREV_9280_20_OR_LATER(ah)) {
3243 used = snprintf(hw_name, len, 3243 used = scnprintf(hw_name, len,
3244 "Atheros AR%s Rev:%x", 3244 "Atheros AR%s Rev:%x",
3245 ath9k_hw_mac_bb_name(ah->hw_version.macVersion), 3245 ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
3246 ah->hw_version.macRev); 3246 ah->hw_version.macRev);
3247 } 3247 }
3248 else { 3248 else {
3249 used = snprintf(hw_name, len, 3249 used = scnprintf(hw_name, len,
3250 "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x", 3250 "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
3251 ath9k_hw_mac_bb_name(ah->hw_version.macVersion), 3251 ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
3252 ah->hw_version.macRev, 3252 ah->hw_version.macRev,
3253 ath9k_hw_rf_name((ah->hw_version.analog5GhzRev & 3253 ath9k_hw_rf_name((ah->hw_version.analog5GhzRev
3254 AR_RADIO_SREV_MAJOR)), 3254 & AR_RADIO_SREV_MAJOR)),
3255 ah->hw_version.phyRev); 3255 ah->hw_version.phyRev);
3256 } 3256 }
3257 3257
3258 hw_name[used] = '\0'; 3258 hw_name[used] = '\0';
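Editor's note on the hw.c hunks above: several helpers (ath9k_hw_set_clockrate(), ath9k_hw_init_global_settings(), ath9k_hw_set11nmac2040()) stop looking at mac80211's hw->conf and instead consult the driver's own ath9k_channel flags. The clock-rate derivation itself is unchanged: pick a per-band base rate, double it for HT40, and scale it down for half/quarter-rate channels. A standalone sketch of that derivation; the MHz constants mirror the ATH9K_CLOCK_* values but should be read as illustrative assumptions:

#include <stdbool.h>
#include <stdio.h>

/* Values mirror ath9k's ATH9K_CLOCK_* constants (MHz); treat them as
 * illustrative assumptions here rather than authoritative. */
#define CLOCK_RATE_CCK             22
#define CLOCK_RATE_2GHZ_OFDM       44
#define CLOCK_RATE_5GHZ_OFDM       40
#define CLOCK_FAST_RATE_5GHZ_OFDM  44

struct chan_model {
        bool is_5ghz;
        bool ht40;
        bool half_rate;
        bool quarter_rate;
};

/* Same shape as ath9k_hw_set_clockrate() after the refactor: the decision
 * depends only on the channel flags, not on mac80211's current conf. */
static unsigned int clockrate_mhz(const struct chan_model *c, bool fastclock)
{
        unsigned int rate;

        if (!c)                         /* no current channel: assume CCK */
                rate = CLOCK_RATE_CCK;
        else if (!c->is_5ghz)
                rate = CLOCK_RATE_2GHZ_OFDM;
        else if (fastclock)
                rate = CLOCK_FAST_RATE_5GHZ_OFDM;
        else
                rate = CLOCK_RATE_5GHZ_OFDM;

        if (c && c->ht40)
                rate *= 2;
        if (c && c->half_rate)
                rate /= 2;
        if (c && c->quarter_rate)
                rate /= 4;

        return rate;
}

int main(void)
{
        struct chan_model ht40_5g = { .is_5ghz = true, .ht40 = true };

        printf("5 GHz HT40, fastclock: %u MHz\n", clockrate_mhz(&ht40_5g, true));
        return 0;
}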
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 69a907b55a73..81fcbc756122 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -98,8 +98,8 @@
98 98
99#define PR_EEP(_s, _val) \ 99#define PR_EEP(_s, _val) \
100 do { \ 100 do { \
101 len += snprintf(buf + len, size - len, "%20s : %10d\n", \ 101 len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
102 _s, (_val)); \ 102 _s, (_val)); \
103 } while (0) 103 } while (0)
104 104
105#define SM(_v, _f) (((_v) << _f##_S) & _f) 105#define SM(_v, _f) (((_v) << _f##_S) & _f)
@@ -369,55 +369,30 @@ enum ath9k_int {
369 ATH9K_INT_NOCARD = 0xffffffff 369 ATH9K_INT_NOCARD = 0xffffffff
370}; 370};
371 371
372#define CHANNEL_CCK 0x00020
373#define CHANNEL_OFDM 0x00040
374#define CHANNEL_2GHZ 0x00080
375#define CHANNEL_5GHZ 0x00100
376#define CHANNEL_PASSIVE 0x00200
377#define CHANNEL_DYN 0x00400
378#define CHANNEL_HALF 0x04000
379#define CHANNEL_QUARTER 0x08000
380#define CHANNEL_HT20 0x10000
381#define CHANNEL_HT40PLUS 0x20000
382#define CHANNEL_HT40MINUS 0x40000
383
384#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
385#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
386#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
387#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
388#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
389#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
390#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
391#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
392#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
393#define CHANNEL_ALL \
394 (CHANNEL_OFDM| \
395 CHANNEL_CCK| \
396 CHANNEL_2GHZ | \
397 CHANNEL_5GHZ | \
398 CHANNEL_HT20 | \
399 CHANNEL_HT40PLUS | \
400 CHANNEL_HT40MINUS)
401
402#define MAX_RTT_TABLE_ENTRY 6 372#define MAX_RTT_TABLE_ENTRY 6
403#define MAX_IQCAL_MEASUREMENT 8 373#define MAX_IQCAL_MEASUREMENT 8
404#define MAX_CL_TAB_ENTRY 16 374#define MAX_CL_TAB_ENTRY 16
405#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j)) 375#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
406 376
377enum ath9k_cal_flags {
378 RTT_DONE,
379 PAPRD_PACKET_SENT,
380 PAPRD_DONE,
381 NFCAL_PENDING,
382 NFCAL_INTF,
383 TXIQCAL_DONE,
384 TXCLCAL_DONE,
385 SW_PKDET_DONE,
386};
387
407struct ath9k_hw_cal_data { 388struct ath9k_hw_cal_data {
408 u16 channel; 389 u16 channel;
409 u32 channelFlags; 390 u16 channelFlags;
410 u32 chanmode; 391 unsigned long cal_flags;
411 int32_t CalValid; 392 int32_t CalValid;
412 int8_t iCoff; 393 int8_t iCoff;
413 int8_t qCoff; 394 int8_t qCoff;
414 bool rtt_done; 395 u8 caldac[2];
415 bool paprd_packet_sent;
416 bool paprd_done;
417 bool nfcal_pending;
418 bool nfcal_interference;
419 bool done_txiqcal_once;
420 bool done_txclcal_once;
421 u16 small_signal_gain[AR9300_MAX_CHAINS]; 396 u16 small_signal_gain[AR9300_MAX_CHAINS];
422 u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ]; 397 u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
423 u32 num_measures[AR9300_MAX_CHAINS]; 398 u32 num_measures[AR9300_MAX_CHAINS];
@@ -430,33 +405,34 @@ struct ath9k_hw_cal_data {
430struct ath9k_channel { 405struct ath9k_channel {
431 struct ieee80211_channel *chan; 406 struct ieee80211_channel *chan;
432 u16 channel; 407 u16 channel;
433 u32 channelFlags; 408 u16 channelFlags;
434 u32 chanmode;
435 s16 noisefloor; 409 s16 noisefloor;
436}; 410};
437 411
438#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \ 412#define CHANNEL_5GHZ BIT(0)
439 (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \ 413#define CHANNEL_HALF BIT(1)
440 (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \ 414#define CHANNEL_QUARTER BIT(2)
441 (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS)) 415#define CHANNEL_HT BIT(3)
442#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0) 416#define CHANNEL_HT40PLUS BIT(4)
443#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0) 417#define CHANNEL_HT40MINUS BIT(5)
444#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0) 418
445#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0) 419#define IS_CHAN_5GHZ(_c) (!!((_c)->channelFlags & CHANNEL_5GHZ))
446#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0) 420#define IS_CHAN_2GHZ(_c) (!IS_CHAN_5GHZ(_c))
421
422#define IS_CHAN_HALF_RATE(_c) (!!((_c)->channelFlags & CHANNEL_HALF))
423#define IS_CHAN_QUARTER_RATE(_c) (!!((_c)->channelFlags & CHANNEL_QUARTER))
447#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \ 424#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
448 ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \ 425 (IS_CHAN_5GHZ(_c) && ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
449 ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)) 426
450 427#define IS_CHAN_HT(_c) ((_c)->channelFlags & CHANNEL_HT)
451/* These macros check chanmode and not channelFlags */ 428
452#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B) 429#define IS_CHAN_HT20(_c) (IS_CHAN_HT(_c) && !IS_CHAN_HT40(_c))
453#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \ 430
454 ((_c)->chanmode == CHANNEL_G_HT20)) 431#define IS_CHAN_HT40(_c) \
455#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \ 432 (!!((_c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
456 ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \ 433
457 ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \ 434#define IS_CHAN_HT40PLUS(_c) ((_c)->channelFlags & CHANNEL_HT40PLUS)
458 ((_c)->chanmode == CHANNEL_G_HT40MINUS)) 435#define IS_CHAN_HT40MINUS(_c) ((_c)->channelFlags & CHANNEL_HT40MINUS)
459#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
460 436
461enum ath9k_power_mode { 437enum ath9k_power_mode {
462 ATH9K_PM_AWAKE = 0, 438 ATH9K_PM_AWAKE = 0,
@@ -558,6 +534,7 @@ struct ath_hw_antcomb_conf {
558 u8 main_gaintb; 534 u8 main_gaintb;
559 u8 alt_gaintb; 535 u8 alt_gaintb;
560 int lna1_lna2_delta; 536 int lna1_lna2_delta;
537 int lna1_lna2_switch_delta;
561 u8 div_group; 538 u8 div_group;
562}; 539};
563 540
@@ -1026,10 +1003,11 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah);
1026void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set); 1003void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
1027void ath9k_hw_init_global_settings(struct ath_hw *ah); 1004void ath9k_hw_init_global_settings(struct ath_hw *ah);
1028u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah); 1005u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
1029void ath9k_hw_set11nmac2040(struct ath_hw *ah); 1006void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan);
1030void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 1007void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
1031void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 1008void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
1032 const struct ath9k_beacon_state *bs); 1009 const struct ath9k_beacon_state *bs);
1010void ath9k_hw_check_nav(struct ath_hw *ah);
1033bool ath9k_hw_check_alive(struct ath_hw *ah); 1011bool ath9k_hw_check_alive(struct ath_hw *ah);
1034 1012
1035bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode); 1013bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
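Editor's note on the hw.h hunks above: the old CHANNEL_A/CHANNEL_B/CHANNEL_G chanmode encoding is dropped and channelFlags becomes a small bitfield (5 GHz, half, quarter, HT, HT40+/-), with 2 GHz implied by the absence of the 5 GHz bit, so the IS_CHAN_* helpers reduce to simple mask tests. A compact userspace sketch of the same scheme, mirroring the macros introduced in the hunk:

#include <stdio.h>

#define BIT(n)                  (1u << (n))

/* Mirrors the reduced channelFlags layout introduced above. */
#define CHANNEL_5GHZ            BIT(0)
#define CHANNEL_HALF            BIT(1)
#define CHANNEL_QUARTER         BIT(2)
#define CHANNEL_HT              BIT(3)
#define CHANNEL_HT40PLUS        BIT(4)
#define CHANNEL_HT40MINUS       BIT(5)

struct channel {
        unsigned int channel;           /* centre frequency in MHz */
        unsigned int channelFlags;
};

#define IS_CHAN_5GHZ(c)  (!!((c)->channelFlags & CHANNEL_5GHZ))
#define IS_CHAN_2GHZ(c)  (!IS_CHAN_5GHZ(c))
#define IS_CHAN_HT40(c)  (!!((c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
#define IS_CHAN_HT20(c)  (((c)->channelFlags & CHANNEL_HT) && !IS_CHAN_HT40(c))

int main(void)
{
        struct channel c = { 5180, CHANNEL_5GHZ | CHANNEL_HT | CHANNEL_HT40PLUS };

        printf("%u MHz: 5GHz=%d HT40=%d HT20=%d\n", c.channel,
               IS_CHAN_5GHZ(&c), IS_CHAN_HT40(&c), IS_CHAN_HT20(&c));
        return 0;
}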
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 9a1f349f9260..7df728f36330 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -347,7 +347,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
347{ 347{
348 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 348 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
349 u8 *ds; 349 u8 *ds;
350 struct ath_buf *bf;
351 int i, bsize, desc_len; 350 int i, bsize, desc_len;
352 351
353 ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n", 352 ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
@@ -399,33 +398,68 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
399 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len); 398 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
400 399
401 /* allocate buffers */ 400 /* allocate buffers */
402 bsize = sizeof(struct ath_buf) * nbuf; 401 if (is_tx) {
403 bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL); 402 struct ath_buf *bf;
404 if (!bf) 403
405 return -ENOMEM; 404 bsize = sizeof(struct ath_buf) * nbuf;
405 bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
406 if (!bf)
407 return -ENOMEM;
408
409 for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
410 bf->bf_desc = ds;
411 bf->bf_daddr = DS2PHYS(dd, ds);
412
413 if (!(sc->sc_ah->caps.hw_caps &
414 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
415 /*
416 * Skip descriptor addresses which can cause 4KB
417 * boundary crossing (addr + length) with a 32 dword
418 * descriptor fetch.
419 */
420 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
421 BUG_ON((caddr_t) bf->bf_desc >=
422 ((caddr_t) dd->dd_desc +
423 dd->dd_desc_len));
424
425 ds += (desc_len * ndesc);
426 bf->bf_desc = ds;
427 bf->bf_daddr = DS2PHYS(dd, ds);
428 }
429 }
430 list_add_tail(&bf->list, head);
431 }
432 } else {
433 struct ath_rxbuf *bf;
434
435 bsize = sizeof(struct ath_rxbuf) * nbuf;
436 bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
437 if (!bf)
438 return -ENOMEM;
406 439
407 for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) { 440 for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
408 bf->bf_desc = ds; 441 bf->bf_desc = ds;
409 bf->bf_daddr = DS2PHYS(dd, ds); 442 bf->bf_daddr = DS2PHYS(dd, ds);
410 443
411 if (!(sc->sc_ah->caps.hw_caps & 444 if (!(sc->sc_ah->caps.hw_caps &
412 ATH9K_HW_CAP_4KB_SPLITTRANS)) { 445 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
413 /* 446 /*
414 * Skip descriptor addresses which can cause 4KB 447 * Skip descriptor addresses which can cause 4KB
415 * boundary crossing (addr + length) with a 32 dword 448 * boundary crossing (addr + length) with a 32 dword
416 * descriptor fetch. 449 * descriptor fetch.
417 */ 450 */
418 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) { 451 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
419 BUG_ON((caddr_t) bf->bf_desc >= 452 BUG_ON((caddr_t) bf->bf_desc >=
420 ((caddr_t) dd->dd_desc + 453 ((caddr_t) dd->dd_desc +
421 dd->dd_desc_len)); 454 dd->dd_desc_len));
422 455
423 ds += (desc_len * ndesc); 456 ds += (desc_len * ndesc);
424 bf->bf_desc = ds; 457 bf->bf_desc = ds;
425 bf->bf_daddr = DS2PHYS(dd, ds); 458 bf->bf_daddr = DS2PHYS(dd, ds);
459 }
426 } 460 }
461 list_add_tail(&bf->list, head);
427 } 462 }
428 list_add_tail(&bf->list, head);
429 } 463 }
430 return 0; 464 return 0;
431} 465}
@@ -437,7 +471,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
437 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah); 471 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
438 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0); 472 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
439 473
440 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
441 ath_cabq_update(sc); 474 ath_cabq_update(sc);
442 475
443 sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0); 476 sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
@@ -547,6 +580,26 @@ static void ath9k_init_platform(struct ath_softc *sc)
547 if (sc->driver_data & ATH9K_PCI_CUS217) 580 if (sc->driver_data & ATH9K_PCI_CUS217)
548 ath_info(common, "CUS217 card detected\n"); 581 ath_info(common, "CUS217 card detected\n");
549 582
583 if (sc->driver_data & ATH9K_PCI_CUS252)
584 ath_info(common, "CUS252 card detected\n");
585
586 if (sc->driver_data & ATH9K_PCI_AR9565_1ANT)
587 ath_info(common, "WB335 1-ANT card detected\n");
588
589 if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
590 ath_info(common, "WB335 2-ANT card detected\n");
591
592 /*
593 * Some WB335 cards do not support antenna diversity. Since
594 * we use a hardcoded value for AR9565 instead of using the
595 * EEPROM/OTP data, remove the combining feature from
596 * the HW capabilities bitmap.
597 */
598 if (sc->driver_data & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
599 if (!(sc->driver_data & ATH9K_PCI_BT_ANT_DIV))
600 pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
601 }
602
550 if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) { 603 if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
551 pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV; 604 pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
552 ath_info(common, "Set BT/WLAN RX diversity capability\n"); 605 ath_info(common, "Set BT/WLAN RX diversity capability\n");
@@ -748,7 +801,7 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
748 chan = &sband->channels[i]; 801 chan = &sband->channels[i];
749 ah->curchan = &ah->channels[chan->hw_value]; 802 ah->curchan = &ah->channels[chan->hw_value];
750 cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20); 803 cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
751 ath9k_cmn_update_ichannel(ah->curchan, &chandef); 804 ath9k_cmn_get_channel(sc->hw, ah, &chandef);
752 ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true); 805 ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
753 } 806 }
754} 807}
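Editor's note on the init.c hunk above: ath_descdma_setup() is split into separate TX (struct ath_buf) and RX (struct ath_rxbuf) allocation paths, but both keep the same workaround: on hardware without 4KB split-transaction support, any descriptor whose fetch would straddle a 4 KB page boundary is skipped. A generic sketch of that boundary test; the constants here, including the 32-dword fetch size, are illustrative assumptions rather than the driver's exact ATH_DESC_4KB_BOUND_CHECK macro:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A descriptor fetch of 'fetch_len' bytes starting at 'daddr' crosses a 4 KB
 * boundary when the offset within the page plus the fetch length spills into
 * the next page. */
static bool crosses_4kb(uint32_t daddr, uint32_t fetch_len)
{
        return ((daddr & 0xFFF) + fetch_len) > 0x1000;
}

int main(void)
{
        const uint32_t fetch_len = 32 * 4;      /* 32-dword fetch, per the comment above */
        uint32_t daddr = 0x0FA0;                /* descriptor address within a DMA region */

        /* Same loop shape as the driver: advance past any descriptor slot whose
         * fetch would straddle the page boundary. */
        while (crosses_4kb(daddr, fetch_len)) {
                printf("skipping descriptor at 0x%04x\n", (unsigned int)daddr);
                daddr += fetch_len;
        }
        printf("using descriptor at 0x%04x\n", (unsigned int)daddr);
        return 0;
}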
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 2f831db396ac..84a60644f93a 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -184,7 +184,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
184 struct ath9k_hw_cal_data *caldata = ah->caldata; 184 struct ath9k_hw_cal_data *caldata = ah->caldata;
185 int chain; 185 int chain;
186 186
187 if (!caldata || !caldata->paprd_done) { 187 if (!caldata || !test_bit(PAPRD_DONE, &caldata->cal_flags)) {
188 ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n"); 188 ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n");
189 return; 189 return;
190 } 190 }
@@ -256,7 +256,9 @@ void ath_paprd_calibrate(struct work_struct *work)
256 int len = 1800; 256 int len = 1800;
257 int ret; 257 int ret;
258 258
259 if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) { 259 if (!caldata ||
260 !test_bit(PAPRD_PACKET_SENT, &caldata->cal_flags) ||
261 test_bit(PAPRD_DONE, &caldata->cal_flags)) {
260 ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n"); 262 ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n");
261 return; 263 return;
262 } 264 }
@@ -316,7 +318,7 @@ void ath_paprd_calibrate(struct work_struct *work)
316 kfree_skb(skb); 318 kfree_skb(skb);
317 319
318 if (chain_ok) { 320 if (chain_ok) {
319 caldata->paprd_done = true; 321 set_bit(PAPRD_DONE, &caldata->cal_flags);
320 ath_paprd_activate(sc); 322 ath_paprd_activate(sc);
321 } 323 }
322 324
@@ -343,7 +345,7 @@ void ath_ani_calibrate(unsigned long data)
343 u32 cal_interval, short_cal_interval, long_cal_interval; 345 u32 cal_interval, short_cal_interval, long_cal_interval;
344 unsigned long flags; 346 unsigned long flags;
345 347
346 if (ah->caldata && ah->caldata->nfcal_interference) 348 if (ah->caldata && test_bit(NFCAL_INTF, &ah->caldata->cal_flags))
347 long_cal_interval = ATH_LONG_CALINTERVAL_INT; 349 long_cal_interval = ATH_LONG_CALINTERVAL_INT;
348 else 350 else
349 long_cal_interval = ATH_LONG_CALINTERVAL; 351 long_cal_interval = ATH_LONG_CALINTERVAL;
@@ -432,7 +434,7 @@ set_timer:
432 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); 434 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
433 435
434 if (ar9003_is_paprd_enabled(ah) && ah->caldata) { 436 if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
435 if (!ah->caldata->paprd_done) { 437 if (!test_bit(PAPRD_DONE, &ah->caldata->cal_flags)) {
436 ieee80211_queue_work(sc->hw, &sc->paprd_work); 438 ieee80211_queue_work(sc->hw, &sc->paprd_work);
437 } else if (!ah->paprd_table_write_done) { 439 } else if (!ah->paprd_table_write_done) {
438 ath9k_ps_wakeup(sc); 440 ath9k_ps_wakeup(sc);
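Editor's note on the link.c hunks above: they show the consumer side of the new cal_flags word. The half-dozen bool fields in struct ath9k_hw_cal_data are replaced with bit positions from enum ath9k_cal_flags and queried with test_bit()/set_bit()/clear_bit(). A userspace approximation of the pattern; these helpers are simplified, non-atomic stand-ins for the kernel bitops:

#include <stdbool.h>
#include <stdio.h>

/* Bit positions, mirroring enum ath9k_cal_flags from the hw.h hunk. */
enum cal_flags {
        RTT_DONE,
        PAPRD_PACKET_SENT,
        PAPRD_DONE,
        NFCAL_PENDING,
        NFCAL_INTF,
        TXIQCAL_DONE,
        TXCLCAL_DONE,
        SW_PKDET_DONE,
};

/* Simplified, non-atomic stand-ins for the kernel's set_bit/clear_bit/test_bit. */
static void set_bit_ul(int nr, unsigned long *addr)   { *addr |= 1UL << nr; }
static void clear_bit_ul(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static bool test_bit_ul(int nr, const unsigned long *addr) { return (*addr >> nr) & 1UL; }

struct cal_data {
        unsigned long cal_flags;        /* replaces the old per-flag bools */
};

int main(void)
{
        struct cal_data caldata = { 0 };

        set_bit_ul(PAPRD_PACKET_SENT, &caldata.cal_flags);

        /* Same gate as ath_paprd_calibrate(): a packet was sent and PAPRD is not done. */
        if (test_bit_ul(PAPRD_PACKET_SENT, &caldata.cal_flags) &&
            !test_bit_ul(PAPRD_DONE, &caldata.cal_flags))
                printf("run PAPRD calibration\n");

        clear_bit_ul(PAPRD_PACKET_SENT, &caldata.cal_flags);
        return 0;
}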
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index a3eff0986a3f..6a18f9d3e9cc 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -374,7 +374,6 @@ EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
374bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) 374bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
375{ 375{
376 struct ath_common *common = ath9k_hw_common(ah); 376 struct ath_common *common = ath9k_hw_common(ah);
377 struct ath9k_channel *chan = ah->curchan;
378 struct ath9k_tx_queue_info *qi; 377 struct ath9k_tx_queue_info *qi;
379 u32 cwMin, chanCwMin, value; 378 u32 cwMin, chanCwMin, value;
380 379
@@ -387,10 +386,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
387 ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q); 386 ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
388 387
389 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { 388 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
390 if (chan && IS_CHAN_B(chan)) 389 chanCwMin = INIT_CWMIN;
391 chanCwMin = INIT_CWMIN_11B;
392 else
393 chanCwMin = INIT_CWMIN;
394 390
395 for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1); 391 for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
396 } else 392 } else
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index bfccaceed44e..e3eed81f2439 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -603,8 +603,6 @@ enum ath9k_tx_queue_flags {
603#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001 603#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
604 604
605#define ATH9K_DECOMP_MASK_SIZE 128 605#define ATH9K_DECOMP_MASK_SIZE 128
606#define ATH9K_READY_TIME_LO_BOUND 50
607#define ATH9K_READY_TIME_HI_BOUND 96
608 606
609enum ath9k_pkt_type { 607enum ath9k_pkt_type {
610 ATH9K_PKT_TYPE_NORMAL = 0, 608 ATH9K_PKT_TYPE_NORMAL = 0,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 709301f88dcd..20a2fbc1e34f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -312,17 +312,91 @@ out:
 312 * by resetting the chip. To accomplish this we must first clean up any pending 312 * by resetting the chip. To accomplish this we must first clean up any pending
313 * DMA, then restart stuff. 313 * DMA, then restart stuff.
314*/ 314*/
315static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, 315static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chandef)
316 struct ath9k_channel *hchan)
317{ 316{
317 struct ath_hw *ah = sc->sc_ah;
318 struct ath_common *common = ath9k_hw_common(ah);
319 struct ieee80211_hw *hw = sc->hw;
320 struct ath9k_channel *hchan;
321 struct ieee80211_channel *chan = chandef->chan;
322 unsigned long flags;
323 bool offchannel;
324 int pos = chan->hw_value;
325 int old_pos = -1;
318 int r; 326 int r;
319 327
320 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) 328 if (test_bit(SC_OP_INVALID, &sc->sc_flags))
321 return -EIO; 329 return -EIO;
322 330
331 offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
332
333 if (ah->curchan)
334 old_pos = ah->curchan - &ah->channels[0];
335
336 ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
337 chan->center_freq, chandef->width);
338
339 /* update survey stats for the old channel before switching */
340 spin_lock_irqsave(&common->cc_lock, flags);
341 ath_update_survey_stats(sc);
342 spin_unlock_irqrestore(&common->cc_lock, flags);
343
344 ath9k_cmn_get_channel(hw, ah, chandef);
345
346 /*
347 * If the operating channel changes, change the survey in-use flags
348 * along with it.
349 * Reset the survey data for the new channel, unless we're switching
350 * back to the operating channel from an off-channel operation.
351 */
352 if (!offchannel && sc->cur_survey != &sc->survey[pos]) {
353 if (sc->cur_survey)
354 sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
355
356 sc->cur_survey = &sc->survey[pos];
357
358 memset(sc->cur_survey, 0, sizeof(struct survey_info));
359 sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
360 } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
361 memset(&sc->survey[pos], 0, sizeof(struct survey_info));
362 }
363
364 hchan = &sc->sc_ah->channels[pos];
323 r = ath_reset_internal(sc, hchan); 365 r = ath_reset_internal(sc, hchan);
366 if (r)
367 return r;
324 368
325 return r; 369 /*
370 * The most recent snapshot of channel->noisefloor for the old
371 * channel is only available after the hardware reset. Copy it to
372 * the survey stats now.
373 */
374 if (old_pos >= 0)
375 ath_update_survey_nf(sc, old_pos);
376
377 /*
378 * Enable radar pulse detection if on a DFS channel. Spectral
379 * scanning and radar detection can not be used concurrently.
380 */
381 if (hw->conf.radar_enabled) {
382 u32 rxfilter;
383
384 /* set HW specific DFS configuration */
385 ath9k_hw_set_radar_params(ah);
386 rxfilter = ath9k_hw_getrxfilter(ah);
387 rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
388 ATH9K_RX_FILTER_PHYERR;
389 ath9k_hw_setrxfilter(ah, rxfilter);
390 ath_dbg(common, DFS, "DFS enabled at freq %d\n",
391 chan->center_freq);
392 } else {
393 /* perform spectral scan if requested. */
394 if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
395 sc->spectral_mode == SPECTRAL_CHANSCAN)
396 ath9k_spectral_scan_trigger(hw);
397 }
398
399 return 0;
326} 400}
327 401
328static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, 402static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -372,6 +446,13 @@ void ath9k_tasklet(unsigned long data)
372 type = RESET_TYPE_BB_WATCHDOG; 446 type = RESET_TYPE_BB_WATCHDOG;
373 447
374 ath9k_queue_reset(sc, type); 448 ath9k_queue_reset(sc, type);
449
450 /*
451 * Increment the ref. counter here so that
452 * interrupts are enabled in the reset routine.
453 */
454 atomic_inc(&ah->intr_ref_cnt);
455 ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
375 goto out; 456 goto out;
376 } 457 }
377 458
@@ -410,10 +491,9 @@ void ath9k_tasklet(unsigned long data)
410 491
411 ath9k_btcoex_handle_interrupt(sc, status); 492 ath9k_btcoex_handle_interrupt(sc, status);
412 493
413out:
414 /* re-enable hardware interrupt */ 494 /* re-enable hardware interrupt */
415 ath9k_hw_enable_interrupts(ah); 495 ath9k_hw_enable_interrupts(ah);
416 496out:
417 spin_unlock(&sc->sc_pcu_lock); 497 spin_unlock(&sc->sc_pcu_lock);
418 ath9k_ps_restore(sc); 498 ath9k_ps_restore(sc);
419} 499}
@@ -594,7 +674,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
594 ath9k_ps_wakeup(sc); 674 ath9k_ps_wakeup(sc);
595 mutex_lock(&sc->mutex); 675 mutex_lock(&sc->mutex);
596 676
597 init_channel = ath9k_cmn_get_curchannel(hw, ah); 677 init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
598 678
599 /* Reset SERDES registers */ 679 /* Reset SERDES registers */
600 ath9k_hw_configpcipowersave(ah, false); 680 ath9k_hw_configpcipowersave(ah, false);
@@ -797,7 +877,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
797 } 877 }
798 878
799 if (!ah->curchan) 879 if (!ah->curchan)
800 ah->curchan = ath9k_cmn_get_curchannel(hw, ah); 880 ah->curchan = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
801 881
802 ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 882 ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
803 ath9k_hw_phy_disable(ah); 883 ath9k_hw_phy_disable(ah);
@@ -816,7 +896,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
816 ath_dbg(common, CONFIG, "Driver halt\n"); 896 ath_dbg(common, CONFIG, "Driver halt\n");
817} 897}
818 898
819bool ath9k_uses_beacons(int type) 899static bool ath9k_uses_beacons(int type)
820{ 900{
821 switch (type) { 901 switch (type) {
822 case NL80211_IFTYPE_AP: 902 case NL80211_IFTYPE_AP:
@@ -1201,81 +1281,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1201 } 1281 }
1202 1282
1203 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) { 1283 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
1204 struct ieee80211_channel *curchan = hw->conf.chandef.chan; 1284 if (ath_set_channel(sc, &hw->conf.chandef) < 0) {
1205 int pos = curchan->hw_value;
1206 int old_pos = -1;
1207 unsigned long flags;
1208
1209 if (ah->curchan)
1210 old_pos = ah->curchan - &ah->channels[0];
1211
1212 ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
1213 curchan->center_freq, hw->conf.chandef.width);
1214
1215 /* update survey stats for the old channel before switching */
1216 spin_lock_irqsave(&common->cc_lock, flags);
1217 ath_update_survey_stats(sc);
1218 spin_unlock_irqrestore(&common->cc_lock, flags);
1219
1220 ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
1221 &conf->chandef);
1222
1223 /*
1224 * If the operating channel changes, change the survey in-use flags
1225 * along with it.
1226 * Reset the survey data for the new channel, unless we're switching
1227 * back to the operating channel from an off-channel operation.
1228 */
1229 if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
1230 sc->cur_survey != &sc->survey[pos]) {
1231
1232 if (sc->cur_survey)
1233 sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
1234
1235 sc->cur_survey = &sc->survey[pos];
1236
1237 memset(sc->cur_survey, 0, sizeof(struct survey_info));
1238 sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
1239 } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
1240 memset(&sc->survey[pos], 0, sizeof(struct survey_info));
1241 }
1242
1243 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
1244 ath_err(common, "Unable to set channel\n"); 1285 ath_err(common, "Unable to set channel\n");
1245 mutex_unlock(&sc->mutex); 1286 mutex_unlock(&sc->mutex);
1246 ath9k_ps_restore(sc); 1287 ath9k_ps_restore(sc);
1247 return -EINVAL; 1288 return -EINVAL;
1248 } 1289 }
1249
1250 /*
1251 * The most recent snapshot of channel->noisefloor for the old
1252 * channel is only available after the hardware reset. Copy it to
1253 * the survey stats now.
1254 */
1255 if (old_pos >= 0)
1256 ath_update_survey_nf(sc, old_pos);
1257
1258 /*
1259 * Enable radar pulse detection if on a DFS channel. Spectral
1260 * scanning and radar detection can not be used concurrently.
1261 */
1262 if (hw->conf.radar_enabled) {
1263 u32 rxfilter;
1264
1265 /* set HW specific DFS configuration */
1266 ath9k_hw_set_radar_params(ah);
1267 rxfilter = ath9k_hw_getrxfilter(ah);
1268 rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
1269 ATH9K_RX_FILTER_PHYERR;
1270 ath9k_hw_setrxfilter(ah, rxfilter);
1271 ath_dbg(common, DFS, "DFS enabled at freq %d\n",
1272 curchan->center_freq);
1273 } else {
1274 /* perform spectral scan if requested. */
1275 if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
1276 sc->spectral_mode == SPECTRAL_CHANSCAN)
1277 ath9k_spectral_scan_trigger(hw);
1278 }
1279 } 1290 }
1280 1291
1281 if (changed & IEEE80211_CONF_CHANGE_POWER) { 1292 if (changed & IEEE80211_CONF_CHANGE_POWER) {
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 815bee21c19a..0ac1b5f04256 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -661,9 +661,9 @@ void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
661 chan_start = wlan_chan - 10; 661 chan_start = wlan_chan - 10;
662 chan_end = wlan_chan + 10; 662 chan_end = wlan_chan + 10;
663 663
664 if (chan->chanmode == CHANNEL_G_HT40PLUS) 664 if (IS_CHAN_HT40PLUS(chan))
665 chan_end += 20; 665 chan_end += 20;
666 else if (chan->chanmode == CHANNEL_G_HT40MINUS) 666 else if (IS_CHAN_HT40MINUS(chan))
667 chan_start -= 20; 667 chan_start -= 20;
668 668
669 /* adjust side band */ 669 /* adjust side band */
@@ -707,11 +707,11 @@ void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
707 707
708 if (setchannel) { 708 if (setchannel) {
709 struct ath9k_hw_cal_data *caldata = &sc->caldata; 709 struct ath9k_hw_cal_data *caldata = &sc->caldata;
710 if ((caldata->chanmode == CHANNEL_G_HT40PLUS) && 710 if (IS_CHAN_HT40PLUS(ah->curchan) &&
711 (ah->curchan->channel > caldata->channel) && 711 (ah->curchan->channel > caldata->channel) &&
712 (ah->curchan->channel <= caldata->channel + 20)) 712 (ah->curchan->channel <= caldata->channel + 20))
713 return; 713 return;
714 if ((caldata->chanmode == CHANNEL_G_HT40MINUS) && 714 if (IS_CHAN_HT40MINUS(ah->curchan) &&
715 (ah->curchan->channel < caldata->channel) && 715 (ah->curchan->channel < caldata->channel) &&
716 (ah->curchan->channel >= caldata->channel - 20)) 716 (ah->curchan->channel >= caldata->channel - 20))
717 return; 717 return;
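
A note on the ranges computed above: with a 20 MHz channel the protected span is wlan_chan +/- 10 MHz, and HT40+ / HT40- extend it by one extra 20 MHz segment on the side of the secondary channel. A small standalone sketch of the arithmetic (the enum and helper are illustrative, not the driver's types):

#include <stdio.h>

enum ht_mode { HT20, HT40_PLUS, HT40_MINUS };

static void wlan_span(int wlan_chan, enum ht_mode mode, int *start, int *end)
{
        *start = wlan_chan - 10;
        *end = wlan_chan + 10;

        if (mode == HT40_PLUS)
                *end += 20;             /* secondary channel above */
        else if (mode == HT40_MINUS)
                *start -= 20;           /* secondary channel below */
}

int main(void)
{
        int start, end;

        wlan_span(2437, HT40_PLUS, &start, &end);
        printf("2437 MHz HT40+: %d..%d MHz\n", start, end);    /* 2427..2467 */

        wlan_span(2437, HT40_MINUS, &start, &end);
        printf("2437 MHz HT40-: %d..%d MHz\n", start, end);    /* 2407..2447 */
        return 0;
}
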
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index d089a7cf01c4..7e4c2524b630 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -269,7 +269,200 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
269 269
270 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ 270 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
271 { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ 271 { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
272 { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */ 272
273 /* CUS252 */
274 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
275 0x0036,
276 PCI_VENDOR_ID_ATHEROS,
277 0x3028),
278 .driver_data = ATH9K_PCI_CUS252 |
279 ATH9K_PCI_AR9565_2ANT |
280 ATH9K_PCI_BT_ANT_DIV },
281 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
282 0x0036,
283 PCI_VENDOR_ID_AZWAVE,
284 0x2176),
285 .driver_data = ATH9K_PCI_CUS252 |
286 ATH9K_PCI_AR9565_2ANT |
287 ATH9K_PCI_BT_ANT_DIV },
288
289 /* WB335 1-ANT */
290 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
291 0x0036,
292 PCI_VENDOR_ID_FOXCONN,
293 0xE068),
294 .driver_data = ATH9K_PCI_AR9565_1ANT },
295 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
296 0x0036,
297 0x185F, /* WNC */
298 0xA119),
299 .driver_data = ATH9K_PCI_AR9565_1ANT },
300 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
301 0x0036,
302 0x11AD, /* LITEON */
303 0x0632),
304 .driver_data = ATH9K_PCI_AR9565_1ANT },
305 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
306 0x0036,
307 0x11AD, /* LITEON */
308 0x6671),
309 .driver_data = ATH9K_PCI_AR9565_1ANT },
310 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
311 0x0036,
312 0x1B9A, /* XAVI */
313 0x2811),
314 .driver_data = ATH9K_PCI_AR9565_1ANT },
315 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
316 0x0036,
317 0x1B9A, /* XAVI */
318 0x2812),
319 .driver_data = ATH9K_PCI_AR9565_1ANT },
320
321 /* WB335 1-ANT / Antenna Diversity */
322 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
323 0x0036,
324 PCI_VENDOR_ID_ATHEROS,
325 0x3025),
326 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
327 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
328 0x0036,
329 PCI_VENDOR_ID_ATHEROS,
330 0x3026),
331 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
332 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
333 0x0036,
334 PCI_VENDOR_ID_ATHEROS,
335 0x302B),
336 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
337 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
338 0x0036,
339 PCI_VENDOR_ID_FOXCONN,
340 0xE069),
341 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
342 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
343 0x0036,
344 0x185F, /* WNC */
345 0x3028),
346 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
347 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
348 0x0036,
349 0x11AD, /* LITEON */
350 0x0622),
351 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
352 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
353 0x0036,
354 0x11AD, /* LITEON */
355 0x0672),
356 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
357 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
358 0x0036,
359 0x11AD, /* LITEON */
360 0x0662),
361 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
362 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
363 0x0036,
364 PCI_VENDOR_ID_AZWAVE,
365 0x213A),
366 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
367 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
368 0x0036,
369 PCI_VENDOR_ID_LENOVO,
370 0x3026),
371 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
372 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
373 0x0036,
374 PCI_VENDOR_ID_HP,
375 0x18E3),
376 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
377 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
378 0x0036,
379 PCI_VENDOR_ID_HP,
380 0x217F),
381 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
382 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
383 0x0036,
384 PCI_VENDOR_ID_DELL,
385 0x020E),
386 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
387
388 /* WB335 2-ANT */
389 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
390 0x0036,
391 PCI_VENDOR_ID_SAMSUNG,
392 0x411A),
393 .driver_data = ATH9K_PCI_AR9565_2ANT },
394 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
395 0x0036,
396 PCI_VENDOR_ID_SAMSUNG,
397 0x411B),
398 .driver_data = ATH9K_PCI_AR9565_2ANT },
399 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
400 0x0036,
401 PCI_VENDOR_ID_SAMSUNG,
402 0x411C),
403 .driver_data = ATH9K_PCI_AR9565_2ANT },
404 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
405 0x0036,
406 PCI_VENDOR_ID_SAMSUNG,
407 0x411D),
408 .driver_data = ATH9K_PCI_AR9565_2ANT },
409 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
410 0x0036,
411 PCI_VENDOR_ID_SAMSUNG,
412 0x411E),
413 .driver_data = ATH9K_PCI_AR9565_2ANT },
414
415 /* WB335 2-ANT / Antenna-Diversity */
416 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
417 0x0036,
418 PCI_VENDOR_ID_ATHEROS,
419 0x3027),
420 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
421 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
422 0x0036,
423 PCI_VENDOR_ID_ATHEROS,
424 0x302C),
425 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
426 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
427 0x0036,
428 0x11AD, /* LITEON */
429 0x0642),
430 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
431 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
432 0x0036,
433 0x11AD, /* LITEON */
434 0x0652),
435 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
436 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
437 0x0036,
438 0x11AD, /* LITEON */
439 0x0612),
440 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
441 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
442 0x0036,
443 PCI_VENDOR_ID_AZWAVE,
444 0x2130),
445 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
446 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
447 0x0036,
448 0x144F, /* ASKEY */
449 0x7202),
450 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
451 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
452 0x0036,
453 0x1B9A, /* XAVI */
454 0x2810),
455 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
456 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
457 0x0036,
458 0x185F, /* WNC */
459 0x3027),
460 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
461
462 /* PCI-E AR9565 (WB335) */
463 { PCI_VDEVICE(ATHEROS, 0x0036),
464 .driver_data = ATH9K_PCI_BT_ANT_DIV },
465
273 { 0 } 466 { 0 }
274}; 467};
275 468
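
Ordering note on the table above: the PCI core uses the first matching entry, so the new subsystem-specific AR9565 ids must come before the catch-all { PCI_VDEVICE(ATHEROS, 0x0036) } line at the end. The sketch below models that first-match lookup in plain userspace C; ANY_ID, the FLAG_* values and match() are illustrative stand-ins for PCI_ANY_ID, the ATH9K_PCI_* flags and the kernel's matcher, not the real implementation:

#include <stdint.h>
#include <stdio.h>

#define ANY_ID			0xffff	/* stand-in for PCI_ANY_ID */

#define FLAG_AR9565_1ANT	0x1	/* made-up flag values */
#define FLAG_BT_ANT_DIV		0x2

struct id_entry {
        uint16_t vendor, device, subvendor, subdevice;
        unsigned long driver_data;
};

static const struct id_entry table[] = {
        /* subsystem-specific entry first ... */
        { 0x168c, 0x0036, 0x11ad, 0x0632, FLAG_AR9565_1ANT },
        /* ... catch-all for the same vendor/device last */
        { 0x168c, 0x0036, ANY_ID, ANY_ID, FLAG_BT_ANT_DIV },
        { 0 }
};

static const struct id_entry *match(uint16_t v, uint16_t d,
                                    uint16_t sv, uint16_t sd)
{
        const struct id_entry *e;

        for (e = table; e->vendor; e++)
                if (e->vendor == v && e->device == d &&
                    (e->subvendor == ANY_ID || e->subvendor == sv) &&
                    (e->subdevice == ANY_ID || e->subdevice == sd))
                        return e;
        return NULL;
}

int main(void)
{
        /* a LITEON 0x0632 card hits the specific entry ... */
        printf("0x%lx\n", match(0x168c, 0x0036, 0x11ad, 0x0632)->driver_data);
        /* ... an unknown subsystem id falls through to the catch-all */
        printf("0x%lx\n", match(0x168c, 0x0036, 0x1234, 0x5678)->driver_data);
        return 0;
}
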
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index d3d7c51fa6c8..d829bb62a3fc 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1387,31 +1387,31 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
1387 int used_mcs = 0, used_htmode = 0; 1387 int used_mcs = 0, used_htmode = 0;
1388 1388
1389 if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) { 1389 if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
1390 used_mcs = snprintf(mcs, 5, "%d", 1390 used_mcs = scnprintf(mcs, 5, "%d",
1391 rc->rate_table->info[i].ratecode); 1391 rc->rate_table->info[i].ratecode);
1392 1392
1393 if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy)) 1393 if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
1394 used_htmode = snprintf(htmode, 5, "HT40"); 1394 used_htmode = scnprintf(htmode, 5, "HT40");
1395 else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy)) 1395 else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
1396 used_htmode = snprintf(htmode, 5, "HT20"); 1396 used_htmode = scnprintf(htmode, 5, "HT20");
1397 else 1397 else
1398 used_htmode = snprintf(htmode, 5, "????"); 1398 used_htmode = scnprintf(htmode, 5, "????");
1399 } 1399 }
1400 1400
1401 mcs[used_mcs] = '\0'; 1401 mcs[used_mcs] = '\0';
1402 htmode[used_htmode] = '\0'; 1402 htmode[used_htmode] = '\0';
1403 1403
1404 len += snprintf(buf + len, max - len, 1404 len += scnprintf(buf + len, max - len,
1405 "%6s %6s %3u.%d: " 1405 "%6s %6s %3u.%d: "
1406 "%10u %10u %10u %10u\n", 1406 "%10u %10u %10u %10u\n",
1407 htmode, 1407 htmode,
1408 mcs, 1408 mcs,
1409 ratekbps / 1000, 1409 ratekbps / 1000,
1410 (ratekbps % 1000) / 100, 1410 (ratekbps % 1000) / 100,
1411 stats->success, 1411 stats->success,
1412 stats->retries, 1412 stats->retries,
1413 stats->xretries, 1413 stats->xretries,
1414 stats->per); 1414 stats->per);
1415 } 1415 }
1416 1416
1417 if (len > max) 1417 if (len > max)
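
The snprintf() to scnprintf() conversion matters because the return value is added to a running buffer offset: snprintf() returns the length the output would have had, so after truncation the offset can move past the end of the buffer, while scnprintf() returns only what was actually written. A minimal userspace sketch of the accumulation pattern, using a local helper that only mimics the kernel scnprintf() return contract (it is not the kernel function):

#include <stdarg.h>
#include <stdio.h>

/* Mimics scnprintf(): returns characters actually written, excluding the
 * trailing NUL, never the would-be length that snprintf() reports. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);

        if (i >= (int)size)
                i = size ? (int)size - 1 : 0;
        return i;
}

int main(void)
{
        char buf[16];
        size_t len = 0;

        /* The first write truncates; len advances only by what really landed
         * in buf, so the second write cannot start past the end of the buffer
         * (with plain snprintf(), len would jump to 21 here). */
        len += my_scnprintf(buf + len, sizeof(buf) - len, "%s",
                            "0123456789abcdef-long");
        len += my_scnprintf(buf + len, sizeof(buf) - len, "%s", "more");

        printf("len=%zu buf=\"%s\"\n", len, buf);
        return 0;
}
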
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ab9e3a8410bc..8b788efb41fd 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -19,7 +19,7 @@
19#include "ath9k.h" 19#include "ath9k.h"
20#include "ar9003_mac.h" 20#include "ar9003_mac.h"
21 21
22#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 22#define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb))
23 23
24static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 24static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
25{ 25{
@@ -35,7 +35,7 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
35 * buffer (or rx fifo). This can incorrectly acknowledge packets 35 * buffer (or rx fifo). This can incorrectly acknowledge packets
36 * to a sender if last desc is self-linked. 36 * to a sender if last desc is self-linked.
37 */ 37 */
38static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) 38static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
39{ 39{
40 struct ath_hw *ah = sc->sc_ah; 40 struct ath_hw *ah = sc->sc_ah;
41 struct ath_common *common = ath9k_hw_common(ah); 41 struct ath_common *common = ath9k_hw_common(ah);
@@ -68,7 +68,7 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
68 sc->rx.rxlink = &ds->ds_link; 68 sc->rx.rxlink = &ds->ds_link;
69} 69}
70 70
71static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf) 71static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
72{ 72{
73 if (sc->rx.buf_hold) 73 if (sc->rx.buf_hold)
74 ath_rx_buf_link(sc, sc->rx.buf_hold); 74 ath_rx_buf_link(sc, sc->rx.buf_hold);
@@ -112,13 +112,13 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
112 struct ath_hw *ah = sc->sc_ah; 112 struct ath_hw *ah = sc->sc_ah;
113 struct ath_rx_edma *rx_edma; 113 struct ath_rx_edma *rx_edma;
114 struct sk_buff *skb; 114 struct sk_buff *skb;
115 struct ath_buf *bf; 115 struct ath_rxbuf *bf;
116 116
117 rx_edma = &sc->rx.rx_edma[qtype]; 117 rx_edma = &sc->rx.rx_edma[qtype];
118 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize) 118 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
119 return false; 119 return false;
120 120
121 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 121 bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
122 list_del_init(&bf->list); 122 list_del_init(&bf->list);
123 123
124 skb = bf->bf_mpdu; 124 skb = bf->bf_mpdu;
@@ -138,7 +138,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
138 enum ath9k_rx_qtype qtype) 138 enum ath9k_rx_qtype qtype)
139{ 139{
140 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 140 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
141 struct ath_buf *bf, *tbf; 141 struct ath_rxbuf *bf, *tbf;
142 142
143 if (list_empty(&sc->rx.rxbuf)) { 143 if (list_empty(&sc->rx.rxbuf)) {
144 ath_dbg(common, QUEUE, "No free rx buf available\n"); 144 ath_dbg(common, QUEUE, "No free rx buf available\n");
@@ -154,7 +154,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
154static void ath_rx_remove_buffer(struct ath_softc *sc, 154static void ath_rx_remove_buffer(struct ath_softc *sc,
155 enum ath9k_rx_qtype qtype) 155 enum ath9k_rx_qtype qtype)
156{ 156{
157 struct ath_buf *bf; 157 struct ath_rxbuf *bf;
158 struct ath_rx_edma *rx_edma; 158 struct ath_rx_edma *rx_edma;
159 struct sk_buff *skb; 159 struct sk_buff *skb;
160 160
@@ -171,7 +171,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
171{ 171{
172 struct ath_hw *ah = sc->sc_ah; 172 struct ath_hw *ah = sc->sc_ah;
173 struct ath_common *common = ath9k_hw_common(ah); 173 struct ath_common *common = ath9k_hw_common(ah);
174 struct ath_buf *bf; 174 struct ath_rxbuf *bf;
175 175
176 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 176 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
177 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 177 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
@@ -199,7 +199,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
199 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 199 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
200 struct ath_hw *ah = sc->sc_ah; 200 struct ath_hw *ah = sc->sc_ah;
201 struct sk_buff *skb; 201 struct sk_buff *skb;
202 struct ath_buf *bf; 202 struct ath_rxbuf *bf;
203 int error = 0, i; 203 int error = 0, i;
204 u32 size; 204 u32 size;
205 205
@@ -211,7 +211,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
211 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP], 211 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
212 ah->caps.rx_hp_qdepth); 212 ah->caps.rx_hp_qdepth);
213 213
214 size = sizeof(struct ath_buf) * nbufs; 214 size = sizeof(struct ath_rxbuf) * nbufs;
215 bf = devm_kzalloc(sc->dev, size, GFP_KERNEL); 215 bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
216 if (!bf) 216 if (!bf)
217 return -ENOMEM; 217 return -ENOMEM;
@@ -271,7 +271,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
271{ 271{
272 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 272 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
273 struct sk_buff *skb; 273 struct sk_buff *skb;
274 struct ath_buf *bf; 274 struct ath_rxbuf *bf;
275 int error = 0; 275 int error = 0;
276 276
277 spin_lock_init(&sc->sc_pcu_lock); 277 spin_lock_init(&sc->sc_pcu_lock);
@@ -332,7 +332,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
332 struct ath_hw *ah = sc->sc_ah; 332 struct ath_hw *ah = sc->sc_ah;
333 struct ath_common *common = ath9k_hw_common(ah); 333 struct ath_common *common = ath9k_hw_common(ah);
334 struct sk_buff *skb; 334 struct sk_buff *skb;
335 struct ath_buf *bf; 335 struct ath_rxbuf *bf;
336 336
337 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 337 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
338 ath_rx_edma_cleanup(sc); 338 ath_rx_edma_cleanup(sc);
@@ -427,7 +427,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
427int ath_startrecv(struct ath_softc *sc) 427int ath_startrecv(struct ath_softc *sc)
428{ 428{
429 struct ath_hw *ah = sc->sc_ah; 429 struct ath_hw *ah = sc->sc_ah;
430 struct ath_buf *bf, *tbf; 430 struct ath_rxbuf *bf, *tbf;
431 431
432 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 432 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
433 ath_edma_start_recv(sc); 433 ath_edma_start_recv(sc);
@@ -447,7 +447,7 @@ int ath_startrecv(struct ath_softc *sc)
447 if (list_empty(&sc->rx.rxbuf)) 447 if (list_empty(&sc->rx.rxbuf))
448 goto start_recv; 448 goto start_recv;
449 449
450 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 450 bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
451 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 451 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
452 ath9k_hw_rxena(ah); 452 ath9k_hw_rxena(ah);
453 453
@@ -603,13 +603,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
603static bool ath_edma_get_buffers(struct ath_softc *sc, 603static bool ath_edma_get_buffers(struct ath_softc *sc,
604 enum ath9k_rx_qtype qtype, 604 enum ath9k_rx_qtype qtype,
605 struct ath_rx_status *rs, 605 struct ath_rx_status *rs,
606 struct ath_buf **dest) 606 struct ath_rxbuf **dest)
607{ 607{
608 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; 608 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
609 struct ath_hw *ah = sc->sc_ah; 609 struct ath_hw *ah = sc->sc_ah;
610 struct ath_common *common = ath9k_hw_common(ah); 610 struct ath_common *common = ath9k_hw_common(ah);
611 struct sk_buff *skb; 611 struct sk_buff *skb;
612 struct ath_buf *bf; 612 struct ath_rxbuf *bf;
613 int ret; 613 int ret;
614 614
615 skb = skb_peek(&rx_edma->rx_fifo); 615 skb = skb_peek(&rx_edma->rx_fifo);
@@ -653,11 +653,11 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
653 return true; 653 return true;
654} 654}
655 655
656static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc, 656static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
657 struct ath_rx_status *rs, 657 struct ath_rx_status *rs,
658 enum ath9k_rx_qtype qtype) 658 enum ath9k_rx_qtype qtype)
659{ 659{
660 struct ath_buf *bf = NULL; 660 struct ath_rxbuf *bf = NULL;
661 661
662 while (ath_edma_get_buffers(sc, qtype, rs, &bf)) { 662 while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
663 if (!bf) 663 if (!bf)
@@ -668,13 +668,13 @@ static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
668 return NULL; 668 return NULL;
669} 669}
670 670
671static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc, 671static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
672 struct ath_rx_status *rs) 672 struct ath_rx_status *rs)
673{ 673{
674 struct ath_hw *ah = sc->sc_ah; 674 struct ath_hw *ah = sc->sc_ah;
675 struct ath_common *common = ath9k_hw_common(ah); 675 struct ath_common *common = ath9k_hw_common(ah);
676 struct ath_desc *ds; 676 struct ath_desc *ds;
677 struct ath_buf *bf; 677 struct ath_rxbuf *bf;
678 int ret; 678 int ret;
679 679
680 if (list_empty(&sc->rx.rxbuf)) { 680 if (list_empty(&sc->rx.rxbuf)) {
@@ -682,7 +682,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
682 return NULL; 682 return NULL;
683 } 683 }
684 684
685 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 685 bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
686 if (bf == sc->rx.buf_hold) 686 if (bf == sc->rx.buf_hold)
687 return NULL; 687 return NULL;
688 688
@@ -702,7 +702,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
702 ret = ath9k_hw_rxprocdesc(ah, ds, rs); 702 ret = ath9k_hw_rxprocdesc(ah, ds, rs);
703 if (ret == -EINPROGRESS) { 703 if (ret == -EINPROGRESS) {
704 struct ath_rx_status trs; 704 struct ath_rx_status trs;
705 struct ath_buf *tbf; 705 struct ath_rxbuf *tbf;
706 struct ath_desc *tds; 706 struct ath_desc *tds;
707 707
708 memset(&trs, 0, sizeof(trs)); 708 memset(&trs, 0, sizeof(trs));
@@ -711,7 +711,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
711 return NULL; 711 return NULL;
712 } 712 }
713 713
714 tbf = list_entry(bf->list.next, struct ath_buf, list); 714 tbf = list_entry(bf->list.next, struct ath_rxbuf, list);
715 715
716 /* 716 /*
717 * On some hardware the descriptor status words could 717 * On some hardware the descriptor status words could
@@ -1308,7 +1308,7 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc,
1308 1308
1309int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1309int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1310{ 1310{
1311 struct ath_buf *bf; 1311 struct ath_rxbuf *bf;
1312 struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb; 1312 struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
1313 struct ieee80211_rx_status *rxs; 1313 struct ieee80211_rx_status *rxs;
1314 struct ath_hw *ah = sc->sc_ah; 1314 struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index fde6da619f30..0db37f230018 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -39,7 +39,7 @@ struct wmi_fw_version {
39struct wmi_event_swba { 39struct wmi_event_swba {
40 __be64 tsf; 40 __be64 tsf;
41 u8 beacon_pending; 41 u8 beacon_pending;
42}; 42} __packed;
43 43
44/* 44/*
45 * 64 - HTC header - WMI header - 1 / txstatus 45 * 64 - HTC header - WMI header - 1 / txstatus
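
Marking wmi_event_swba as __packed keeps it at its 9-byte over-the-wire size; without it the compiler normally pads the struct out to 16 bytes after the u8. A minimal layout sketch, using the GCC attribute that __packed expands to and uint64_t in place of __be64 (illustrative only):

#include <stdint.h>
#include <stdio.h>

struct swba_unpacked {          /* typically 16 bytes: 7 bytes tail padding */
        uint64_t tsf;
        uint8_t  beacon_pending;
};

struct swba_packed {            /* 9 bytes, matching the wire format */
        uint64_t tsf;
        uint8_t  beacon_pending;
} __attribute__((packed));

int main(void)
{
        printf("unpacked: %zu bytes, packed: %zu bytes\n",
               sizeof(struct swba_unpacked), sizeof(struct swba_packed));
        return 0;
}
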
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index dd30452df966..47696d29743c 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1704,16 +1704,9 @@ int ath_cabq_update(struct ath_softc *sc)
1704 int qnum = sc->beacon.cabq->axq_qnum; 1704 int qnum = sc->beacon.cabq->axq_qnum;
1705 1705
1706 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); 1706 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1707 /*
1708 * Ensure the readytime % is within the bounds.
1709 */
1710 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1711 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1712 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1713 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1714 1707
1715 qi.tqi_readyTime = (cur_conf->beacon_interval * 1708 qi.tqi_readyTime = (cur_conf->beacon_interval *
1716 sc->config.cabqReadytime) / 100; 1709 ATH_CABQ_READY_TIME) / 100;
1717 ath_txq_update(sc, qnum, &qi); 1710 ath_txq_update(sc, qnum, &qi);
1718 1711
1719 return 0; 1712 return 0;
@@ -2037,8 +2030,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
2037 struct ath_hw *ah = sc->sc_ah; 2030 struct ath_hw *ah = sc->sc_ah;
2038 struct ath9k_channel *curchan = ah->curchan; 2031 struct ath9k_channel *curchan = ah->curchan;
2039 2032
2040 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && 2033 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
2041 (curchan->channelFlags & CHANNEL_5GHZ) &&
2042 (chainmask == 0x7) && (rate < 0x90)) 2034 (chainmask == 0x7) && (rate < 0x90))
2043 return 0x3; 2035 return 0x3;
2044 else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) && 2036 else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
@@ -2329,7 +2321,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2329 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); 2321 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
2330 2322
2331 if (sc->sc_ah->caldata) 2323 if (sc->sc_ah->caldata)
2332 sc->sc_ah->caldata->paprd_packet_sent = true; 2324 set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
2333 2325
2334 if (!(tx_flags & ATH_TX_ERROR)) 2326 if (!(tx_flags & ATH_TX_ERROR))
2335 /* Frame was ACKed */ 2327 /* Frame was ACKed */
diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig
new file mode 100644
index 000000000000..591ebaea8265
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Kconfig
@@ -0,0 +1,16 @@
1config WCN36XX
2 tristate "Qualcomm Atheros WCN3660/3680 support"
3 depends on MAC80211 && HAS_DMA
4 ---help---
5 This module adds support for wireless adapters based on
6 Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
7
8 If you choose to build a module, it'll be called wcn36xx.
9
10config WCN36XX_DEBUGFS
11 bool "WCN36XX debugfs support"
12 depends on WCN36XX
13 ---help---
 14	  Enable debugfs support
15
16 If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
new file mode 100644
index 000000000000..50c43b4382ba
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -0,0 +1,7 @@
1obj-$(CONFIG_WCN36XX) := wcn36xx.o
2wcn36xx-y += main.o \
3 dxe.o \
4 txrx.o \
5 smd.o \
6 pmc.o \
7 debug.o
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
new file mode 100644
index 000000000000..5b84f7ae0b1e
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/debugfs.h>
20#include <linux/uaccess.h>
21#include "wcn36xx.h"
22#include "debug.h"
23#include "pmc.h"
24
25#ifdef CONFIG_WCN36XX_DEBUGFS
26
27static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
28 size_t count, loff_t *ppos)
29{
30 struct wcn36xx *wcn = file->private_data;
31 struct wcn36xx_vif *vif_priv = NULL;
32 struct ieee80211_vif *vif = NULL;
33 char buf[3];
34
35 list_for_each_entry(vif_priv, &wcn->vif_list, list) {
36 vif = container_of((void *)vif_priv,
37 struct ieee80211_vif,
38 drv_priv);
39 if (NL80211_IFTYPE_STATION == vif->type) {
40 if (vif_priv->pw_state == WCN36XX_BMPS)
41 buf[0] = '1';
42 else
43 buf[0] = '0';
44 break;
45 }
46 }
47 buf[1] = '\n';
48 buf[2] = 0x00;
49
50 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
51}
52
53static ssize_t write_file_bool_bmps(struct file *file,
54 const char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 struct wcn36xx *wcn = file->private_data;
58 struct wcn36xx_vif *vif_priv = NULL;
59 struct ieee80211_vif *vif = NULL;
60
61 char buf[32];
62 int buf_size;
63
64 buf_size = min(count, (sizeof(buf)-1));
65 if (copy_from_user(buf, user_buf, buf_size))
66 return -EFAULT;
67
68 switch (buf[0]) {
69 case 'y':
70 case 'Y':
71 case '1':
72 list_for_each_entry(vif_priv, &wcn->vif_list, list) {
73 vif = container_of((void *)vif_priv,
74 struct ieee80211_vif,
75 drv_priv);
76 if (NL80211_IFTYPE_STATION == vif->type) {
77 wcn36xx_enable_keep_alive_null_packet(wcn, vif);
78 wcn36xx_pmc_enter_bmps_state(wcn, vif);
79 }
80 }
81 break;
82 case 'n':
83 case 'N':
84 case '0':
85 list_for_each_entry(vif_priv, &wcn->vif_list, list) {
86 vif = container_of((void *)vif_priv,
87 struct ieee80211_vif,
88 drv_priv);
89 if (NL80211_IFTYPE_STATION == vif->type)
90 wcn36xx_pmc_exit_bmps_state(wcn, vif);
91 }
92 break;
93 }
94
95 return count;
96}
97
98static const struct file_operations fops_wcn36xx_bmps = {
99 .open = simple_open,
100 .read = read_file_bool_bmps,
101 .write = write_file_bool_bmps,
102};
103
104static ssize_t write_file_dump(struct file *file,
105 const char __user *user_buf,
106 size_t count, loff_t *ppos)
107{
108 struct wcn36xx *wcn = file->private_data;
109 char buf[255], *tmp;
110 int buf_size;
111 u32 arg[WCN36xx_MAX_DUMP_ARGS];
112 int i;
113
114 memset(buf, 0, sizeof(buf));
115 memset(arg, 0, sizeof(arg));
116
117 buf_size = min(count, (sizeof(buf) - 1));
118 if (copy_from_user(buf, user_buf, buf_size))
119 return -EFAULT;
120
121 tmp = buf;
122
123 for (i = 0; i < WCN36xx_MAX_DUMP_ARGS; i++) {
124 char *begin;
125 begin = strsep(&tmp, " ");
126 if (begin == NULL)
127 break;
128
 129		if (kstrtou32(begin, 0, &arg[i]) != 0)
130 break;
131 }
132
 133	wcn36xx_info("DUMP args: %d %d %d %d %d\n", arg[0], arg[1], arg[2],
134 arg[3], arg[4]);
135 wcn36xx_smd_dump_cmd_req(wcn, arg[0], arg[1], arg[2], arg[3], arg[4]);
136
137 return count;
138}
139
140static const struct file_operations fops_wcn36xx_dump = {
141 .open = simple_open,
142 .write = write_file_dump,
143};
144
145#define ADD_FILE(name, mode, fop, priv_data) \
146 do { \
147 struct dentry *d; \
148 d = debugfs_create_file(__stringify(name), \
149 mode, dfs->rootdir, \
150 priv_data, fop); \
151 dfs->file_##name.dentry = d; \
152 if (IS_ERR(d)) { \
 152		if (IS_ERR(d)) {					\
 153			wcn36xx_warn("Failed to create the debugfs entry\n");\
154 dfs->file_##name.dentry = NULL; \
155 } \
156 } while (0)
157
158
159void wcn36xx_debugfs_init(struct wcn36xx *wcn)
160{
161 struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
162
163 dfs->rootdir = debugfs_create_dir(KBUILD_MODNAME,
164 wcn->hw->wiphy->debugfsdir);
165 if (IS_ERR(dfs->rootdir)) {
 166		wcn36xx_warn("Failed to create the debugfs directory\n");
167 dfs->rootdir = NULL;
168 }
169
170 ADD_FILE(bmps_switcher, S_IRUSR | S_IWUSR,
171 &fops_wcn36xx_bmps, wcn);
172 ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn);
173}
174
175void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
176{
177 struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
178 debugfs_remove_recursive(dfs->rootdir);
179}
180
181#endif /* CONFIG_WCN36XX_DEBUGFS */
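
The ADD_FILE() macro in debug.c combines three common kernel macro idioms: __stringify() to turn the argument into the debugfs file name, ## token pasting to pick the matching file_<name> struct field, and a do { ... } while (0) wrapper so the macro stays safe after a bare if/else. A small userspace sketch of the same idioms; STRINGIFY and struct dfs_entry are stand-ins, not the driver's definitions:

#include <stdio.h>

#define __STR(x)	#x
#define STRINGIFY(x)	__STR(x)	/* two levels so macro args expand first */

struct dfs_entry {
        const char *file_bmps_switcher;
        const char *file_dump;
};

/* ##name selects the field, STRINGIFY(name) builds the file name string,
 * and do/while(0) makes the whole expansion behave as one statement. */
#define ADD_FILE(d, name)                                       \
        do {                                                    \
                (d)->file_##name = STRINGIFY(name);             \
                printf("created \"%s\"\n", (d)->file_##name);   \
        } while (0)

int main(void)
{
        struct dfs_entry dfs = { 0 };

        if (1)
                ADD_FILE(&dfs, bmps_switcher);  /* safe even without braces */
        else
                ADD_FILE(&dfs, dump);

        return 0;
}
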
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.h b/drivers/net/wireless/ath/wcn36xx/debug.h
new file mode 100644
index 000000000000..46307aa562d3
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _WCN36XX_DEBUG_H_
18#define _WCN36XX_DEBUG_H_
19
20#include <linux/kernel.h>
21
22#define WCN36xx_MAX_DUMP_ARGS 5
23
24#ifdef CONFIG_WCN36XX_DEBUGFS
25struct wcn36xx_dfs_file {
26 struct dentry *dentry;
27 u32 value;
28};
29
30struct wcn36xx_dfs_entry {
31 struct dentry *rootdir;
32 struct wcn36xx_dfs_file file_bmps_switcher;
33 struct wcn36xx_dfs_file file_dump;
34};
35
36void wcn36xx_debugfs_init(struct wcn36xx *wcn);
37void wcn36xx_debugfs_exit(struct wcn36xx *wcn);
38
39#else
40static inline void wcn36xx_debugfs_init(struct wcn36xx *wcn)
41{
42}
43static inline void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
44{
45}
46
47#endif /* CONFIG_WCN36XX_DEBUGFS */
48
49#endif /* _WCN36XX_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
new file mode 100644
index 000000000000..ee25786b4447
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -0,0 +1,805 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/* DXE - DMA transfer engine
 18 * we have 2 channels (high priority and low priority) for TX and 2 channels for RX.
 19 * Data packets are transferred through the low-priority channels,
 20 * management packets through the high-priority channels.
21 */
22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
25#include <linux/interrupt.h>
26#include "wcn36xx.h"
27#include "txrx.h"
28
29void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
30{
31 struct wcn36xx_dxe_ch *ch = is_low ?
32 &wcn->dxe_tx_l_ch :
33 &wcn->dxe_tx_h_ch;
34
35 return ch->head_blk_ctl->bd_cpu_addr;
36}
37
38static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
39{
40 wcn36xx_dbg(WCN36XX_DBG_DXE,
41 "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
42 addr, data);
43
44 writel(data, wcn->mmio + addr);
45}
46
47static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
48{
49 *data = readl(wcn->mmio + addr);
50
51 wcn36xx_dbg(WCN36XX_DBG_DXE,
52 "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
53 addr, *data);
54}
55
56static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
57{
58 struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
59 int i;
60
61 for (i = 0; i < ch->desc_num && ctl; i++) {
62 next = ctl->next;
63 kfree(ctl);
64 ctl = next;
65 }
66}
67
68static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
69{
70 struct wcn36xx_dxe_ctl *prev_ctl = NULL;
71 struct wcn36xx_dxe_ctl *cur_ctl = NULL;
72 int i;
73
74 for (i = 0; i < ch->desc_num; i++) {
75 cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
76 if (!cur_ctl)
77 goto out_fail;
78
79 cur_ctl->ctl_blk_order = i;
80 if (i == 0) {
81 ch->head_blk_ctl = cur_ctl;
82 ch->tail_blk_ctl = cur_ctl;
83 } else if (ch->desc_num - 1 == i) {
84 prev_ctl->next = cur_ctl;
85 cur_ctl->next = ch->head_blk_ctl;
86 } else {
87 prev_ctl->next = cur_ctl;
88 }
89 prev_ctl = cur_ctl;
90 }
91
92 return 0;
93
94out_fail:
95 wcn36xx_dxe_free_ctl_block(ch);
96 return -ENOMEM;
97}
98
99int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
100{
101 int ret;
102
103 wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
104 wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
105 wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
106 wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
107
108 wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
109 wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
110 wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
111 wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
112
113 wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
114 wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;
115
116 wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
117 wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
118
119 wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
120 wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
121
122 wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
123 wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
124
125 wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
126 wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
127
128 /* DXE control block allocation */
129 ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
130 if (ret)
131 goto out_err;
132 ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
133 if (ret)
134 goto out_err;
135 ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
136 if (ret)
137 goto out_err;
138 ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
139 if (ret)
140 goto out_err;
141
 142	/* Initialize SMSM state: clear WLAN_TX_ENABLE, set WLAN_TX_RINGS_EMPTY */
143 ret = wcn->ctrl_ops->smsm_change_state(
144 WCN36XX_SMSM_WLAN_TX_ENABLE,
145 WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
146
147 return 0;
148
149out_err:
150 wcn36xx_err("Failed to allocate DXE control blocks\n");
151 wcn36xx_dxe_free_ctl_blks(wcn);
152 return -ENOMEM;
153}
154
155void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
156{
157 wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
158 wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
159 wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
160 wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
161}
162
163static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
164{
165 struct wcn36xx_dxe_desc *cur_dxe = NULL;
166 struct wcn36xx_dxe_desc *prev_dxe = NULL;
167 struct wcn36xx_dxe_ctl *cur_ctl = NULL;
168 size_t size;
169 int i;
170
171 size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
172 wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
173 GFP_KERNEL);
174 if (!wcn_ch->cpu_addr)
175 return -ENOMEM;
176
177 memset(wcn_ch->cpu_addr, 0, size);
178
179 cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
180 cur_ctl = wcn_ch->head_blk_ctl;
181
182 for (i = 0; i < wcn_ch->desc_num; i++) {
183 cur_ctl->desc = cur_dxe;
184 cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
185 i * sizeof(struct wcn36xx_dxe_desc);
186
187 switch (wcn_ch->ch_type) {
188 case WCN36XX_DXE_CH_TX_L:
189 cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
190 cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
191 break;
192 case WCN36XX_DXE_CH_TX_H:
193 cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
194 cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
195 break;
196 case WCN36XX_DXE_CH_RX_L:
197 cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
198 cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
199 break;
200 case WCN36XX_DXE_CH_RX_H:
201 cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
202 cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
203 break;
204 }
205 if (0 == i) {
206 cur_dxe->phy_next_l = 0;
207 } else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
208 prev_dxe->phy_next_l =
209 cur_ctl->desc_phy_addr;
210 } else if (i == (wcn_ch->desc_num - 1)) {
211 prev_dxe->phy_next_l =
212 cur_ctl->desc_phy_addr;
213 cur_dxe->phy_next_l =
214 wcn_ch->head_blk_ctl->desc_phy_addr;
215 }
216 cur_ctl = cur_ctl->next;
217 prev_dxe = cur_dxe;
218 cur_dxe++;
219 }
220
221 return 0;
222}
223
224static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
225 struct wcn36xx_dxe_mem_pool *pool)
226{
227 int i, chunk_size = pool->chunk_size;
228 dma_addr_t bd_phy_addr = pool->phy_addr;
229 void *bd_cpu_addr = pool->virt_addr;
230 struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
231
232 for (i = 0; i < ch->desc_num; i++) {
 233		/* Only every second DXE descriptor needs a BD pointer,
 234		 * the others will point to the skb data */
235 if (!(i & 1)) {
236 cur->bd_phy_addr = bd_phy_addr;
237 cur->bd_cpu_addr = bd_cpu_addr;
238 bd_phy_addr += chunk_size;
239 bd_cpu_addr += chunk_size;
240 } else {
241 cur->bd_phy_addr = 0;
242 cur->bd_cpu_addr = NULL;
243 }
244 cur = cur->next;
245 }
246}
247
248static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
249{
250 int reg_data = 0;
251
252 wcn36xx_dxe_read_register(wcn,
253 WCN36XX_DXE_INT_MASK_REG,
254 &reg_data);
255
256 reg_data |= wcn_ch;
257
258 wcn36xx_dxe_write_register(wcn,
259 WCN36XX_DXE_INT_MASK_REG,
260 (int)reg_data);
261 return 0;
262}
263
264static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
265{
266 struct wcn36xx_dxe_desc *dxe = ctl->desc;
267 struct sk_buff *skb;
268
269 skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
270 if (skb == NULL)
271 return -ENOMEM;
272
273 dxe->dst_addr_l = dma_map_single(NULL,
274 skb_tail_pointer(skb),
275 WCN36XX_PKT_SIZE,
276 DMA_FROM_DEVICE);
277 ctl->skb = skb;
278
279 return 0;
280}
281
282static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
283 struct wcn36xx_dxe_ch *wcn_ch)
284{
285 int i;
286 struct wcn36xx_dxe_ctl *cur_ctl = NULL;
287
288 cur_ctl = wcn_ch->head_blk_ctl;
289
290 for (i = 0; i < wcn_ch->desc_num; i++) {
291 wcn36xx_dxe_fill_skb(cur_ctl);
292 cur_ctl = cur_ctl->next;
293 }
294
295 return 0;
296}
297
298static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
299 struct wcn36xx_dxe_ch *wcn_ch)
300{
301 struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
302 int i;
303
304 for (i = 0; i < wcn_ch->desc_num; i++) {
305 kfree_skb(cur->skb);
306 cur = cur->next;
307 }
308}
309
310void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
311{
312 struct ieee80211_tx_info *info;
313 struct sk_buff *skb;
314 unsigned long flags;
315
316 spin_lock_irqsave(&wcn->dxe_lock, flags);
317 skb = wcn->tx_ack_skb;
318 wcn->tx_ack_skb = NULL;
319 spin_unlock_irqrestore(&wcn->dxe_lock, flags);
320
321 if (!skb) {
322 wcn36xx_warn("Spurious TX complete indication\n");
323 return;
324 }
325
326 info = IEEE80211_SKB_CB(skb);
327
328 if (status == 1)
329 info->flags |= IEEE80211_TX_STAT_ACK;
330
331 wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
332
333 ieee80211_tx_status_irqsafe(wcn->hw, skb);
334 ieee80211_wake_queues(wcn->hw);
335}
336
337static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
338{
339 struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
340 struct ieee80211_tx_info *info;
341 unsigned long flags;
342
343 /*
 344	 * Use a do-while loop so the body runs at least once: when the ring is
 345	 * completely full, head and tail point to the same element and a plain
 346	 * while loop would not execute at all.
347 */
348 do {
349 if (ctl->skb) {
350 dma_unmap_single(NULL, ctl->desc->src_addr_l,
351 ctl->skb->len, DMA_TO_DEVICE);
352 info = IEEE80211_SKB_CB(ctl->skb);
353 if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
 354				/* No TX status was requested, so the frame can be freed now */
355 ieee80211_free_txskb(wcn->hw, ctl->skb);
356 }
357 spin_lock_irqsave(&ctl->skb_lock, flags);
358 if (wcn->queues_stopped) {
359 wcn->queues_stopped = false;
360 ieee80211_wake_queues(wcn->hw);
361 }
362 spin_unlock_irqrestore(&ctl->skb_lock, flags);
363
364 ctl->skb = NULL;
365 }
366 ctl = ctl->next;
367 } while (ctl != ch->head_blk_ctl &&
368 !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
369
370 ch->tail_blk_ctl = ctl;
371}
372
373static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
374{
375 struct wcn36xx *wcn = (struct wcn36xx *)dev;
376 int int_src, int_reason;
377
378 wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
379
380 if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
381 wcn36xx_dxe_read_register(wcn,
382 WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
383 &int_reason);
384
385 /* TODO: Check int_reason */
386
387 wcn36xx_dxe_write_register(wcn,
388 WCN36XX_DXE_0_INT_CLR,
389 WCN36XX_INT_MASK_CHAN_TX_H);
390
391 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
392 WCN36XX_INT_MASK_CHAN_TX_H);
393 wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
394 reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
395 }
396
397 if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
398 wcn36xx_dxe_read_register(wcn,
399 WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
400 &int_reason);
401 /* TODO: Check int_reason */
402
403 wcn36xx_dxe_write_register(wcn,
404 WCN36XX_DXE_0_INT_CLR,
405 WCN36XX_INT_MASK_CHAN_TX_L);
406
407 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
408 WCN36XX_INT_MASK_CHAN_TX_L);
409 wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
410 reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
411 }
412
413 return IRQ_HANDLED;
414}
415
416static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
417{
418 struct wcn36xx *wcn = (struct wcn36xx *)dev;
419
420 disable_irq_nosync(wcn->rx_irq);
421 wcn36xx_dxe_rx_frame(wcn);
422 enable_irq(wcn->rx_irq);
423 return IRQ_HANDLED;
424}
425
426static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
427{
428 int ret;
429
430 ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
431 IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
432 if (ret) {
433 wcn36xx_err("failed to alloc tx irq\n");
434 goto out_err;
435 }
436
437 ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
438 "wcn36xx_rx", wcn);
439 if (ret) {
440 wcn36xx_err("failed to alloc rx irq\n");
441 goto out_txirq;
442 }
443
444 enable_irq_wake(wcn->rx_irq);
445
446 return 0;
447
448out_txirq:
449 free_irq(wcn->tx_irq, wcn);
450out_err:
451 return ret;
452
453}
454
455static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
456 struct wcn36xx_dxe_ch *ch)
457{
458 struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
459 struct wcn36xx_dxe_desc *dxe = ctl->desc;
460 dma_addr_t dma_addr;
461 struct sk_buff *skb;
462
463 while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
464 skb = ctl->skb;
465 dma_addr = dxe->dst_addr_l;
466 wcn36xx_dxe_fill_skb(ctl);
467
468 switch (ch->ch_type) {
469 case WCN36XX_DXE_CH_RX_L:
470 dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
471 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
472 WCN36XX_DXE_INT_CH1_MASK);
473 break;
474 case WCN36XX_DXE_CH_RX_H:
475 dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
476 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
477 WCN36XX_DXE_INT_CH3_MASK);
478 break;
479 default:
480 wcn36xx_warn("Unknown channel\n");
481 }
482
483 dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
484 DMA_FROM_DEVICE);
485 wcn36xx_rx_skb(wcn, skb);
486 ctl = ctl->next;
487 dxe = ctl->desc;
488 }
489
490 ch->head_blk_ctl = ctl;
491
492 return 0;
493}
494
495void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
496{
497 int int_src;
498
499 wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
500
501 /* RX_LOW_PRI */
502 if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
503 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
504 WCN36XX_DXE_INT_CH1_MASK);
505 wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
506 }
507
508 /* RX_HIGH_PRI */
509 if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
510 /* Clean up all the INT within this channel */
511 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
512 WCN36XX_DXE_INT_CH3_MASK);
513 wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
514 }
515
516 if (!int_src)
517 wcn36xx_warn("No DXE interrupt pending\n");
518}
519
520int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
521{
522 size_t s;
523 void *cpu_addr;
524
525 /* Allocate BD headers for MGMT frames */
526
 527	/* Where this comes from, ask QC */
528 wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
529 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
530
531 s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
532 cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
533 GFP_KERNEL);
534 if (!cpu_addr)
535 goto out_err;
536
537 wcn->mgmt_mem_pool.virt_addr = cpu_addr;
538 memset(cpu_addr, 0, s);
539
540 /* Allocate BD headers for DATA frames */
541
 542	/* Where this comes from, ask QC */
543 wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
544 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
545
546 s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
547 cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
548 GFP_KERNEL);
549 if (!cpu_addr)
550 goto out_err;
551
552 wcn->data_mem_pool.virt_addr = cpu_addr;
553 memset(cpu_addr, 0, s);
554
555 return 0;
556
557out_err:
558 wcn36xx_dxe_free_mem_pools(wcn);
559 wcn36xx_err("Failed to allocate BD mempool\n");
560 return -ENOMEM;
561}
562
563void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
564{
565 if (wcn->mgmt_mem_pool.virt_addr)
566 dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
567 WCN36XX_DXE_CH_DESC_NUMB_TX_H,
568 wcn->mgmt_mem_pool.virt_addr,
569 wcn->mgmt_mem_pool.phy_addr);
570
571 if (wcn->data_mem_pool.virt_addr) {
572 dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
573 WCN36XX_DXE_CH_DESC_NUMB_TX_L,
574 wcn->data_mem_pool.virt_addr,
575 wcn->data_mem_pool.phy_addr);
576 }
577}
578
579int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
580 struct wcn36xx_vif *vif_priv,
581 struct sk_buff *skb,
582 bool is_low)
583{
584 struct wcn36xx_dxe_ctl *ctl = NULL;
585 struct wcn36xx_dxe_desc *desc = NULL;
586 struct wcn36xx_dxe_ch *ch = NULL;
587 unsigned long flags;
588
589 ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
590
591 ctl = ch->head_blk_ctl;
592
593 spin_lock_irqsave(&ctl->next->skb_lock, flags);
594
595 /*
 596	 * If skb is not NULL, we have reached the tail of the ring and the
 597	 * ring is full. Stop the queues so mac80211 backs off until the ring
 598	 * has an empty slot again.
599 */
600 if (NULL != ctl->next->skb) {
601 ieee80211_stop_queues(wcn->hw);
602 wcn->queues_stopped = true;
603 spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
604 return -EBUSY;
605 }
606 spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
607
608 ctl->skb = NULL;
609 desc = ctl->desc;
610
611 /* Set source address of the BD we send */
612 desc->src_addr_l = ctl->bd_phy_addr;
613
614 desc->dst_addr_l = ch->dxe_wq;
615 desc->fr_len = sizeof(struct wcn36xx_tx_bd);
616 desc->ctrl = ch->ctrl_bd;
617
618 wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
619
620 wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
621 (char *)desc, sizeof(*desc));
622 wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
623 "BD >>> ", (char *)ctl->bd_cpu_addr,
624 sizeof(struct wcn36xx_tx_bd));
625
626 /* Set source address of the SKB we send */
627 ctl = ctl->next;
628 ctl->skb = skb;
629 desc = ctl->desc;
630 if (ctl->bd_cpu_addr) {
631 wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
632 return -EINVAL;
633 }
634
635 desc->src_addr_l = dma_map_single(NULL,
636 ctl->skb->data,
637 ctl->skb->len,
638 DMA_TO_DEVICE);
639
640 desc->dst_addr_l = ch->dxe_wq;
641 desc->fr_len = ctl->skb->len;
642
643 /* set dxe descriptor to VALID */
644 desc->ctrl = ch->ctrl_skb;
645
646 wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
647 (char *)desc, sizeof(*desc));
648 wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
649 (char *)ctl->skb->data, ctl->skb->len);
650
651 /* Move the head of the ring to the next empty descriptor */
652 ch->head_blk_ctl = ctl->next;
653
654 /*
 655	 * When connected and trying to send a data frame, the chip may be in
 656	 * sleep mode and writing to the register will not wake it up. Instead,
 657	 * notify the chip about the new frame over the SMSM bus.
658 */
659 if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
660 wcn->ctrl_ops->smsm_change_state(
661 0,
662 WCN36XX_SMSM_WLAN_TX_ENABLE);
663 } else {
664 /* indicate End Of Packet and generate interrupt on descriptor
665 * done.
666 */
667 wcn36xx_dxe_write_register(wcn,
668 ch->reg_ctrl, ch->def_ctrl);
669 }
670
671 return 0;
672}
673
674int wcn36xx_dxe_init(struct wcn36xx *wcn)
675{
676 int reg_data = 0, ret;
677
678 reg_data = WCN36XX_DXE_REG_RESET;
679 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
680
681 /* Setting interrupt path */
682 reg_data = WCN36XX_DXE_CCU_INT;
683 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
684
685 /***************************************/
686 /* Init descriptors for TX LOW channel */
687 /***************************************/
688 wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
689 wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
690
691 /* Write channel head to a NEXT register */
692 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
693 wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
694
695 /* Program DMA destination addr for TX LOW */
696 wcn36xx_dxe_write_register(wcn,
697 WCN36XX_DXE_CH_DEST_ADDR_TX_L,
698 WCN36XX_DXE_WQ_TX_L);
699
700 wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
701 wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
702
703 /***************************************/
704 /* Init descriptors for TX HIGH channel */
705 /***************************************/
706 wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
707 wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
708
709 /* Write channel head to a NEXT register */
710 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
711 wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
712
713 /* Program DMA destination addr for TX HIGH */
714 wcn36xx_dxe_write_register(wcn,
715 WCN36XX_DXE_CH_DEST_ADDR_TX_H,
716 WCN36XX_DXE_WQ_TX_H);
717
718 wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
719
720 /* Enable channel interrupts */
721 wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
722
723 /***************************************/
724 /* Init descriptors for RX LOW channel */
725 /***************************************/
726 wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
727
728 	/* For RX we need to preallocate buffers */
729 wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
730
731 /* Write channel head to a NEXT register */
732 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
733 wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
734
735 /* Write DMA source address */
736 wcn36xx_dxe_write_register(wcn,
737 WCN36XX_DXE_CH_SRC_ADDR_RX_L,
738 WCN36XX_DXE_WQ_RX_L);
739
740 /* Program preallocated destination address */
741 wcn36xx_dxe_write_register(wcn,
742 WCN36XX_DXE_CH_DEST_ADDR_RX_L,
743 wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
744
745 /* Enable default control registers */
746 wcn36xx_dxe_write_register(wcn,
747 WCN36XX_DXE_REG_CTL_RX_L,
748 WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
749
750 /* Enable channel interrupts */
751 wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
752
753 /***************************************/
754 /* Init descriptors for RX HIGH channel */
755 /***************************************/
756 wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
757
758 	/* For RX we need to preallocate buffers */
759 wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
760
761 	/* Write channel head to a NEXT register */
762 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
763 wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
764
765 /* Write DMA source address */
766 wcn36xx_dxe_write_register(wcn,
767 WCN36XX_DXE_CH_SRC_ADDR_RX_H,
768 WCN36XX_DXE_WQ_RX_H);
769
770 /* Program preallocated destination address */
771 wcn36xx_dxe_write_register(wcn,
772 WCN36XX_DXE_CH_DEST_ADDR_RX_H,
773 wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
774
775 /* Enable default control registers */
776 wcn36xx_dxe_write_register(wcn,
777 WCN36XX_DXE_REG_CTL_RX_H,
778 WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
779
780 /* Enable channel interrupts */
781 wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
782
783 ret = wcn36xx_dxe_request_irqs(wcn);
784 if (ret < 0)
785 goto out_err;
786
787 return 0;
788
789out_err:
790 return ret;
791}
792
793void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
794{
795 free_irq(wcn->tx_irq, wcn);
796 free_irq(wcn->rx_irq, wcn);
797
798 if (wcn->tx_ack_skb) {
799 ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
800 wcn->tx_ack_skb = NULL;
801 }
802
803 wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
804 wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
805}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
new file mode 100644
index 000000000000..c88562f85de1
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -0,0 +1,284 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _DXE_H_
18#define _DXE_H_
19
20#include "wcn36xx.h"
21
22/*
23TX_LOW = DMA0
24TX_HIGH = DMA4
25RX_LOW = DMA1
26RX_HIGH = DMA3
27H2H_TEST_RX_TX = DMA2
28*/
29
30/* DXE registers */
31#define WCN36XX_DXE_MEM_BASE 0x03000000
32#define WCN36XX_DXE_MEM_REG 0x202000
33
34#define WCN36XX_DXE_CCU_INT 0xA0011
35#define WCN36XX_DXE_REG_CCU_INT 0x200b10
36
37/* TODO: This must be calculated properly, not hardcoded */
38#define WCN36XX_DXE_CTRL_TX_L 0x328a44
39#define WCN36XX_DXE_CTRL_TX_H 0x32ce44
40#define WCN36XX_DXE_CTRL_RX_L 0x12ad2f
41#define WCN36XX_DXE_CTRL_RX_H 0x12d12f
42#define WCN36XX_DXE_CTRL_TX_H_BD 0x30ce45
43#define WCN36XX_DXE_CTRL_TX_H_SKB 0x32ce4d
44#define WCN36XX_DXE_CTRL_TX_L_BD 0x308a45
45#define WCN36XX_DXE_CTRL_TX_L_SKB 0x328a4d
46
47/* TODO: This must be calculated properly, not hardcoded */
48#define WCN36XX_DXE_WQ_TX_L 0x17
49#define WCN36XX_DXE_WQ_TX_H 0x17
50#define WCN36XX_DXE_WQ_RX_L 0xB
51#define WCN36XX_DXE_WQ_RX_H 0x4
52
53/* DXE descriptor control field */
54#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001)
55
56/* TODO: This must be calculated properly, not hardcoded */
57/* DXE default control register values */
58#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L 0x847EAD2F
59#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_H 0x84FED12F
60#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_H 0x853ECF4D
61#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_L 0x843e8b4d
62
63/* Common DXE registers */
64#define WCN36XX_DXE_MEM_CSR (WCN36XX_DXE_MEM_REG + 0x00)
65#define WCN36XX_DXE_REG_CSR_RESET (WCN36XX_DXE_MEM_REG + 0x00)
66#define WCN36XX_DXE_ENCH_ADDR (WCN36XX_DXE_MEM_REG + 0x04)
67#define WCN36XX_DXE_REG_CH_EN (WCN36XX_DXE_MEM_REG + 0x08)
68#define WCN36XX_DXE_REG_CH_DONE (WCN36XX_DXE_MEM_REG + 0x0C)
69#define WCN36XX_DXE_REG_CH_ERR (WCN36XX_DXE_MEM_REG + 0x10)
70#define WCN36XX_DXE_INT_MASK_REG (WCN36XX_DXE_MEM_REG + 0x18)
71#define WCN36XX_DXE_INT_SRC_RAW_REG (WCN36XX_DXE_MEM_REG + 0x20)
72 /* #define WCN36XX_DXE_INT_CH6_MASK 0x00000040 */
73 /* #define WCN36XX_DXE_INT_CH5_MASK 0x00000020 */
74 #define WCN36XX_DXE_INT_CH4_MASK 0x00000010
75 #define WCN36XX_DXE_INT_CH3_MASK 0x00000008
76 /* #define WCN36XX_DXE_INT_CH2_MASK 0x00000004 */
77 #define WCN36XX_DXE_INT_CH1_MASK 0x00000002
78 #define WCN36XX_DXE_INT_CH0_MASK 0x00000001
79#define WCN36XX_DXE_0_INT_CLR (WCN36XX_DXE_MEM_REG + 0x30)
80#define WCN36XX_DXE_0_INT_ED_CLR (WCN36XX_DXE_MEM_REG + 0x34)
81#define WCN36XX_DXE_0_INT_DONE_CLR (WCN36XX_DXE_MEM_REG + 0x38)
82#define WCN36XX_DXE_0_INT_ERR_CLR (WCN36XX_DXE_MEM_REG + 0x3C)
83
84#define WCN36XX_DXE_0_CH0_STATUS (WCN36XX_DXE_MEM_REG + 0x404)
85#define WCN36XX_DXE_0_CH1_STATUS (WCN36XX_DXE_MEM_REG + 0x444)
86#define WCN36XX_DXE_0_CH2_STATUS (WCN36XX_DXE_MEM_REG + 0x484)
87#define WCN36XX_DXE_0_CH3_STATUS (WCN36XX_DXE_MEM_REG + 0x4C4)
88#define WCN36XX_DXE_0_CH4_STATUS (WCN36XX_DXE_MEM_REG + 0x504)
89
90#define WCN36XX_DXE_REG_RESET 0x5c89
91
92/* Temporary BMU Workqueue 4 */
93#define WCN36XX_DXE_BMU_WQ_RX_LOW 0xB
94#define WCN36XX_DXE_BMU_WQ_RX_HIGH 0x4
95/* DMA channel offset */
96#define WCN36XX_DXE_TX_LOW_OFFSET 0x400
97#define WCN36XX_DXE_TX_HIGH_OFFSET 0x500
98#define WCN36XX_DXE_RX_LOW_OFFSET 0x440
99#define WCN36XX_DXE_RX_HIGH_OFFSET 0x4C0
100
101/* Address of the next DXE descriptor */
102#define WCN36XX_DXE_CH_NEXT_DESC_ADDR 0x001C
103#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
104 WCN36XX_DXE_TX_LOW_OFFSET + \
105 WCN36XX_DXE_CH_NEXT_DESC_ADDR)
106#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
107 WCN36XX_DXE_TX_HIGH_OFFSET + \
108 WCN36XX_DXE_CH_NEXT_DESC_ADDR)
109#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
110 WCN36XX_DXE_RX_LOW_OFFSET + \
111 WCN36XX_DXE_CH_NEXT_DESC_ADDR)
112#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
113 WCN36XX_DXE_RX_HIGH_OFFSET + \
114 WCN36XX_DXE_CH_NEXT_DESC_ADDR)
115
116/* DXE Descriptor source address */
117#define WCN36XX_DXE_CH_SRC_ADDR 0x000C
118#define WCN36XX_DXE_CH_SRC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
119 WCN36XX_DXE_RX_LOW_OFFSET + \
120 WCN36XX_DXE_CH_SRC_ADDR)
121#define WCN36XX_DXE_CH_SRC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
122 WCN36XX_DXE_RX_HIGH_OFFSET + \
123 WCN36XX_DXE_CH_SRC_ADDR)
124
125/* DXE Descriptor destination address */
126#define WCN36XX_DXE_CH_DEST_ADDR 0x0014
127#define WCN36XX_DXE_CH_DEST_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
128 WCN36XX_DXE_TX_LOW_OFFSET + \
129 WCN36XX_DXE_CH_DEST_ADDR)
130#define WCN36XX_DXE_CH_DEST_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
131 WCN36XX_DXE_TX_HIGH_OFFSET + \
132 WCN36XX_DXE_CH_DEST_ADDR)
133#define WCN36XX_DXE_CH_DEST_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
134 WCN36XX_DXE_RX_LOW_OFFSET + \
135 WCN36XX_DXE_CH_DEST_ADDR)
136#define WCN36XX_DXE_CH_DEST_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
137 WCN36XX_DXE_RX_HIGH_OFFSET + \
138 WCN36XX_DXE_CH_DEST_ADDR)
139
140/* Interrupt status */
141#define WCN36XX_DXE_CH_STATUS_REG_ADDR 0x0004
142#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
143 WCN36XX_DXE_TX_LOW_OFFSET + \
144 WCN36XX_DXE_CH_STATUS_REG_ADDR)
145#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
146 WCN36XX_DXE_TX_HIGH_OFFSET + \
147 WCN36XX_DXE_CH_STATUS_REG_ADDR)
148#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
149 WCN36XX_DXE_RX_LOW_OFFSET + \
150 WCN36XX_DXE_CH_STATUS_REG_ADDR)
151#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
152 WCN36XX_DXE_RX_HIGH_OFFSET + \
153 WCN36XX_DXE_CH_STATUS_REG_ADDR)
154
155
156/* DXE default control register */
157#define WCN36XX_DXE_REG_CTL_RX_L (WCN36XX_DXE_MEM_REG + \
158 WCN36XX_DXE_RX_LOW_OFFSET)
159#define WCN36XX_DXE_REG_CTL_RX_H (WCN36XX_DXE_MEM_REG + \
160 WCN36XX_DXE_RX_HIGH_OFFSET)
161#define WCN36XX_DXE_REG_CTL_TX_H (WCN36XX_DXE_MEM_REG + \
162 WCN36XX_DXE_TX_HIGH_OFFSET)
163#define WCN36XX_DXE_REG_CTL_TX_L (WCN36XX_DXE_MEM_REG + \
164 WCN36XX_DXE_TX_LOW_OFFSET)
165
166#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
167#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
168
169
170/* Interrupt control channel mask */
171#define WCN36XX_INT_MASK_CHAN_TX_L 0x00000001
172#define WCN36XX_INT_MASK_CHAN_RX_L 0x00000002
173#define WCN36XX_INT_MASK_CHAN_RX_H 0x00000008
174#define WCN36XX_INT_MASK_CHAN_TX_H 0x00000010
175
176#define WCN36XX_BD_CHUNK_SIZE 128
177
178#define WCN36XX_PKT_SIZE 0xF20
179enum wcn36xx_dxe_ch_type {
180 WCN36XX_DXE_CH_TX_L,
181 WCN36XX_DXE_CH_TX_H,
182 WCN36XX_DXE_CH_RX_L,
183 WCN36XX_DXE_CH_RX_H
184};
185
186/* number of descriptors per channel */
187enum wcn36xx_dxe_ch_desc_num {
188 WCN36XX_DXE_CH_DESC_NUMB_TX_L = 128,
189 WCN36XX_DXE_CH_DESC_NUMB_TX_H = 10,
190 WCN36XX_DXE_CH_DESC_NUMB_RX_L = 512,
191 WCN36XX_DXE_CH_DESC_NUMB_RX_H = 40
192};
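As an aside for orientation: each channel's descriptor ring is one contiguous DMA area holding desc_num hardware descriptors of the wcn36xx_dxe_desc type defined below. A minimal sketch of the sizing arithmetic only; the helper name is hypothetical and not part of the driver:

/* Illustration only: bytes needed for one channel's descriptor ring,
 * assuming one packed wcn36xx_dxe_desc (8 x u32 = 32 bytes) per slot,
 * e.g. the RX LOW ring: 512 descriptors * 32 bytes = 16 KiB.
 */
static inline size_t wcn36xx_dxe_ring_bytes(enum wcn36xx_dxe_ch_desc_num desc_num)
{
	return desc_num * sizeof(struct wcn36xx_dxe_desc);
}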
193
194/**
195 * struct wcn36xx_dxe_desc - describes descriptor of one DXE buffer
196 *
197 * @ctrl: is a union that consists of following bits:
198 * union {
199 * u32 valid :1; //0 = DMA stop, 1 = DMA continue with this
200 * //descriptor
201 * u32 transfer_type :2; //0 = Host to Host space
202 * u32 eop :1; //End of Packet
203 * u32 bd_handling :1; //if transferType = Host to BMU, then 0
204 * // means first 128 bytes contain BD, and 1
205 * // means create new empty BD
206 * u32 siq :1; // SIQ
207 * u32 diq :1; // DIQ
208 * u32 pdu_rel :1; //0 = don't release BD and PDUs when done,
209 * // 1 = release them
210 * u32 bthld_sel :4; //BMU Threshold Select
211 * u32 prio :3; //Specifies the priority level to use for
212 * // the transfer
213 * u32 stop_channel :1; //1 = DMA stops processing further, channel
214 * //requires re-enabling after this
215 * u32 intr :1; //Interrupt on Descriptor Done
216 * u32 rsvd :1; //reserved
217 * u32 size :14;//14 bits used - ignored for BMU transfers,
218 * //only used for host to host transfers?
219 * } ctrl;
220 */
221struct wcn36xx_dxe_desc {
222 u32 ctrl;
223 u32 fr_len;
224
225 u32 src_addr_l;
226 u32 dst_addr_l;
227 u32 phy_next_l;
228 u32 src_addr_h;
229 u32 dst_addr_h;
230 u32 phy_next_h;
231} __packed;
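Note that the bit layout documented above is not modelled as C bitfields; ctrl is carried as a plain u32 and programmed with the precomputed WCN36XX_DXE_CTRL_* values, with only the VALID bit broken out as a mask in this header. A hedged, purely illustrative helper (not part of the driver) for testing that bit:

/* Illustration only: per the @ctrl documentation above, bit 0 is the
 * VALID flag (0 = DMA stop, 1 = DMA continue with this descriptor).
 */
static inline bool wcn36xx_dxe_desc_valid(const struct wcn36xx_dxe_desc *desc)
{
	return desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK;
}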
232
233/* DXE Control block */
234struct wcn36xx_dxe_ctl {
235 struct wcn36xx_dxe_ctl *next;
236 struct wcn36xx_dxe_desc *desc;
237 unsigned int desc_phy_addr;
238 int ctl_blk_order;
239 struct sk_buff *skb;
240 spinlock_t skb_lock;
241 void *bd_cpu_addr;
242 dma_addr_t bd_phy_addr;
243};
244
245struct wcn36xx_dxe_ch {
246 enum wcn36xx_dxe_ch_type ch_type;
247 void *cpu_addr;
248 dma_addr_t dma_addr;
249 enum wcn36xx_dxe_ch_desc_num desc_num;
250 /* DXE control block ring */
251 struct wcn36xx_dxe_ctl *head_blk_ctl;
252 struct wcn36xx_dxe_ctl *tail_blk_ctl;
253
254 /* DXE channel specific configs */
255 u32 dxe_wq;
256 u32 ctrl_bd;
257 u32 ctrl_skb;
258 u32 reg_ctrl;
259 u32 def_ctrl;
260};
261
262/* Memory Pool for BD headers */
263struct wcn36xx_dxe_mem_pool {
264 int chunk_size;
265 void *virt_addr;
266 dma_addr_t phy_addr;
267};
268
269struct wcn36xx_vif;
270int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn);
271void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn);
272void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn);
273int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn);
274void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn);
275int wcn36xx_dxe_init(struct wcn36xx *wcn);
276void wcn36xx_dxe_deinit(struct wcn36xx *wcn);
277int wcn36xx_dxe_init_channels(struct wcn36xx *wcn);
278int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
279 struct wcn36xx_vif *vif_priv,
280 struct sk_buff *skb,
281 bool is_low);
282void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status);
283void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low);
284#endif /* _DXE_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
new file mode 100644
index 000000000000..c02dbc618724
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -0,0 +1,4657 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _HAL_H_
18#define _HAL_H_
19
20/*---------------------------------------------------------------------------
21 API VERSIONING INFORMATION
22
23 The RIVA API is versioned as MAJOR.MINOR.VERSION.REVISION
24 The MAJOR is incremented for major product/architecture changes
25 (and then MINOR/VERSION/REVISION are zeroed)
26 The MINOR is incremented for minor product/architecture changes
27 (and then VERSION/REVISION are zeroed)
28 The VERSION is incremented if a significant API change occurs
29 (and then REVISION is zeroed)
30 The REVISION is incremented if an insignificant API change occurs
31 or if a new API is added
32  All values are in the range 0..255 (i.e. they are 8-bit values)
33 ---------------------------------------------------------------------------*/
34#define WCN36XX_HAL_VER_MAJOR 1
35#define WCN36XX_HAL_VER_MINOR 4
36#define WCN36XX_HAL_VER_VERSION 1
37#define WCN36XX_HAL_VER_REVISION 2
38
39/* This is to force the compiler to use the full size of an int (4 bytes) */
40#define WCN36XX_HAL_MAX_ENUM_SIZE 0x7FFFFFFF
41#define WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE 0x7FFF
42
43/* Max no. of transmit categories */
44#define STACFG_MAX_TC 8
45
46/* The maximum value of access category */
47#define WCN36XX_HAL_MAX_AC 4
48
49#define WCN36XX_HAL_IPV4_ADDR_LEN 4
50
51#define WALN_HAL_STA_INVALID_IDX 0xFF
52#define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
53
54/* Default Beacon template size */
55#define BEACON_TEMPLATE_SIZE 0x180
56
57/* Param Change Bitmap sent to HAL */
58#define PARAM_BCN_INTERVAL_CHANGED (1 << 0)
59#define PARAM_SHORT_PREAMBLE_CHANGED (1 << 1)
60#define PARAM_SHORT_SLOT_TIME_CHANGED (1 << 2)
61#define PARAM_llACOEXIST_CHANGED (1 << 3)
62#define PARAM_llBCOEXIST_CHANGED (1 << 4)
63#define PARAM_llGCOEXIST_CHANGED (1 << 5)
64#define PARAM_HT20MHZCOEXIST_CHANGED (1<<6)
65#define PARAM_NON_GF_DEVICES_PRESENT_CHANGED (1<<7)
66#define PARAM_RIFS_MODE_CHANGED (1<<8)
67#define PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED (1<<9)
68#define PARAM_OBSS_MODE_CHANGED (1<<10)
69#define PARAM_BEACON_UPDATE_MASK \
70 (PARAM_BCN_INTERVAL_CHANGED | \
71 PARAM_SHORT_PREAMBLE_CHANGED | \
72 PARAM_SHORT_SLOT_TIME_CHANGED | \
73 PARAM_llACOEXIST_CHANGED | \
74 PARAM_llBCOEXIST_CHANGED | \
75 PARAM_llGCOEXIST_CHANGED | \
76 PARAM_HT20MHZCOEXIST_CHANGED | \
77 PARAM_NON_GF_DEVICES_PRESENT_CHANGED | \
78 PARAM_RIFS_MODE_CHANGED | \
79 PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED | \
80 PARAM_OBSS_MODE_CHANGED)
81
82/* dump command response Buffer size */
83#define DUMPCMD_RSP_BUFFER 100
84
85/* version string max length (including NULL) */
86#define WCN36XX_HAL_VERSION_LENGTH 64
87
88/* message types for messages exchanged between WDI and HAL */
89enum wcn36xx_hal_host_msg_type {
90 /* Init/De-Init */
91 WCN36XX_HAL_START_REQ = 0,
92 WCN36XX_HAL_START_RSP = 1,
93 WCN36XX_HAL_STOP_REQ = 2,
94 WCN36XX_HAL_STOP_RSP = 3,
95
96 /* Scan */
97 WCN36XX_HAL_INIT_SCAN_REQ = 4,
98 WCN36XX_HAL_INIT_SCAN_RSP = 5,
99 WCN36XX_HAL_START_SCAN_REQ = 6,
100 WCN36XX_HAL_START_SCAN_RSP = 7,
101 WCN36XX_HAL_END_SCAN_REQ = 8,
102 WCN36XX_HAL_END_SCAN_RSP = 9,
103 WCN36XX_HAL_FINISH_SCAN_REQ = 10,
104 WCN36XX_HAL_FINISH_SCAN_RSP = 11,
105
106 /* HW STA configuration/deconfiguration */
107 WCN36XX_HAL_CONFIG_STA_REQ = 12,
108 WCN36XX_HAL_CONFIG_STA_RSP = 13,
109 WCN36XX_HAL_DELETE_STA_REQ = 14,
110 WCN36XX_HAL_DELETE_STA_RSP = 15,
111 WCN36XX_HAL_CONFIG_BSS_REQ = 16,
112 WCN36XX_HAL_CONFIG_BSS_RSP = 17,
113 WCN36XX_HAL_DELETE_BSS_REQ = 18,
114 WCN36XX_HAL_DELETE_BSS_RSP = 19,
115
116 	/* Infra STA association */
117 WCN36XX_HAL_JOIN_REQ = 20,
118 WCN36XX_HAL_JOIN_RSP = 21,
119 WCN36XX_HAL_POST_ASSOC_REQ = 22,
120 WCN36XX_HAL_POST_ASSOC_RSP = 23,
121
122 /* Security */
123 WCN36XX_HAL_SET_BSSKEY_REQ = 24,
124 WCN36XX_HAL_SET_BSSKEY_RSP = 25,
125 WCN36XX_HAL_SET_STAKEY_REQ = 26,
126 WCN36XX_HAL_SET_STAKEY_RSP = 27,
127 WCN36XX_HAL_RMV_BSSKEY_REQ = 28,
128 WCN36XX_HAL_RMV_BSSKEY_RSP = 29,
129 WCN36XX_HAL_RMV_STAKEY_REQ = 30,
130 WCN36XX_HAL_RMV_STAKEY_RSP = 31,
131
132 /* Qos Related */
133 WCN36XX_HAL_ADD_TS_REQ = 32,
134 WCN36XX_HAL_ADD_TS_RSP = 33,
135 WCN36XX_HAL_DEL_TS_REQ = 34,
136 WCN36XX_HAL_DEL_TS_RSP = 35,
137 WCN36XX_HAL_UPD_EDCA_PARAMS_REQ = 36,
138 WCN36XX_HAL_UPD_EDCA_PARAMS_RSP = 37,
139 WCN36XX_HAL_ADD_BA_REQ = 38,
140 WCN36XX_HAL_ADD_BA_RSP = 39,
141 WCN36XX_HAL_DEL_BA_REQ = 40,
142 WCN36XX_HAL_DEL_BA_RSP = 41,
143
144 WCN36XX_HAL_CH_SWITCH_REQ = 42,
145 WCN36XX_HAL_CH_SWITCH_RSP = 43,
146 WCN36XX_HAL_SET_LINK_ST_REQ = 44,
147 WCN36XX_HAL_SET_LINK_ST_RSP = 45,
148 WCN36XX_HAL_GET_STATS_REQ = 46,
149 WCN36XX_HAL_GET_STATS_RSP = 47,
150 WCN36XX_HAL_UPDATE_CFG_REQ = 48,
151 WCN36XX_HAL_UPDATE_CFG_RSP = 49,
152
153 WCN36XX_HAL_MISSED_BEACON_IND = 50,
154 WCN36XX_HAL_UNKNOWN_ADDR2_FRAME_RX_IND = 51,
155 WCN36XX_HAL_MIC_FAILURE_IND = 52,
156 WCN36XX_HAL_FATAL_ERROR_IND = 53,
157 WCN36XX_HAL_SET_KEYDONE_MSG = 54,
158
159 /* NV Interface */
160 WCN36XX_HAL_DOWNLOAD_NV_REQ = 55,
161 WCN36XX_HAL_DOWNLOAD_NV_RSP = 56,
162
163 WCN36XX_HAL_ADD_BA_SESSION_REQ = 57,
164 WCN36XX_HAL_ADD_BA_SESSION_RSP = 58,
165 WCN36XX_HAL_TRIGGER_BA_REQ = 59,
166 WCN36XX_HAL_TRIGGER_BA_RSP = 60,
167 WCN36XX_HAL_UPDATE_BEACON_REQ = 61,
168 WCN36XX_HAL_UPDATE_BEACON_RSP = 62,
169 WCN36XX_HAL_SEND_BEACON_REQ = 63,
170 WCN36XX_HAL_SEND_BEACON_RSP = 64,
171
172 WCN36XX_HAL_SET_BCASTKEY_REQ = 65,
173 WCN36XX_HAL_SET_BCASTKEY_RSP = 66,
174 WCN36XX_HAL_DELETE_STA_CONTEXT_IND = 67,
175 WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ = 68,
176 WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP = 69,
177
178 /* PTT interface support */
179 WCN36XX_HAL_PROCESS_PTT_REQ = 70,
180 WCN36XX_HAL_PROCESS_PTT_RSP = 71,
181
182 /* BTAMP related events */
183 WCN36XX_HAL_SIGNAL_BTAMP_EVENT_REQ = 72,
184 WCN36XX_HAL_SIGNAL_BTAMP_EVENT_RSP = 73,
185 WCN36XX_HAL_TL_HAL_FLUSH_AC_REQ = 74,
186 WCN36XX_HAL_TL_HAL_FLUSH_AC_RSP = 75,
187
188 WCN36XX_HAL_ENTER_IMPS_REQ = 76,
189 WCN36XX_HAL_EXIT_IMPS_REQ = 77,
190 WCN36XX_HAL_ENTER_BMPS_REQ = 78,
191 WCN36XX_HAL_EXIT_BMPS_REQ = 79,
192 WCN36XX_HAL_ENTER_UAPSD_REQ = 80,
193 WCN36XX_HAL_EXIT_UAPSD_REQ = 81,
194 WCN36XX_HAL_UPDATE_UAPSD_PARAM_REQ = 82,
195 WCN36XX_HAL_CONFIGURE_RXP_FILTER_REQ = 83,
196 WCN36XX_HAL_ADD_BCN_FILTER_REQ = 84,
197 WCN36XX_HAL_REM_BCN_FILTER_REQ = 85,
198 WCN36XX_HAL_ADD_WOWL_BCAST_PTRN = 86,
199 WCN36XX_HAL_DEL_WOWL_BCAST_PTRN = 87,
200 WCN36XX_HAL_ENTER_WOWL_REQ = 88,
201 WCN36XX_HAL_EXIT_WOWL_REQ = 89,
202 WCN36XX_HAL_HOST_OFFLOAD_REQ = 90,
203 WCN36XX_HAL_SET_RSSI_THRESH_REQ = 91,
204 WCN36XX_HAL_GET_RSSI_REQ = 92,
205 WCN36XX_HAL_SET_UAPSD_AC_PARAMS_REQ = 93,
206 WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_REQ = 94,
207
208 WCN36XX_HAL_ENTER_IMPS_RSP = 95,
209 WCN36XX_HAL_EXIT_IMPS_RSP = 96,
210 WCN36XX_HAL_ENTER_BMPS_RSP = 97,
211 WCN36XX_HAL_EXIT_BMPS_RSP = 98,
212 WCN36XX_HAL_ENTER_UAPSD_RSP = 99,
213 WCN36XX_HAL_EXIT_UAPSD_RSP = 100,
214 WCN36XX_HAL_SET_UAPSD_AC_PARAMS_RSP = 101,
215 WCN36XX_HAL_UPDATE_UAPSD_PARAM_RSP = 102,
216 WCN36XX_HAL_CONFIGURE_RXP_FILTER_RSP = 103,
217 WCN36XX_HAL_ADD_BCN_FILTER_RSP = 104,
218 WCN36XX_HAL_REM_BCN_FILTER_RSP = 105,
219 WCN36XX_HAL_SET_RSSI_THRESH_RSP = 106,
220 WCN36XX_HAL_HOST_OFFLOAD_RSP = 107,
221 WCN36XX_HAL_ADD_WOWL_BCAST_PTRN_RSP = 108,
222 WCN36XX_HAL_DEL_WOWL_BCAST_PTRN_RSP = 109,
223 WCN36XX_HAL_ENTER_WOWL_RSP = 110,
224 WCN36XX_HAL_EXIT_WOWL_RSP = 111,
225 WCN36XX_HAL_RSSI_NOTIFICATION_IND = 112,
226 WCN36XX_HAL_GET_RSSI_RSP = 113,
227 WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_RSP = 114,
228
229 /* 11k related events */
230 WCN36XX_HAL_SET_MAX_TX_POWER_REQ = 115,
231 WCN36XX_HAL_SET_MAX_TX_POWER_RSP = 116,
232
233 /* 11R related msgs */
234 WCN36XX_HAL_AGGR_ADD_TS_REQ = 117,
235 WCN36XX_HAL_AGGR_ADD_TS_RSP = 118,
236
237 /* P2P WLAN_FEATURE_P2P */
238 WCN36XX_HAL_SET_P2P_GONOA_REQ = 119,
239 WCN36XX_HAL_SET_P2P_GONOA_RSP = 120,
240
241 /* WLAN Dump commands */
242 WCN36XX_HAL_DUMP_COMMAND_REQ = 121,
243 WCN36XX_HAL_DUMP_COMMAND_RSP = 122,
244
245 /* OEM_DATA FEATURE SUPPORT */
246 WCN36XX_HAL_START_OEM_DATA_REQ = 123,
247 WCN36XX_HAL_START_OEM_DATA_RSP = 124,
248
249 /* ADD SELF STA REQ and RSP */
250 WCN36XX_HAL_ADD_STA_SELF_REQ = 125,
251 WCN36XX_HAL_ADD_STA_SELF_RSP = 126,
252
253 /* DEL SELF STA SUPPORT */
254 WCN36XX_HAL_DEL_STA_SELF_REQ = 127,
255 WCN36XX_HAL_DEL_STA_SELF_RSP = 128,
256
257 /* Coex Indication */
258 WCN36XX_HAL_COEX_IND = 129,
259
260 /* Tx Complete Indication */
261 WCN36XX_HAL_OTA_TX_COMPL_IND = 130,
262
263 /* Host Suspend/resume messages */
264 WCN36XX_HAL_HOST_SUSPEND_IND = 131,
265 WCN36XX_HAL_HOST_RESUME_REQ = 132,
266 WCN36XX_HAL_HOST_RESUME_RSP = 133,
267
268 WCN36XX_HAL_SET_TX_POWER_REQ = 134,
269 WCN36XX_HAL_SET_TX_POWER_RSP = 135,
270 WCN36XX_HAL_GET_TX_POWER_REQ = 136,
271 WCN36XX_HAL_GET_TX_POWER_RSP = 137,
272
273 WCN36XX_HAL_P2P_NOA_ATTR_IND = 138,
274
275 WCN36XX_HAL_ENABLE_RADAR_DETECT_REQ = 139,
276 WCN36XX_HAL_ENABLE_RADAR_DETECT_RSP = 140,
277 WCN36XX_HAL_GET_TPC_REPORT_REQ = 141,
278 WCN36XX_HAL_GET_TPC_REPORT_RSP = 142,
279 WCN36XX_HAL_RADAR_DETECT_IND = 143,
280 WCN36XX_HAL_RADAR_DETECT_INTR_IND = 144,
281 WCN36XX_HAL_KEEP_ALIVE_REQ = 145,
282 WCN36XX_HAL_KEEP_ALIVE_RSP = 146,
283
284 /* PNO messages */
285 WCN36XX_HAL_SET_PREF_NETWORK_REQ = 147,
286 WCN36XX_HAL_SET_PREF_NETWORK_RSP = 148,
287 WCN36XX_HAL_SET_RSSI_FILTER_REQ = 149,
288 WCN36XX_HAL_SET_RSSI_FILTER_RSP = 150,
289 WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ = 151,
290 WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP = 152,
291 WCN36XX_HAL_PREF_NETW_FOUND_IND = 153,
292
293 WCN36XX_HAL_SET_TX_PER_TRACKING_REQ = 154,
294 WCN36XX_HAL_SET_TX_PER_TRACKING_RSP = 155,
295 WCN36XX_HAL_TX_PER_HIT_IND = 156,
296
297 WCN36XX_HAL_8023_MULTICAST_LIST_REQ = 157,
298 WCN36XX_HAL_8023_MULTICAST_LIST_RSP = 158,
299
300 WCN36XX_HAL_SET_PACKET_FILTER_REQ = 159,
301 WCN36XX_HAL_SET_PACKET_FILTER_RSP = 160,
302 WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_REQ = 161,
303 WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_RSP = 162,
304 WCN36XX_HAL_CLEAR_PACKET_FILTER_REQ = 163,
305 WCN36XX_HAL_CLEAR_PACKET_FILTER_RSP = 164,
306
307 	/*
308 	 * This is a temporary fix. Should be removed once the Host and Riva
309 	 * code are in sync.
310 	 */
311 WCN36XX_HAL_INIT_SCAN_CON_REQ = 165,
312
313 WCN36XX_HAL_SET_POWER_PARAMS_REQ = 166,
314 WCN36XX_HAL_SET_POWER_PARAMS_RSP = 167,
315
316 WCN36XX_HAL_TSM_STATS_REQ = 168,
317 WCN36XX_HAL_TSM_STATS_RSP = 169,
318
319 /* wake reason indication (WOW) */
320 WCN36XX_HAL_WAKE_REASON_IND = 170,
321
322 /* GTK offload support */
323 WCN36XX_HAL_GTK_OFFLOAD_REQ = 171,
324 WCN36XX_HAL_GTK_OFFLOAD_RSP = 172,
325 WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ = 173,
326 WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP = 174,
327
328 WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ = 175,
329 WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP = 176,
330 WCN36XX_HAL_EXCLUDE_UNENCRYPTED_IND = 177,
331
332 WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ = 178,
333 WCN36XX_HAL_SET_THERMAL_MITIGATION_RSP = 179,
334
335 WCN36XX_HAL_UPDATE_VHT_OP_MODE_REQ = 182,
336 WCN36XX_HAL_UPDATE_VHT_OP_MODE_RSP = 183,
337
338 WCN36XX_HAL_P2P_NOA_START_IND = 184,
339
340 WCN36XX_HAL_GET_ROAM_RSSI_REQ = 185,
341 WCN36XX_HAL_GET_ROAM_RSSI_RSP = 186,
342
343 WCN36XX_HAL_CLASS_B_STATS_IND = 187,
344 WCN36XX_HAL_DEL_BA_IND = 188,
345 WCN36XX_HAL_DHCP_START_IND = 189,
346 WCN36XX_HAL_DHCP_STOP_IND = 190,
347
348 WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
349};
350
351/* Enumeration for Version */
352enum wcn36xx_hal_host_msg_version {
353 WCN36XX_HAL_MSG_VERSION0 = 0,
354 WCN36XX_HAL_MSG_VERSION1 = 1,
355 /* define as 2 bytes data */
356 WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION = 0x7FFF,
357 WCN36XX_HAL_MSG_VERSION_MAX_FIELD = WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION
358};
359
360enum driver_type {
361 DRIVER_TYPE_PRODUCTION = 0,
362 DRIVER_TYPE_MFG = 1,
363 DRIVER_TYPE_DVT = 2,
364 DRIVER_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
365};
366
367enum wcn36xx_hal_stop_type {
368 HAL_STOP_TYPE_SYS_RESET,
369 HAL_STOP_TYPE_SYS_DEEP_SLEEP,
370 HAL_STOP_TYPE_RF_KILL,
371 HAL_STOP_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
372};
373
374enum wcn36xx_hal_sys_mode {
375 HAL_SYS_MODE_NORMAL,
376 HAL_SYS_MODE_LEARN,
377 HAL_SYS_MODE_SCAN,
378 HAL_SYS_MODE_PROMISC,
379 HAL_SYS_MODE_SUSPEND_LINK,
380 HAL_SYS_MODE_ROAM_SCAN,
381 HAL_SYS_MODE_ROAM_SUSPEND_LINK,
382 HAL_SYS_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
383};
384
385enum phy_chan_bond_state {
386 /* 20MHz IF bandwidth centered on IF carrier */
387 PHY_SINGLE_CHANNEL_CENTERED = 0,
388
389 /* 40MHz IF bandwidth with lower 20MHz supporting the primary channel */
390 PHY_DOUBLE_CHANNEL_LOW_PRIMARY = 1,
391
392 /* 40MHz IF bandwidth centered on IF carrier */
393 PHY_DOUBLE_CHANNEL_CENTERED = 2,
394
395 /* 40MHz IF bandwidth with higher 20MHz supporting the primary ch */
396 PHY_DOUBLE_CHANNEL_HIGH_PRIMARY = 3,
397
398 /* 20/40MHZ offset LOW 40/80MHZ offset CENTERED */
399 PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_CENTERED = 4,
400
401 /* 20/40MHZ offset CENTERED 40/80MHZ offset CENTERED */
402 PHY_QUADRUPLE_CHANNEL_20MHZ_CENTERED_40MHZ_CENTERED = 5,
403
404 /* 20/40MHZ offset HIGH 40/80MHZ offset CENTERED */
405 PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_CENTERED = 6,
406
407 /* 20/40MHZ offset LOW 40/80MHZ offset LOW */
408 PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW = 7,
409
410 /* 20/40MHZ offset HIGH 40/80MHZ offset LOW */
411 PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW = 8,
412
413 /* 20/40MHZ offset LOW 40/80MHZ offset HIGH */
414 PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH = 9,
415
416 /* 20/40MHZ offset-HIGH 40/80MHZ offset HIGH */
417 PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH = 10,
418
419 PHY_CHANNEL_BONDING_STATE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
420};
421
422/* Spatial Multiplexing (SM) Power Save mode */
423enum wcn36xx_hal_ht_mimo_state {
424 /* Static SM Power Save mode */
425 WCN36XX_HAL_HT_MIMO_PS_STATIC = 0,
426
427 /* Dynamic SM Power Save mode */
428 WCN36XX_HAL_HT_MIMO_PS_DYNAMIC = 1,
429
430 /* reserved */
431 WCN36XX_HAL_HT_MIMO_PS_NA = 2,
432
433 /* SM Power Save disabled */
434 WCN36XX_HAL_HT_MIMO_PS_NO_LIMIT = 3,
435
436 WCN36XX_HAL_HT_MIMO_PS_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
437};
438
439/* each station added has a rate mode which specifies the sta attributes */
440enum sta_rate_mode {
441 STA_TAURUS = 0,
442 STA_TITAN,
443 STA_POLARIS,
444 STA_11b,
445 STA_11bg,
446 STA_11a,
447 STA_11n,
448 STA_11ac,
449 STA_INVALID_RATE_MODE = WCN36XX_HAL_MAX_ENUM_SIZE
450};
451
452/* 1,2,5.5,11 */
453#define WCN36XX_HAL_NUM_DSSS_RATES 4
454
455/* 6,9,12,18,24,36,48,54 */
456#define WCN36XX_HAL_NUM_OFDM_RATES 8
457
458/* 72,96,108 */
459#define WCN36XX_HAL_NUM_POLARIS_RATES 3
460
461#define WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET 16
462
463enum wcn36xx_hal_bss_type {
464 WCN36XX_HAL_INFRASTRUCTURE_MODE,
465
466 /* Added for softAP support */
467 WCN36XX_HAL_INFRA_AP_MODE,
468
469 WCN36XX_HAL_IBSS_MODE,
470
471 /* Added for BT-AMP support */
472 WCN36XX_HAL_BTAMP_STA_MODE,
473
474 /* Added for BT-AMP support */
475 WCN36XX_HAL_BTAMP_AP_MODE,
476
477 WCN36XX_HAL_AUTO_MODE,
478
479 WCN36XX_HAL_DONOT_USE_BSS_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
480};
481
482enum wcn36xx_hal_nw_type {
483 WCN36XX_HAL_11A_NW_TYPE,
484 WCN36XX_HAL_11B_NW_TYPE,
485 WCN36XX_HAL_11G_NW_TYPE,
486 WCN36XX_HAL_11N_NW_TYPE,
487 WCN36XX_HAL_DONOT_USE_NW_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
488};
489
490#define WCN36XX_HAL_MAC_RATESET_EID_MAX 12
491
492enum wcn36xx_hal_ht_operating_mode {
493 /* No Protection */
494 WCN36XX_HAL_HT_OP_MODE_PURE,
495
496 /* Overlap Legacy device present, protection is optional */
497 WCN36XX_HAL_HT_OP_MODE_OVERLAP_LEGACY,
498
499 /* No legacy device, but 20 MHz HT present */
500 WCN36XX_HAL_HT_OP_MODE_NO_LEGACY_20MHZ_HT,
501
502 /* Protection is required */
503 WCN36XX_HAL_HT_OP_MODE_MIXED,
504
505 WCN36XX_HAL_HT_OP_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
506};
507
508/* Encryption type enum used with peer */
509enum ani_ed_type {
510 WCN36XX_HAL_ED_NONE,
511 WCN36XX_HAL_ED_WEP40,
512 WCN36XX_HAL_ED_WEP104,
513 WCN36XX_HAL_ED_TKIP,
514 WCN36XX_HAL_ED_CCMP,
515 WCN36XX_HAL_ED_WPI,
516 WCN36XX_HAL_ED_AES_128_CMAC,
517 WCN36XX_HAL_ED_NOT_IMPLEMENTED = WCN36XX_HAL_MAX_ENUM_SIZE
518};
519
520#define WLAN_MAX_KEY_RSC_LEN 16
521#define WLAN_WAPI_KEY_RSC_LEN 16
522
523/* MAX key length when ULA is used */
524#define WCN36XX_HAL_MAC_MAX_KEY_LENGTH 32
525#define WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS 4
526
527/*
528 * Enum to specify whether key is used for TX only, RX only or both.
529 */
530enum ani_key_direction {
531 WCN36XX_HAL_TX_ONLY,
532 WCN36XX_HAL_RX_ONLY,
533 WCN36XX_HAL_TX_RX,
534 WCN36XX_HAL_TX_DEFAULT,
535 WCN36XX_HAL_DONOT_USE_KEY_DIRECTION = WCN36XX_HAL_MAX_ENUM_SIZE
536};
537
538enum ani_wep_type {
539 WCN36XX_HAL_WEP_STATIC,
540 WCN36XX_HAL_WEP_DYNAMIC,
541 WCN36XX_HAL_WEP_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
542};
543
544enum wcn36xx_hal_link_state {
545
546 WCN36XX_HAL_LINK_IDLE_STATE = 0,
547 WCN36XX_HAL_LINK_PREASSOC_STATE = 1,
548 WCN36XX_HAL_LINK_POSTASSOC_STATE = 2,
549 WCN36XX_HAL_LINK_AP_STATE = 3,
550 WCN36XX_HAL_LINK_IBSS_STATE = 4,
551
552 /* BT-AMP Case */
553 WCN36XX_HAL_LINK_BTAMP_PREASSOC_STATE = 5,
554 WCN36XX_HAL_LINK_BTAMP_POSTASSOC_STATE = 6,
555 WCN36XX_HAL_LINK_BTAMP_AP_STATE = 7,
556 WCN36XX_HAL_LINK_BTAMP_STA_STATE = 8,
557
558 /* Reserved for HAL Internal Use */
559 WCN36XX_HAL_LINK_LEARN_STATE = 9,
560 WCN36XX_HAL_LINK_SCAN_STATE = 10,
561 WCN36XX_HAL_LINK_FINISH_SCAN_STATE = 11,
562 WCN36XX_HAL_LINK_INIT_CAL_STATE = 12,
563 WCN36XX_HAL_LINK_FINISH_CAL_STATE = 13,
564 WCN36XX_HAL_LINK_LISTEN_STATE = 14,
565
566 WCN36XX_HAL_LINK_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
567};
568
569enum wcn36xx_hal_stats_mask {
570 HAL_SUMMARY_STATS_INFO = 0x00000001,
571 HAL_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
572 HAL_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
573 HAL_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
574 HAL_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
575 HAL_PER_STA_STATS_INFO = 0x00000020
576};
577
578/* BT-AMP events type */
579enum bt_amp_event_type {
580 BTAMP_EVENT_CONNECTION_START,
581 BTAMP_EVENT_CONNECTION_STOP,
582 BTAMP_EVENT_CONNECTION_TERMINATED,
583
584 /* This and beyond are invalid values */
585 BTAMP_EVENT_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
586};
587
588/* PE Statistics */
589enum pe_stats_mask {
590 PE_SUMMARY_STATS_INFO = 0x00000001,
591 PE_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
592 PE_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
593 PE_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
594 PE_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
595 PE_PER_STA_STATS_INFO = 0x00000020,
596
597 /* This and beyond are invalid values */
598 PE_STATS_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
599};
600
601/*
602 * Configuration Parameter IDs
603 */
604#define WCN36XX_HAL_CFG_STA_ID 0
605#define WCN36XX_HAL_CFG_CURRENT_TX_ANTENNA 1
606#define WCN36XX_HAL_CFG_CURRENT_RX_ANTENNA 2
607#define WCN36XX_HAL_CFG_LOW_GAIN_OVERRIDE 3
608#define WCN36XX_HAL_CFG_POWER_STATE_PER_CHAIN 4
609#define WCN36XX_HAL_CFG_CAL_PERIOD 5
610#define WCN36XX_HAL_CFG_CAL_CONTROL 6
611#define WCN36XX_HAL_CFG_PROXIMITY 7
612#define WCN36XX_HAL_CFG_NETWORK_DENSITY 8
613#define WCN36XX_HAL_CFG_MAX_MEDIUM_TIME 9
614#define WCN36XX_HAL_CFG_MAX_MPDUS_IN_AMPDU 10
615#define WCN36XX_HAL_CFG_RTS_THRESHOLD 11
616#define WCN36XX_HAL_CFG_SHORT_RETRY_LIMIT 12
617#define WCN36XX_HAL_CFG_LONG_RETRY_LIMIT 13
618#define WCN36XX_HAL_CFG_FRAGMENTATION_THRESHOLD 14
619#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ZERO 15
620#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ONE 16
621#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_TWO 17
622#define WCN36XX_HAL_CFG_FIXED_RATE 18
623#define WCN36XX_HAL_CFG_RETRYRATE_POLICY 19
624#define WCN36XX_HAL_CFG_RETRYRATE_SECONDARY 20
625#define WCN36XX_HAL_CFG_RETRYRATE_TERTIARY 21
626#define WCN36XX_HAL_CFG_FORCE_POLICY_PROTECTION 22
627#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_24GHZ 23
628#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_5GHZ 24
629#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_24GHZ 25
630#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_5GHZ 26
631#define WCN36XX_HAL_CFG_MAX_BA_SESSIONS 27
632#define WCN36XX_HAL_CFG_PS_DATA_INACTIVITY_TIMEOUT 28
633#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_FILTER 29
634#define WCN36XX_HAL_CFG_PS_ENABLE_RSSI_MONITOR 30
635#define WCN36XX_HAL_CFG_NUM_BEACON_PER_RSSI_AVERAGE 31
636#define WCN36XX_HAL_CFG_STATS_PERIOD 32
637#define WCN36XX_HAL_CFG_CFP_MAX_DURATION 33
638#define WCN36XX_HAL_CFG_FRAME_TRANS_ENABLED 34
639#define WCN36XX_HAL_CFG_DTIM_PERIOD 35
640#define WCN36XX_HAL_CFG_EDCA_WMM_ACBK 36
641#define WCN36XX_HAL_CFG_EDCA_WMM_ACBE 37
642#define WCN36XX_HAL_CFG_EDCA_WMM_ACVO 38
643#define WCN36XX_HAL_CFG_EDCA_WMM_ACVI 39
644#define WCN36XX_HAL_CFG_BA_THRESHOLD_HIGH 40
645#define WCN36XX_HAL_CFG_MAX_BA_BUFFERS 41
646#define WCN36XX_HAL_CFG_RPE_POLLING_THRESHOLD 42
647#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC0_REG 43
648#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC1_REG 44
649#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC2_REG 45
650#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC3_REG 46
651#define WCN36XX_HAL_CFG_NO_OF_ONCHIP_REORDER_SESSIONS 47
652#define WCN36XX_HAL_CFG_PS_LISTEN_INTERVAL 48
653#define WCN36XX_HAL_CFG_PS_HEART_BEAT_THRESHOLD 49
654#define WCN36XX_HAL_CFG_PS_NTH_BEACON_FILTER 50
655#define WCN36XX_HAL_CFG_PS_MAX_PS_POLL 51
656#define WCN36XX_HAL_CFG_PS_MIN_RSSI_THRESHOLD 52
657#define WCN36XX_HAL_CFG_PS_RSSI_FILTER_PERIOD 53
658#define WCN36XX_HAL_CFG_PS_BROADCAST_FRAME_FILTER_ENABLE 54
659#define WCN36XX_HAL_CFG_PS_IGNORE_DTIM 55
660#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_EARLY_TERM 56
661#define WCN36XX_HAL_CFG_DYNAMIC_PS_POLL_VALUE 57
662#define WCN36XX_HAL_CFG_PS_NULLDATA_AP_RESP_TIMEOUT 58
663#define WCN36XX_HAL_CFG_TELE_BCN_WAKEUP_EN 59
664#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI 60
665#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI_IDLE_BCNS 61
666#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI 62
667#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI_IDLE_BCNS 63
668#define WCN36XX_HAL_CFG_TX_PWR_CTRL_ENABLE 64
669#define WCN36XX_HAL_CFG_VALID_RADAR_CHANNEL_LIST 65
670#define WCN36XX_HAL_CFG_TX_POWER_24_20 66
671#define WCN36XX_HAL_CFG_TX_POWER_24_40 67
672#define WCN36XX_HAL_CFG_TX_POWER_50_20 68
673#define WCN36XX_HAL_CFG_TX_POWER_50_40 69
674#define WCN36XX_HAL_CFG_MCAST_BCAST_FILTER_SETTING 70
675#define WCN36XX_HAL_CFG_BCN_EARLY_TERM_WAKEUP_INTERVAL 71
676#define WCN36XX_HAL_CFG_MAX_TX_POWER_2_4 72
677#define WCN36XX_HAL_CFG_MAX_TX_POWER_5 73
678#define WCN36XX_HAL_CFG_INFRA_STA_KEEP_ALIVE_PERIOD 74
679#define WCN36XX_HAL_CFG_ENABLE_CLOSE_LOOP 75
680#define WCN36XX_HAL_CFG_BTC_EXECUTION_MODE 76
681#define WCN36XX_HAL_CFG_BTC_DHCP_BT_SLOTS_TO_BLOCK 77
682#define WCN36XX_HAL_CFG_BTC_A2DP_DHCP_BT_SUB_INTERVALS 78
683#define WCN36XX_HAL_CFG_PS_TX_INACTIVITY_TIMEOUT 79
684#define WCN36XX_HAL_CFG_WCNSS_API_VERSION 80
685#define WCN36XX_HAL_CFG_AP_KEEPALIVE_TIMEOUT 81
686#define WCN36XX_HAL_CFG_GO_KEEPALIVE_TIMEOUT 82
687#define WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST 83
688#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_BT 84
689#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_BT 85
690#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_BT 86
691#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_BT 87
692#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_WLAN 88
693#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_WLAN 89
694#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_WLAN 90
695#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_WLAN 91
696#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_BT 92
697#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_WLAN 93
698#define WCN36XX_HAL_CFG_BTC_MAX_SCO_BLOCK_PERC 94
699#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_A2DP 95
700#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_SCO 96
701#define WCN36XX_HAL_CFG_ENABLE_UNICAST_FILTER 97
702#define WCN36XX_HAL_CFG_MAX_ASSOC_LIMIT 98
703#define WCN36XX_HAL_CFG_ENABLE_LPWR_IMG_TRANSITION 99
704#define WCN36XX_HAL_CFG_ENABLE_MCC_ADAPTIVE_SCHEDULER 100
705#define WCN36XX_HAL_CFG_ENABLE_DETECT_PS_SUPPORT 101
706#define WCN36XX_HAL_CFG_AP_LINK_MONITOR_TIMEOUT 102
707#define WCN36XX_HAL_CFG_BTC_DWELL_TIME_MULTIPLIER 103
708#define WCN36XX_HAL_CFG_ENABLE_TDLS_OXYGEN_MODE 104
709#define WCN36XX_HAL_CFG_MAX_PARAMS 105
710
711/* Message definitions - all the messages below need to be packed */
712
713/* Definition for HAL API Version. */
714struct wcnss_wlan_version {
715 u8 revision;
716 u8 version;
717 u8 minor;
718 u8 major;
719} __packed;
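Per the versioning note near the top of this header, the four components are 8-bit values read as MAJOR.MINOR.VERSION.REVISION; the struct stores them in reverse member order. A hypothetical helper (not part of the HAL API, assuming the kernel's snprintf) showing how a reported version could be rendered for logging:

/* Illustration only: format a firmware-reported version; the
 * WCN36XX_HAL_VER_* defaults above would render as "1.4.1.2".
 */
static inline void wcnss_wlan_version_str(const struct wcnss_wlan_version *v,
					  char *buf, size_t len)
{
	snprintf(buf, len, "%u.%u.%u.%u",
		 v->major, v->minor, v->version, v->revision);
}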
720
721/* Definition for Encryption Keys */
722struct wcn36xx_hal_keys {
723 u8 id;
724
725 /* 0 for multicast */
726 u8 unicast;
727
728 enum ani_key_direction direction;
729
730 /* Usage is unknown */
731 u8 rsc[WLAN_MAX_KEY_RSC_LEN];
732
733 /* =1 for authenticator,=0 for supplicant */
734 u8 pae_role;
735
736 u16 length;
737 u8 key[WCN36XX_HAL_MAC_MAX_KEY_LENGTH];
738} __packed;
739
740/*
741 * set_sta_key_params is moved here since it is shared by the
742 * configbss/setstakey messages.
743 */
744struct wcn36xx_hal_set_sta_key_params {
745 /* STA Index */
746 u16 sta_index;
747
748 /* Encryption Type used with peer */
749 enum ani_ed_type enc_type;
750
751 /* STATIC/DYNAMIC - valid only for WEP */
752 enum ani_wep_type wep_type;
753
754 	/* Default WEP key, valid only for static WEP, must be between 0 and 3. */
755 u8 def_wep_idx;
756
757 	/* valid only for non-static WEP encryptions */
758 struct wcn36xx_hal_keys key[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
759
760 /*
761 * Control for Replay Count, 1= Single TID based replay count on Tx
762 * 0 = Per TID based replay count on TX
763 */
764 u8 single_tid_rc;
765
766} __packed;
767
768/* 8-byte control message header used by HAL (16-bit type, 16-bit version, 32-bit length) */
769struct wcn36xx_hal_msg_header {
770 enum wcn36xx_hal_host_msg_type msg_type:16;
771 enum wcn36xx_hal_host_msg_version msg_version:16;
772 u32 len;
773} __packed;
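Every HAL message starts with this header, and len is the total message length including the header (for CFG-carrying requests it also covers the trailing config buffer, as the note in wcn36xx_hal_update_cfg_req_msg below spells out). A minimal sketch of filling it in for a fixed-size request; the helper is hypothetical and assumes the message structs defined later in this file:

/* Illustration only: prepare the header of a fixed-size HAL request,
 * e.g. init_hal_msg_header(&msg.header, WCN36XX_HAL_STOP_REQ, sizeof(msg));
 */
static inline void init_hal_msg_header(struct wcn36xx_hal_msg_header *hdr,
				       enum wcn36xx_hal_host_msg_type type,
				       u32 msg_len)
{
	hdr->msg_type = type;
	hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
	hdr->len = msg_len;	/* total length, header included */
}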
774
775/* Config format required by HAL for each CFG item */
776struct wcn36xx_hal_cfg {
777 /* Cfg Id. The Id required by HAL is exported by HAL
778 * in shared header file between UMAC and HAL.*/
779 u16 id;
780
781 /* Length of the Cfg. This parameter is used to go to next cfg
782 * in the TLV format.*/
783 u16 len;
784
785 	/* Padding bytes for unaligned addresses */
786 u16 pad_bytes;
787
788 /* Reserve bytes for making cfgVal to align address */
789 u16 reserve;
790
791 	/* Following this header there should be 'len' bytes containing
792 	 * the config value, i.e. u8 value[len] */
793} __packed;
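Each CFG item in a TLV buffer is therefore this fixed 8-byte header followed by len value bytes plus pad_bytes of padding before the next header. A hedged sketch of walking such a buffer; the layout is taken from the comments above and the helper itself is purely illustrative:

/* Illustration only: visit each CFG entry in a config TLV buffer of
 * buf_len bytes (such as the buffer following an update_cfg request).
 */
static inline void walk_cfg_tlvs(const u8 *buf, u32 buf_len)
{
	u32 off = 0;

	while (off + sizeof(struct wcn36xx_hal_cfg) <= buf_len) {
		const struct wcn36xx_hal_cfg *cfg =
			(const struct wcn36xx_hal_cfg *)(buf + off);

		/* cfg->id identifies the item; the value starts right
		 * after the header and is cfg->len bytes long */
		off += sizeof(*cfg) + cfg->len + cfg->pad_bytes;
	}
}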
794
795struct wcn36xx_hal_mac_start_parameters {
796 	/* Driver type - Production or FTM, etc. */
797 enum driver_type type;
798
799 /* Length of the config buffer */
800 u32 len;
801
802 /* Following this there is a TLV formatted buffer of length
803 * "len" bytes containing all config values.
804 * The TLV is expected to be formatted like this:
805 * 0 15 31 31+CFG_LEN-1 length-1
806 * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
807 */
808} __packed;
809
810struct wcn36xx_hal_mac_start_req_msg {
811 /* config buffer must start in TLV format just here */
812 struct wcn36xx_hal_msg_header header;
813 struct wcn36xx_hal_mac_start_parameters params;
814} __packed;
815
816struct wcn36xx_hal_mac_start_rsp_params {
817 /* success or failure */
818 u16 status;
819
820 /* Max number of STA supported by the device */
821 u8 stations;
822
823 /* Max number of BSS supported by the device */
824 u8 bssids;
825
826 /* API Version */
827 struct wcnss_wlan_version version;
828
829 /* CRM build information */
830 u8 crm_version[WCN36XX_HAL_VERSION_LENGTH];
831
832 /* hardware/chipset/misc version information */
833 u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH];
834
835} __packed;
836
837struct wcn36xx_hal_mac_start_rsp_msg {
838 struct wcn36xx_hal_msg_header header;
839 struct wcn36xx_hal_mac_start_rsp_params start_rsp_params;
840} __packed;
841
842struct wcn36xx_hal_mac_stop_req_params {
843 /* The reason for which the device is being stopped */
844 enum wcn36xx_hal_stop_type reason;
845
846} __packed;
847
848struct wcn36xx_hal_mac_stop_req_msg {
849 struct wcn36xx_hal_msg_header header;
850 struct wcn36xx_hal_mac_stop_req_params stop_req_params;
851} __packed;
852
853struct wcn36xx_hal_mac_stop_rsp_msg {
854 struct wcn36xx_hal_msg_header header;
855
856 /* success or failure */
857 u32 status;
858} __packed;
859
860struct wcn36xx_hal_update_cfg_req_msg {
861 /*
862 * Note: The length specified in tHalUpdateCfgReqMsg messages should be
863 * header.msgLen = sizeof(tHalUpdateCfgReqMsg) + uConfigBufferLen
864 */
865 struct wcn36xx_hal_msg_header header;
866
867 /* Length of the config buffer. Allows UMAC to update multiple CFGs */
868 u32 len;
869
870 /*
871 * Following this there is a TLV formatted buffer of length
872 * "uConfigBufferLen" bytes containing all config values.
873 * The TLV is expected to be formatted like this:
874 * 0 15 31 31+CFG_LEN-1 length-1
875 * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
876 */
877
878} __packed;
879
880struct wcn36xx_hal_update_cfg_rsp_msg {
881 struct wcn36xx_hal_msg_header header;
882
883 /* success or failure */
884 u32 status;
885
886} __packed;
887
888/* Frame control field format (2 bytes) */
889struct wcn36xx_hal_mac_frame_ctl {
890
891#ifndef ANI_LITTLE_BIT_ENDIAN
892
893 u8 subType:4;
894 u8 type:2;
895 u8 protVer:2;
896
897 u8 order:1;
898 u8 wep:1;
899 u8 moreData:1;
900 u8 powerMgmt:1;
901 u8 retry:1;
902 u8 moreFrag:1;
903 u8 fromDS:1;
904 u8 toDS:1;
905
906#else
907
908 u8 protVer:2;
909 u8 type:2;
910 u8 subType:4;
911
912 u8 toDS:1;
913 u8 fromDS:1;
914 u8 moreFrag:1;
915 u8 retry:1;
916 u8 powerMgmt:1;
917 u8 moreData:1;
918 u8 wep:1;
919 u8 order:1;
920
921#endif
922
923};
924
925/* Sequence control field */
926struct wcn36xx_hal_mac_seq_ctl {
927 u8 fragNum:4;
928 u8 seqNumLo:4;
929 u8 seqNumHi:8;
930};
931
932/* Management header format */
933struct wcn36xx_hal_mac_mgmt_hdr {
934 struct wcn36xx_hal_mac_frame_ctl fc;
935 u8 durationLo;
936 u8 durationHi;
937 u8 da[6];
938 u8 sa[6];
939 u8 bssId[6];
940 struct wcn36xx_hal_mac_seq_ctl seqControl;
941};
942
943/* FIXME: pronto v1 apparently has 4 */
944#define WCN36XX_HAL_NUM_BSSID 2
945
946/* Scan Entry to hold active BSS idx's */
947struct wcn36xx_hal_scan_entry {
948 u8 bss_index[WCN36XX_HAL_NUM_BSSID];
949 u8 active_bss_count;
950};
951
952struct wcn36xx_hal_init_scan_req_msg {
953 struct wcn36xx_hal_msg_header header;
954
955 /* LEARN - AP Role
956 SCAN - STA Role */
957 enum wcn36xx_hal_sys_mode mode;
958
959 /* BSSID of the BSS */
960 u8 bssid[ETH_ALEN];
961
962 /* Whether BSS needs to be notified */
963 u8 notify;
964
965 /* Kind of frame to be used for notifying the BSS (Data Null, QoS
966 * Null, or CTS to Self). Must always be a valid frame type. */
967 u8 frame_type;
968
969 /* UMAC has the option of passing the MAC frame to be used for
970 * notifying the BSS. If non-zero, HAL will use the MAC frame
971 * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
972 * appropriate MAC frame based on frameType. */
973 u8 frame_len;
974
975 /* Following the framelength there is a MAC frame buffer if
976 * frameLength is non-zero. */
977 struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
978
979 /* Entry to hold number of active BSS idx's */
980 struct wcn36xx_hal_scan_entry scan_entry;
981};
982
983struct wcn36xx_hal_init_scan_con_req_msg {
984 struct wcn36xx_hal_msg_header header;
985
986 /* LEARN - AP Role
987 SCAN - STA Role */
988 enum wcn36xx_hal_sys_mode mode;
989
990 /* BSSID of the BSS */
991 u8 bssid[ETH_ALEN];
992
993 /* Whether BSS needs to be notified */
994 u8 notify;
995
996 /* Kind of frame to be used for notifying the BSS (Data Null, QoS
997 * Null, or CTS to Self). Must always be a valid frame type. */
998 u8 frame_type;
999
1000 /* UMAC has the option of passing the MAC frame to be used for
1001 * notifying the BSS. If non-zero, HAL will use the MAC frame
1002 * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
1003 * appropriate MAC frame based on frameType. */
1004 u8 frame_length;
1005
1006 /* Following the framelength there is a MAC frame buffer if
1007 * frameLength is non-zero. */
1008 struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
1009
1010 /* Entry to hold number of active BSS idx's */
1011 struct wcn36xx_hal_scan_entry scan_entry;
1012
1013 /* Single NoA usage in Scanning */
1014 u8 use_noa;
1015
1016 /* Indicates the scan duration (in ms) */
1017 u16 scan_duration;
1018
1019};
1020
1021struct wcn36xx_hal_init_scan_rsp_msg {
1022 struct wcn36xx_hal_msg_header header;
1023
1024 /* success or failure */
1025 u32 status;
1026
1027} __packed;
1028
1029struct wcn36xx_hal_start_scan_req_msg {
1030 struct wcn36xx_hal_msg_header header;
1031
1032 /* Indicates the channel to scan */
1033 u8 scan_channel;
1034} __packed;
1035
1036struct wcn36xx_hal_start_rsp_msg {
1037 struct wcn36xx_hal_msg_header header;
1038
1039 /* success or failure */
1040 u32 status;
1041
1042 u32 start_tsf[2];
1043 u8 tx_mgmt_power;
1044
1045} __packed;
1046
1047struct wcn36xx_hal_end_scan_req_msg {
1048 struct wcn36xx_hal_msg_header header;
1049
1050 	/* Indicates the channel to stop scanning on. Not really used, but
1051 	 * retained for symmetry with the "start scan" message. It can also
1052 	 * help with error checking if needed. */
1053 u8 scan_channel;
1054} __packed;
1055
1056struct wcn36xx_hal_end_scan_rsp_msg {
1057 struct wcn36xx_hal_msg_header header;
1058
1059 /* success or failure */
1060 u32 status;
1061} __packed;
1062
1063struct wcn36xx_hal_finish_scan_req_msg {
1064 struct wcn36xx_hal_msg_header header;
1065
1066 /* Identifies the operational state of the AP/STA
1067 * LEARN - AP Role SCAN - STA Role */
1068 enum wcn36xx_hal_sys_mode mode;
1069
1070 /* Operating channel to tune to. */
1071 u8 oper_channel;
1072
1073 	/* Channel Bonding state. If 20/40 MHz is operational, this will
1074 * indicate the 40 MHz extension channel in combination with the
1075 * control channel */
1076 enum phy_chan_bond_state cb_state;
1077
1078 /* BSSID of the BSS */
1079 u8 bssid[ETH_ALEN];
1080
1081 /* Whether BSS needs to be notified */
1082 u8 notify;
1083
1084 /* Kind of frame to be used for notifying the BSS (Data Null, QoS
1085 * Null, or CTS to Self). Must always be a valid frame type. */
1086 u8 frame_type;
1087
1088 /* UMAC has the option of passing the MAC frame to be used for
1089 * notifying the BSS. If non-zero, HAL will use the MAC frame
1090 * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
1091 * appropriate MAC frame based on frameType. */
1092 u8 frame_length;
1093
1094 /* Following the framelength there is a MAC frame buffer if
1095 * frameLength is non-zero. */
1096 struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
1097
1098 /* Entry to hold number of active BSS idx's */
1099 struct wcn36xx_hal_scan_entry scan_entry;
1100
1101} __packed;
1102
1103struct wcn36xx_hal_finish_scan_rsp_msg {
1104 struct wcn36xx_hal_msg_header header;
1105
1106 /* success or failure */
1107 u32 status;
1108
1109} __packed;
1110
1111enum wcn36xx_hal_rate_index {
1112 HW_RATE_INDEX_1MBPS = 0x82,
1113 HW_RATE_INDEX_2MBPS = 0x84,
1114 HW_RATE_INDEX_5_5MBPS = 0x8B,
1115 HW_RATE_INDEX_6MBPS = 0x0C,
1116 HW_RATE_INDEX_9MBPS = 0x12,
1117 HW_RATE_INDEX_11MBPS = 0x96,
1118 HW_RATE_INDEX_12MBPS = 0x18,
1119 HW_RATE_INDEX_18MBPS = 0x24,
1120 HW_RATE_INDEX_24MBPS = 0x30,
1121 HW_RATE_INDEX_36MBPS = 0x48,
1122 HW_RATE_INDEX_48MBPS = 0x60,
1123 HW_RATE_INDEX_54MBPS = 0x6C
1124};
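These hardware rate indices follow the 500 kbps convention also used for the legacy rate arrays documented below: the low seven bits give the rate in 500 kbps steps, and the DSSS entries additionally have the top bit set. A small illustrative decoder, not part of the HAL:

/* Illustration only: rate in units of 100 kbps from a HW rate index,
 * e.g. HW_RATE_INDEX_5_5MBPS (0x8B): (0x8B & 0x7f) * 5 = 55  -> 5.5 Mbps
 *      HW_RATE_INDEX_54MBPS  (0x6C): (0x6C & 0x7f) * 5 = 540 -> 54 Mbps
 */
static inline unsigned int hw_rate_index_to_100kbps(enum wcn36xx_hal_rate_index idx)
{
	return (idx & 0x7f) * 5;
}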
1125
1126struct wcn36xx_hal_supported_rates {
1127 /*
1128 * For Self STA Entry: this represents Self Mode.
1129 * For Peer Stations, this represents the mode of the peer.
1130 * On Station:
1131 *
1132 * --this mode is updated when PE adds the Self Entry.
1133 *
1134 * -- OR when PE sends 'ADD_BSS' message and station context in BSS
1135 * is used to indicate the mode of the AP.
1136 *
1137 * ON AP:
1138 *
1139 * -- this mode is updated when PE sends 'ADD_BSS' and Sta entry
1140 * for that BSS is used to indicate the self mode of the AP.
1141 *
1142 * -- OR when a station is associated, PE sends 'ADD_STA' message
1143 * with this mode updated.
1144 */
1145
1146 enum sta_rate_mode op_rate_mode;
1147
1148 	/* 11b, 11a and aniLegacyRates are IE rates which give the rate in
1149 	 * units of 500 kbps */
1150 u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES];
1151 u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES];
1152 u16 legacy_rates[WCN36XX_HAL_NUM_POLARIS_RATES];
1153 u16 reserved;
1154
1155 	/* Taurus only supports 26 Titan rates (no ESF/concat rates will be
1156 	 * supported). The first 26 bits are reserved for those Titan rates and
1157 * the last 4 bits(bit28-31) for Taurus, 2(bit26-27) bits are
1158 * reserved. */
1159 /* Titan and Taurus Rates */
1160 u32 enhanced_rate_bitmap;
1161
1162 /*
1163 * 0-76 bits used, remaining reserved
1164 * bits 0-15 and 32 should be set.
1165 */
1166 u8 supported_mcs_set[WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET];
1167
1168 /*
1169 * RX Highest Supported Data Rate defines the highest data
1170 	 * rate that the STA is able to receive, in units of 1 Mbps.
1171 * This value is derived from "Supported MCS Set field" inside
1172 * the HT capability element.
1173 */
1174 u16 rx_highest_data_rate;
1175
1176} __packed;
1177
1178struct wcn36xx_hal_config_sta_params {
1179 /* BSSID of STA */
1180 u8 bssid[ETH_ALEN];
1181
1182 /* ASSOC ID, as assigned by UMAC */
1183 u16 aid;
1184
1185 /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
1186 u8 type;
1187
1188 /* Short Preamble Supported. */
1189 u8 short_preamble_supported;
1190
1191 /* MAC Address of STA */
1192 u8 mac[ETH_ALEN];
1193
1194 /* Listen interval of the STA */
1195 u16 listen_interval;
1196
1197 /* Support for 11e/WMM */
1198 u8 wmm_enabled;
1199
1200 /* 11n HT capable STA */
1201 u8 ht_capable;
1202
1203 /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
1204 u8 tx_channel_width_set;
1205
1206 /* RIFS mode 0 - NA, 1 - Allowed */
1207 u8 rifs_mode;
1208
1209 /* L-SIG TXOP Protection mechanism
1210 0 - No Support, 1 - Supported
1211 SG - there is global field */
1212 u8 lsig_txop_protection;
1213
1214 /* Max Ampdu Size supported by STA. TPE programming.
1215 0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
1216 u8 max_ampdu_size;
1217
1218 /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
1219 u8 max_ampdu_density;
1220
1221 /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
1222 u8 max_amsdu_size;
1223
1224 /* Short GI support for 40Mhz packets */
1225 u8 sgi_40mhz;
1226
1227 /* Short GI support for 20Mhz packets */
1228 u8 sgi_20Mhz;
1229
1230 /* TODO move this parameter to the end for 3680 */
1231 /* These rates are the intersection of peer and self capabilities. */
1232 struct wcn36xx_hal_supported_rates supported_rates;
1233
1234 /* Robust Management Frame (RMF) enabled/disabled */
1235 u8 rmf;
1236
1237 /* The unicast encryption type in the association */
1238 u32 encrypt_type;
1239
1240 /* HAL should update the existing STA entry, if this flag is set. UMAC
1241 will set this flag in case of RE-ASSOC, where we want to reuse the
1242 old STA ID. 0 = Add, 1 = Update */
1243 u8 action;
1244
1245 /* U-APSD Flags: 1b per AC. Encoded as follows:
1246 b7 b6 b5 b4 b3 b2 b1 b0 =
1247 X X X X BE BK VI VO */
1248 u8 uapsd;
1249
1250 /* Max SP Length */
1251 u8 max_sp_len;
1252
1253 /* 11n Green Field preamble support
1254 0 - Not supported, 1 - Supported */
1255 u8 green_field_capable;
1256
1257 /* MIMO Power Save mode */
1258 enum wcn36xx_hal_ht_mimo_state mimo_ps;
1259
1260 /* Delayed BA Support */
1261 u8 delayed_ba_support;
1262
1263 /* Max AMPDU duration in 32us */
1264 u8 max_ampdu_duration;
1265
1266 /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
1267 * set it to 0 if AP does not support it. This indication is sent
1268 	 * to HAL and HAL uses this flag to pick up the appropriate 40 MHz
1269 * rates. */
1270 u8 dsss_cck_mode_40mhz;
1271
1272 /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
1273 	 * Retained for backward compatibility with existing HAL code */
1274 u8 sta_index;
1275
1276 /* BSSID of BSS to which station is associated. Set to 0xFF when
1277 	 * invalid. Retained for backward compatibility with existing HAL
1278 * code */
1279 u8 bssid_index;
1280
1281 u8 p2p;
1282
1283 /* TODO add this parameter for 3680. */
1284 /* Reserved to align next field on a dword boundary */
1285 /* u8 reserved; */
1286} __packed;
1287
1288struct wcn36xx_hal_config_sta_req_msg {
1289 struct wcn36xx_hal_msg_header header;
1290 struct wcn36xx_hal_config_sta_params sta_params;
1291} __packed;
1292
1293struct wcn36xx_hal_config_sta_params_v1 {
1294 /* BSSID of STA */
1295 u8 bssid[ETH_ALEN];
1296
1297 /* ASSOC ID, as assigned by UMAC */
1298 u16 aid;
1299
1300 /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
1301 u8 type;
1302
1303 /* Short Preamble Supported. */
1304 u8 short_preamble_supported;
1305
1306 /* MAC Address of STA */
1307 u8 mac[ETH_ALEN];
1308
1309 /* Listen interval of the STA */
1310 u16 listen_interval;
1311
1312 /* Support for 11e/WMM */
1313 u8 wmm_enabled;
1314
1315 /* 11n HT capable STA */
1316 u8 ht_capable;
1317
1318 /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
1319 u8 tx_channel_width_set;
1320
1321 /* RIFS mode 0 - NA, 1 - Allowed */
1322 u8 rifs_mode;
1323
1324 /* L-SIG TXOP Protection mechanism
1325 0 - No Support, 1 - Supported
1326 SG - there is global field */
1327 u8 lsig_txop_protection;
1328
1329 /* Max Ampdu Size supported by STA. TPE programming.
1330 0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
1331 u8 max_ampdu_size;
1332
1333 /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
1334 u8 max_ampdu_density;
1335
1336 /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
1337 u8 max_amsdu_size;
1338
1339 /* Short GI support for 40Mhz packets */
1340 u8 sgi_40mhz;
1341
1342 /* Short GI support for 20Mhz packets */
1343 u8 sgi_20Mhz;
1344
1345 /* Robust Management Frame (RMF) enabled/disabled */
1346 u8 rmf;
1347
1348 /* The unicast encryption type in the association */
1349 u32 encrypt_type;
1350
1351 /* HAL should update the existing STA entry, if this flag is set. UMAC
1352 will set this flag in case of RE-ASSOC, where we want to reuse the
1353 old STA ID. 0 = Add, 1 = Update */
1354 u8 action;
1355
1356 /* U-APSD Flags: 1b per AC. Encoded as follows:
1357 b7 b6 b5 b4 b3 b2 b1 b0 =
1358 X X X X BE BK VI VO */
1359 u8 uapsd;
1360
1361 /* Max SP Length */
1362 u8 max_sp_len;
1363
1364 /* 11n Green Field preamble support
1365 0 - Not supported, 1 - Supported */
1366 u8 green_field_capable;
1367
1368 /* MIMO Power Save mode */
1369 enum wcn36xx_hal_ht_mimo_state mimo_ps;
1370
1371 /* Delayed BA Support */
1372 u8 delayed_ba_support;
1373
1374 /* Max AMPDU duration in 32us */
1375 u8 max_ampdu_duration;
1376
1377 /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
1378 * set it to 0 if AP does not support it. This indication is sent
1379 * to HAL and HAL uses this flag to pick up appropriate 40 MHz
1380 * rates. */
1381 u8 dsss_cck_mode_40mhz;
1382
1383 /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
1384 * Retained for backward compatibility with existing HAL code */
1385 u8 sta_index;
1386
1387 /* BSSID of BSS to which station is associated. Set to 0xFF when
1388 * invalid. Retained for backward compatibility with existing HAL
1389 * code */
1390 u8 bssid_index;
1391
1392 u8 p2p;
1393
1394 /* Reserved to align next field on a dword boundary */
1395 u8 reserved;
1396
1397 /* These rates are the intersection of peer and self capabilities. */
1398 struct wcn36xx_hal_supported_rates supported_rates;
1399} __packed;
1400
1401struct wcn36xx_hal_config_sta_req_msg_v1 {
1402 struct wcn36xx_hal_msg_header header;
1403 struct wcn36xx_hal_config_sta_params_v1 sta_params;
1404} __packed;
1405
1406struct config_sta_rsp_params {
1407 /* success or failure */
1408 u32 status;
1409
1410 /* Station index; valid only when the 'status' field value is SUCCESS */
1411 u8 sta_index;
1412
1413 /* BSSID Index of BSS to which the station is associated */
1414 u8 bssid_index;
1415
1416 /* DPU Index for PTK */
1417 u8 dpu_index;
1418
1419 /* DPU Index for GTK */
1420 u8 bcast_dpu_index;
1421
1422 /* DPU Index for IGTK */
1423 u8 bcast_mgmt_dpu_idx;
1424
1425 /* PTK DPU signature */
1426 u8 uc_ucast_sig;
1427
1428 /* GTK DPU signature */
1429 u8 uc_bcast_sig;
1430
1431 /* IGTK DPU signature */
1432 u8 uc_mgmt_sig;
1433
1434 u8 p2p;
1435
1436} __packed;
1437
1438struct wcn36xx_hal_config_sta_rsp_msg {
1439 struct wcn36xx_hal_msg_header header;
1440
1441 struct config_sta_rsp_params params;
1442} __packed;
1443
1444/* Delete STA Request message */
1445struct wcn36xx_hal_delete_sta_req_msg {
1446 struct wcn36xx_hal_msg_header header;
1447
1448 /* Index of STA to delete */
1449 u8 sta_index;
1450
1451} __packed;
1452
1453/* Delete STA Response message */
1454struct wcn36xx_hal_delete_sta_rsp_msg {
1455 struct wcn36xx_hal_msg_header header;
1456
1457 /* success or failure */
1458 u32 status;
1459
1460 /* Index of STA deleted */
1461 u8 sta_id;
1462} __packed;
1463
1464/* 12 bytes long because this structure can be used to represent rate and
1465 * extended rate set IEs. The parser assumes this to be at least 12 bytes. */
1466struct wcn36xx_hal_rate_set {
1467 u8 num_rates;
1468 u8 rate[WCN36XX_HAL_MAC_RATESET_EID_MAX];
1469} __packed;
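/*
 * Illustrative sketch, not part of the original header: a compile-time
 * check of the "at least 12 bytes" assumption stated above. Such a check
 * would normally live in a .c file; it is shown here only to make the
 * size constraint explicit. Assumes BUILD_BUG_ON from <linux/build_bug.h>.
 */
static inline void wcn36xx_hal_rate_set_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct wcn36xx_hal_rate_set) < 12);
}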
1470
1471/* access category record */
1472struct wcn36xx_hal_aci_aifsn {
1473#ifndef ANI_LITTLE_BIT_ENDIAN
1474 u8 rsvd:1;
1475 u8 aci:2;
1476 u8 acm:1;
1477 u8 aifsn:4;
1478#else
1479 u8 aifsn:4;
1480 u8 acm:1;
1481 u8 aci:2;
1482 u8 rsvd:1;
1483#endif
1484} __packed;
1485
1486/* contention window size */
1487struct wcn36xx_hal_mac_cw {
1488#ifndef ANI_LITTLE_BIT_ENDIAN
1489 u8 max:4;
1490 u8 min:4;
1491#else
1492 u8 min:4;
1493 u8 max:4;
1494#endif
1495} __packed;
1496
1497struct wcn36xx_hal_edca_param_record {
1498 struct wcn36xx_hal_aci_aifsn aci;
1499 struct wcn36xx_hal_mac_cw cw;
1500 u16 txop_limit;
1501} __packed;
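/*
 * Illustrative sketch, not part of the original header: populating one
 * EDCA record from the bitfield structures above. The numeric values are
 * example numbers only, and treating the 4-bit contention window fields
 * as ECW exponents is an assumption of this sketch, not something
 * mandated by the interface definition.
 */
static inline void wcn36xx_hal_fill_edca_example(
		struct wcn36xx_hal_edca_param_record *rec)
{
	rec->aci.aifsn = 3;	/* AIFS number */
	rec->aci.acm = 0;	/* no admission control */
	rec->aci.aci = 0;	/* access category index */
	rec->aci.rsvd = 0;
	rec->cw.min = 4;	/* assumed ECWmin exponent */
	rec->cw.max = 10;	/* assumed ECWmax exponent */
	rec->txop_limit = 0;	/* no TXOP limit */
}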
1502
1503struct wcn36xx_hal_mac_ssid {
1504 u8 length;
1505 u8 ssid[32];
1506} __packed;
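/*
 * Illustrative sketch, not part of the original header: copying an SSID
 * into the fixed 32-byte buffer above with a length clamp. The helper
 * name is hypothetical and memcpy() is assumed to be available via
 * <linux/string.h>.
 */
static inline void wcn36xx_hal_set_ssid_example(
		struct wcn36xx_hal_mac_ssid *dst, const u8 *ssid, u8 len)
{
	if (len > sizeof(dst->ssid))
		len = sizeof(dst->ssid);
	dst->length = len;
	memcpy(dst->ssid, ssid, len);
}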
1507
1508/* Concurrency role. These are generic IDs that identify the various roles
1509 * in the software system. */
1510enum wcn36xx_hal_con_mode {
1511 WCN36XX_HAL_STA_MODE = 0,
1512
1513 /* to support softAp mode. This is misleading;
1514 it means AP MODE only. */
1515 WCN36XX_HAL_STA_SAP_MODE = 1,
1516
1517 WCN36XX_HAL_P2P_CLIENT_MODE,
1518 WCN36XX_HAL_P2P_GO_MODE,
1519 WCN36XX_HAL_MONITOR_MODE,
1520};
1521
1522/* This is a bit pattern to be set for each mode
1523 * bit 0 - sta mode
1524 * bit 1 - ap mode
1525 * bit 2 - p2p client mode
1526 * bit 3 - p2p go mode */
1527enum wcn36xx_hal_concurrency_mode {
1528 HAL_STA = 1,
1529 HAL_SAP = 2,
1530
1531 /* to support sta, softAp mode. This means STA+AP mode */
1532 HAL_STA_SAP = 3,
1533
1534 HAL_P2P_CLIENT = 4,
1535 HAL_P2P_GO = 8,
1536 HAL_MAX_CONCURRENCY_PERSONA = 4
1537};
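/*
 * Illustrative sketch, not part of the original header: since the values
 * above are bit patterns, combined personas are expressed by OR-ing them
 * (HAL_STA_SAP == HAL_STA | HAL_SAP), and membership can be tested with
 * a mask. The helper name is hypothetical.
 */
static inline bool wcn36xx_hal_concurrency_has_sta(u32 concurrency_mode)
{
	return (concurrency_mode & HAL_STA) != 0;
}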
1538
1539struct wcn36xx_hal_config_bss_params {
1540 /* BSSID */
1541 u8 bssid[ETH_ALEN];
1542
1543 /* Self Mac Address */
1544 u8 self_mac_addr[ETH_ALEN];
1545
1546 /* BSS type */
1547 enum wcn36xx_hal_bss_type bss_type;
1548
1549 /* Operational Mode: AP =0, STA = 1 */
1550 u8 oper_mode;
1551
1552 /* Network Type */
1553 enum wcn36xx_hal_nw_type nw_type;
1554
1555 /* Used to classify PURE_11G/11G_MIXED to program MTU */
1556 u8 short_slot_time_supported;
1557
1558 /* Co-exist with 11a STA */
1559 u8 lla_coexist;
1560
1561 /* Co-exist with 11b STA */
1562 u8 llb_coexist;
1563
1564 /* Co-exist with 11g STA */
1565 u8 llg_coexist;
1566
1567 /* Coexistence with 11n STA */
1568 u8 ht20_coexist;
1569
1570 /* Non GF coexist flag */
1571 u8 lln_non_gf_coexist;
1572
1573 /* TXOP protection support */
1574 u8 lsig_tx_op_protection_full_support;
1575
1576 /* RIFS mode */
1577 u8 rifs_mode;
1578
1579 /* Beacon Interval in TU */
1580 u16 beacon_interval;
1581
1582 /* DTIM period */
1583 u8 dtim_period;
1584
1585 /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
1586 u8 tx_channel_width_set;
1587
1588 /* Operating channel */
1589 u8 oper_channel;
1590
1591 /* Extension channel for channel bonding */
1592 u8 ext_channel;
1593
1594 /* Reserved to align next field on a dword boundary */
1595 u8 reserved;
1596
1597 /* TODO move sta to the end for 3680 */
1598 /* Context of the station being added in HW
1599 * Add a STA entry for "itself" -
1600 *
1601 * On AP - Add the AP itself in an "STA context"
1602 *
1603 * On STA - Add the AP to which this STA is joining in an
1604 * "STA context"
1605 */
1606 struct wcn36xx_hal_config_sta_params sta;
1607 /* SSID of the BSS */
1608 struct wcn36xx_hal_mac_ssid ssid;
1609
1610 /* HAL should update the existing BSS entry, if this flag is set.
1611 * UMAC will set this flag in case of reassoc, where we want to
1612 * reuse the old BSSID and still return success. 0 = Add, 1 =
1613 * Update */
1614 u8 action;
1615
1616 /* MAC Rate Set */
1617 struct wcn36xx_hal_rate_set rateset;
1618
1619 /* Enable/Disable HT capabilities of the BSS */
1620 u8 ht;
1621
1622 /* Enable/Disable OBSS protection */
1623 u8 obss_prot_enabled;
1624
1625 /* RMF enabled/disabled */
1626 u8 rmf;
1627
1628 /* HT Operating Mode of the 802.11n STA */
1629 enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
1630
1631 /* Dual CTS Protection: 0 - Unused, 1 - Used */
1632 u8 dual_cts_protection;
1633
1634 /* Probe Response Max retries */
1635 u8 max_probe_resp_retry_limit;
1636
1637 /* To Enable Hidden ssid */
1638 u8 hidden_ssid;
1639
1640 /* To Enable/Disable FW Proxy Probe Resp */
1641 u8 proxy_probe_resp;
1642
1643 /* Boolean to indicate if EDCA params are valid. UMAC might not
1644 * have valid EDCA params or might not desire to apply EDCA params
1645 * during config BSS. 0 implies Not Valid; Non-Zero implies
1646 * valid */
1647 u8 edca_params_valid;
1648
1649 /* EDCA Parameters for Best Effort Access Category */
1650 struct wcn36xx_hal_edca_param_record acbe;
1651
1652 /* EDCA Parameters for Background Access Category */
1653 struct wcn36xx_hal_edca_param_record acbk;
1654
1655 /* EDCA Parameters for Video Access Category */
1656 struct wcn36xx_hal_edca_param_record acvi;
1657
1658 /* EDCA Parameters for Voice Access Category */
1659 struct wcn36xx_hal_edca_param_record acvo;
1660
1661 /* Ext Bss Config Msg if set */
1662 u8 ext_set_sta_key_param_valid;
1663
1664 /* SetStaKeyParams for ext bss msg */
1665 struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
1666
1667 /* Persona for the BSS can be STA,AP,GO,CLIENT value same as enum
1668 * wcn36xx_hal_con_mode */
1669 u8 wcn36xx_hal_persona;
1670
1671 u8 spectrum_mgt_enable;
1672
1673 /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
1674 s8 tx_mgmt_power;
1675
1676 /* maxTxPower has max power to be used after applying the power
1677 * constraint if any */
1678 s8 max_tx_power;
1679} __packed;
1680
1681struct wcn36xx_hal_config_bss_req_msg {
1682 struct wcn36xx_hal_msg_header header;
1683 struct wcn36xx_hal_config_bss_params bss_params;
1684} __packed;
1685
1686struct wcn36xx_hal_config_bss_params_v1 {
1687 /* BSSID */
1688 u8 bssid[ETH_ALEN];
1689
1690 /* Self Mac Address */
1691 u8 self_mac_addr[ETH_ALEN];
1692
1693 /* BSS type */
1694 enum wcn36xx_hal_bss_type bss_type;
1695
1696 /* Operational Mode: AP =0, STA = 1 */
1697 u8 oper_mode;
1698
1699 /* Network Type */
1700 enum wcn36xx_hal_nw_type nw_type;
1701
1702 /* Used to classify PURE_11G/11G_MIXED to program MTU */
1703 u8 short_slot_time_supported;
1704
1705 /* Co-exist with 11a STA */
1706 u8 lla_coexist;
1707
1708 /* Co-exist with 11b STA */
1709 u8 llb_coexist;
1710
1711 /* Co-exist with 11g STA */
1712 u8 llg_coexist;
1713
1714 /* Coexistence with 11n STA */
1715 u8 ht20_coexist;
1716
1717 /* Non GF coexist flag */
1718 u8 lln_non_gf_coexist;
1719
1720 /* TXOP protection support */
1721 u8 lsig_tx_op_protection_full_support;
1722
1723 /* RIFS mode */
1724 u8 rifs_mode;
1725
1726 /* Beacon Interval in TU */
1727 u16 beacon_interval;
1728
1729 /* DTIM period */
1730 u8 dtim_period;
1731
1732 /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
1733 u8 tx_channel_width_set;
1734
1735 /* Operating channel */
1736 u8 oper_channel;
1737
1738 /* Extension channel for channel bonding */
1739 u8 ext_channel;
1740
1741 /* Reserved to align next field on a dword boundary */
1742 u8 reserved;
1743
1744 /* SSID of the BSS */
1745 struct wcn36xx_hal_mac_ssid ssid;
1746
1747 /* HAL should update the existing BSS entry, if this flag is set.
1748 * UMAC will set this flag in case of reassoc, where we want to
1749 * reuse the old BSSID and still return success. 0 = Add, 1 =
1750 * Update */
1751 u8 action;
1752
1753 /* MAC Rate Set */
1754 struct wcn36xx_hal_rate_set rateset;
1755
1756 /* Enable/Disable HT capabilities of the BSS */
1757 u8 ht;
1758
1759 /* Enable/Disable OBSS protection */
1760 u8 obss_prot_enabled;
1761
1762 /* RMF enabled/disabled */
1763 u8 rmf;
1764
1765 /* HT Operating Mode operating mode of the 802.11n STA */
1766 enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
1767
1768 /* Dual CTS Protection: 0 - Unused, 1 - Used */
1769 u8 dual_cts_protection;
1770
1771 /* Probe Response Max retries */
1772 u8 max_probe_resp_retry_limit;
1773
1774 /* To Enable Hidden ssid */
1775 u8 hidden_ssid;
1776
1777 /* To Enable/Disable FW Proxy Probe Resp */
1778 u8 proxy_probe_resp;
1779
1780 /* Boolean to indicate if EDCA params are valid. UMAC might not
1781 * have valid EDCA params or might not desire to apply EDCA params
1782 * during config BSS. 0 implies Not Valid; Non-Zero implies
1783 * valid */
1784 u8 edca_params_valid;
1785
1786 /* EDCA Parameters for Best Effort Access Category */
1787 struct wcn36xx_hal_edca_param_record acbe;
1788
1789 /* EDCA Parameters for Background Access Category */
1790 struct wcn36xx_hal_edca_param_record acbk;
1791
1792 /* EDCA Parameters for Video Access Category */
1793 struct wcn36xx_hal_edca_param_record acvi;
1794
1795 /* EDCA Parameters for Voice Access Category */
1796 struct wcn36xx_hal_edca_param_record acvo;
1797
1798 /* Ext Bss Config Msg if set */
1799 u8 ext_set_sta_key_param_valid;
1800
1801 /* SetStaKeyParams for ext bss msg */
1802 struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
1803
1804 /* Persona for the BSS can be STA,AP,GO,CLIENT value same as enum
1805 * wcn36xx_hal_con_mode */
1806 u8 wcn36xx_hal_persona;
1807
1808 u8 spectrum_mgt_enable;
1809
1810 /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
1811 s8 tx_mgmt_power;
1812
1813 /* maxTxPower has max power to be used after applying the power
1814 * constraint if any */
1815 s8 max_tx_power;
1816
1817 /* Context of the station being added in HW
1818 * Add a STA entry for "itself" -
1819 *
1820 * On AP - Add the AP itself in an "STA context"
1821 *
1822 * On STA - Add the AP to which this STA is joining in an
1823 * "STA context"
1824 */
1825 struct wcn36xx_hal_config_sta_params_v1 sta;
1826} __packed;
1827
1828struct wcn36xx_hal_config_bss_req_msg_v1 {
1829 struct wcn36xx_hal_msg_header header;
1830 struct wcn36xx_hal_config_bss_params_v1 bss_params;
1831} __packed;
1832
1833struct wcn36xx_hal_config_bss_rsp_params {
1834 /* Success or Failure */
1835 u32 status;
1836
1837 /* BSS index allocated by HAL */
1838 u8 bss_index;
1839
1840 /* DPU descriptor index for PTK */
1841 u8 dpu_desc_index;
1842
1843 /* PTK DPU signature */
1844 u8 ucast_dpu_signature;
1845
1846 /* DPU descriptor index for GTK */
1847 u8 bcast_dpu_desc_indx;
1848
1849 /* GTK DPU signature */
1850 u8 bcast_dpu_signature;
1851
1852 /* DPU descriptor for IGTK */
1853 u8 mgmt_dpu_desc_index;
1854
1855 /* IGTK DPU signature */
1856 u8 mgmt_dpu_signature;
1857
1858 /* Station Index for BSS entry */
1859 u8 bss_sta_index;
1860
1861 /* Self station index for this BSS */
1862 u8 bss_self_sta_index;
1863
1864 /* Bcast station for buffering bcast frames in AP role */
1865 u8 bss_bcast_sta_idx;
1866
1867 /* MAC Address of STA(PEER/SELF) in staContext of configBSSReq */
1868 u8 mac[ETH_ALEN];
1869
1870 /* HAL fills in the tx power used for mgmt frames in this field. */
1871 s8 tx_mgmt_power;
1872
1873} __packed;
1874
1875struct wcn36xx_hal_config_bss_rsp_msg {
1876 struct wcn36xx_hal_msg_header header;
1877 struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
1878} __packed;
1879
1880struct wcn36xx_hal_delete_bss_req_msg {
1881 struct wcn36xx_hal_msg_header header;
1882
1883 /* BSS index to be deleted */
1884 u8 bss_index;
1885
1886} __packed;
1887
1888struct wcn36xx_hal_delete_bss_rsp_msg {
1889 struct wcn36xx_hal_msg_header header;
1890
1891 /* Success or Failure */
1892 u32 status;
1893
1894 /* BSS index that has been deleted */
1895 u8 bss_index;
1896
1897} __packed;
1898
1899struct wcn36xx_hal_join_req_msg {
1900 struct wcn36xx_hal_msg_header header;
1901
1902 /* Indicates the BSSID to which STA is going to associate */
1903 u8 bssid[ETH_ALEN];
1904
1905 /* Indicates the channel to switch to. */
1906 u8 channel;
1907
1908 /* Self STA MAC */
1909 u8 self_sta_mac_addr[ETH_ALEN];
1910
1911 /* Local power constraint */
1912 u8 local_power_constraint;
1913
1914 /* Secondary channel offset */
1915 enum phy_chan_bond_state secondary_channel_offset;
1916
1917 /* link State */
1918 enum wcn36xx_hal_link_state link_state;
1919
1920 /* Max TX power */
1921 s8 max_tx_power;
1922} __packed;
1923
1924struct wcn36xx_hal_join_rsp_msg {
1925 struct wcn36xx_hal_msg_header header;
1926
1927 /* success or failure */
1928 u32 status;
1929
1930 /* HAL fills in the tx power used for mgmt frames in this field */
1931 u8 tx_mgmt_power;
1932} __packed;
1933
1934struct post_assoc_req_msg {
1935 struct wcn36xx_hal_msg_header header;
1936
1937 struct wcn36xx_hal_config_sta_params sta_params;
1938 struct wcn36xx_hal_config_bss_params bss_params;
1939};
1940
1941struct post_assoc_rsp_msg {
1942 struct wcn36xx_hal_msg_header header;
1943 struct config_sta_rsp_params sta_rsp_params;
1944 struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
1945};
1946
1947/* This is used to create a set of WEP keys for a given BSS. */
1948struct wcn36xx_hal_set_bss_key_req_msg {
1949 struct wcn36xx_hal_msg_header header;
1950
1951 /* BSS Index of the BSS */
1952 u8 bss_idx;
1953
1954 /* Encryption Type used with peer */
1955 enum ani_ed_type enc_type;
1956
1957 /* Number of keys */
1958 u8 num_keys;
1959
1960 /* Array of keys. */
1961 struct wcn36xx_hal_keys keys[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
1962
1963 /* Control for Replay Count, 1= Single TID based replay count on Tx
1964 * 0 = Per TID based replay count on TX */
1965 u8 single_tid_rc;
1966} __packed;
1967
1968/* tagged version of set bss key */
1969struct wcn36xx_hal_set_bss_key_req_msg_tagged {
1970 struct wcn36xx_hal_set_bss_key_req_msg Msg;
1971 u32 tag;
1972} __packed;
1973
1974struct wcn36xx_hal_set_bss_key_rsp_msg {
1975 struct wcn36xx_hal_msg_header header;
1976
1977 /* success or failure */
1978 u32 status;
1979} __packed;
1980
1981/*
1982 * This is used to configure the key information on a given station.
1983 * When the sec_type is WEP40 or WEP104, the def_wep_idx is used to locate
1984 * a preconfigured key from a BSS the station is associated with; otherwise
1985 * a new key descriptor is created based on the key field.
1986 */
1987struct wcn36xx_hal_set_sta_key_req_msg {
1988 struct wcn36xx_hal_msg_header header;
1989 struct wcn36xx_hal_set_sta_key_params set_sta_key_params;
1990} __packed;
1991
1992struct wcn36xx_hal_set_sta_key_rsp_msg {
1993 struct wcn36xx_hal_msg_header header;
1994
1995 /* success or failure */
1996 u32 status;
1997} __packed;
1998
1999struct wcn36xx_hal_remove_bss_key_req_msg {
2000 struct wcn36xx_hal_msg_header header;
2001
2002 /* BSS Index of the BSS */
2003 u8 bss_idx;
2004
2005 /* Encryption Type used with peer */
2006 enum ani_ed_type enc_type;
2007
2008 /* Key Id */
2009 u8 key_id;
2010
2011 /* STATIC/DYNAMIC. Used in nullifying Key Descriptors for
2012 * Static/Dynamic keys */
2013 enum ani_wep_type wep_type;
2014} __packed;
2015
2016struct wcn36xx_hal_remove_bss_key_rsp_msg {
2017 struct wcn36xx_hal_msg_header header;
2018
2019 /* success or failure */
2020 u32 status;
2021} __packed;
2022
2023/*
2024 * This is used by PE to Remove the key information on a given station.
2025 */
2026struct wcn36xx_hal_remove_sta_key_req_msg {
2027 struct wcn36xx_hal_msg_header header;
2028
2029 /* STA Index */
2030 u16 sta_idx;
2031
2032 /* Encryption Type used with peer */
2033 enum ani_ed_type enc_type;
2034
2035 /* Key Id */
2036 u8 key_id;
2037
2038 /* Whether to invalidate the Broadcast key or Unicast key. In case
2039 * of WEP, the same key is used for both broadcast and unicast. */
2040 u8 unicast;
2041
2042} __packed;
2043
2044struct wcn36xx_hal_remove_sta_key_rsp_msg {
2045 struct wcn36xx_hal_msg_header header;
2046
2047 /*success or failure */
2048 u32 status;
2049
2050} __packed;
2051
2052#ifdef FEATURE_OEM_DATA_SUPPORT
2053
2054#ifndef OEM_DATA_REQ_SIZE
2055#define OEM_DATA_REQ_SIZE 134
2056#endif
2057
2058#ifndef OEM_DATA_RSP_SIZE
2059#define OEM_DATA_RSP_SIZE 1968
2060#endif
2061
2062struct start_oem_data_req_msg {
2063 struct wcn36xx_hal_msg_header header;
2064
2065 u32 status;
2066 tSirMacAddr self_mac_addr;
2067 u8 oem_data_req[OEM_DATA_REQ_SIZE];
2068
2069};
2070
2071struct start_oem_data_rsp_msg {
2072 struct wcn36xx_hal_msg_header header;
2073
2074 u8 oem_data_rsp[OEM_DATA_RSP_SIZE];
2075};
2076
2077#endif
2078
2079struct wcn36xx_hal_switch_channel_req_msg {
2080 struct wcn36xx_hal_msg_header header;
2081
2082 /* Channel number */
2083 u8 channel_number;
2084
2085 /* Local power constraint */
2086 u8 local_power_constraint;
2087
2088 /* Secondary channel offset */
2089 enum phy_chan_bond_state secondary_channel_offset;
2090
2091 /* HAL fills in the tx power used for mgmt frames in this field. */
2092 u8 tx_mgmt_power;
2093
2094 /* Max TX power */
2095 u8 max_tx_power;
2096
2097 /* Self STA MAC */
2098 u8 self_sta_mac_addr[ETH_ALEN];
2099
2100 /* VO WIFI comment: BSSID needed to identify session. As the
2101 * request has power constraints, this should be applied only to
2102 * that session. Since MTU timing and EDCA are sessionized, this
2103 * struct needs to be sessionized and bssid needs to be out of the
2104 * VOWifi feature flag. V IMP: Keep the bssid field at the end of
2105 * this msg. It is used to maintain backward compatibility by way
2106 * of ignoring it when using new host/old FW or old host/new FW,
2107 * since it is at the end of this struct.
2108 */
2109 u8 bssid[ETH_ALEN];
2110} __packed;
2111
2112struct wcn36xx_hal_switch_channel_rsp_msg {
2113 struct wcn36xx_hal_msg_header header;
2114
2115 /* Status */
2116 u32 status;
2117
2118 /* Channel number - same as in request */
2119 u8 channel_number;
2120
2121 /* HAL fills in the tx power used for mgmt frames in this field */
2122 u8 tx_mgmt_power;
2123
2124 /* BSSID needed to identify session - same as in request */
2125 u8 bssid[ETH_ALEN];
2126
2127} __packed;
2128
2129struct update_edca_params_req_msg {
2130 struct wcn36xx_hal_msg_header header;
2131
2132 /*BSS Index */
2133 u16 bss_index;
2134
2135 /* Best Effort */
2136 struct wcn36xx_hal_edca_param_record acbe;
2137
2138 /* Background */
2139 struct wcn36xx_hal_edca_param_record acbk;
2140
2141 /* Video */
2142 struct wcn36xx_hal_edca_param_record acvi;
2143
2144 /* Voice */
2145 struct wcn36xx_hal_edca_param_record acvo;
2146};
2147
2148struct update_edca_params_rsp_msg {
2149 struct wcn36xx_hal_msg_header header;
2150
2151 /* success or failure */
2152 u32 status;
2153};
2154
2155struct dpu_stats_params {
2156 /* Index of STA to which the statistics belong */
2157 u16 sta_index;
2158
2159 /* Encryption mode */
2160 u8 enc_mode;
2161
2162 /* status */
2163 u32 status;
2164
2165 /* Statistics */
2166 u32 send_blocks;
2167 u32 recv_blocks;
2168 u32 replays;
2169 u8 mic_error_cnt;
2170 u32 prot_excl_cnt;
2171 u16 format_err_cnt;
2172 u16 un_decryptable_cnt;
2173 u32 decrypt_err_cnt;
2174 u32 decrypt_ok_cnt;
2175};
2176
2177struct wcn36xx_hal_stats_req_msg {
2178 struct wcn36xx_hal_msg_header header;
2179
2180 /* Valid STA Idx for per STA stats request */
2181 u32 sta_id;
2182
2183 /* Categories of stats requested as specified in eHalStatsMask */
2184 u32 stats_mask;
2185};
2186
2187struct ani_summary_stats_info {
2188 /* Total number of packets(per AC) that were successfully
2189 * transmitted with retries */
2190 u32 retry_cnt[4];
2191
2192 /* The number of MSDU packets and MMPDU frames per AC that the
2193 * 802.11 station successfully transmitted after more than one
2194 * retransmission attempt */
2195 u32 multiple_retry_cnt[4];
2196
2197 /* Total number of packets(per AC) that were successfully
2198 * transmitted (with and without retries, including multi-cast,
2199 * broadcast) */
2200 u32 tx_frm_cnt[4];
2201
2202 /* Total number of packets that were successfully received (after
2203 * appropriate filter rules including multi-cast, broadcast) */
2204 u32 rx_frm_cnt;
2205
2206 /* Total number of duplicate frames received successfully */
2207 u32 frm_dup_cnt;
2208
2209 /* Total number of packets (per AC) that failed to transmit */
2210 u32 fail_cnt[4];
2211
2212 /* Total number of RTS/CTS sequence failures for transmission of a
2213 * packet */
2214 u32 rts_fail_cnt;
2215
2216 /* Total number of packets that failed to transmit because no ACK was
2217 * received from the remote entity */
2218 u32 ack_fail_cnt;
2219
2220 /* Total number of RTS/CTS sequence successes for transmission of a
2221 * packet */
2222 u32 rts_succ_cnt;
2223
2224 /* The sum of the receive error count and dropped-receive-buffer
2225 * error count. HAL will provide this as a sum of (FCS error) +
2226 * (Fail get BD/PDU in HW) */
2227 u32 rx_discard_cnt;
2228
2229 /*
2230 * The receive error count. HAL will provide the RxP FCS error
2231 * global counter. */
2232 u32 rx_error_cnt;
2233
2234 /* The sum of the transmit-directed byte count, transmit-multicast
2235 * byte count and transmit-broadcast byte count. HAL will sum TPE
2236 * UC/MC/BCAST global counters to provide this. */
2237 u32 tx_byte_cnt;
2238};
2239
2240/* defines tx_rate_flags */
2241enum tx_rate_info {
2242 /* Legacy rates */
2243 HAL_TX_RATE_LEGACY = 0x1,
2244
2245 /* HT20 rates */
2246 HAL_TX_RATE_HT20 = 0x2,
2247
2248 /* HT40 rates */
2249 HAL_TX_RATE_HT40 = 0x4,
2250
2251 /* Rate with Short guard interval */
2252 HAL_TX_RATE_SGI = 0x8,
2253
2254 /* Rate with Long guard interval */
2255 HAL_TX_RATE_LGI = 0x10
2256};
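/*
 * Illustrative sketch, not part of the original header: tx_rate_flags in
 * struct ani_global_class_a_stats_info below carries a bitmask of the
 * values above, so an HT40 short-GI rate sets both HAL_TX_RATE_HT40 and
 * HAL_TX_RATE_SGI. The helper name is hypothetical.
 */
static inline bool wcn36xx_hal_is_ht40_sgi(u32 tx_rate_flags)
{
	return (tx_rate_flags & HAL_TX_RATE_HT40) &&
	       (tx_rate_flags & HAL_TX_RATE_SGI);
}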
2257
2258struct ani_global_class_a_stats_info {
2259 /* The number of MPDU frames received by the 802.11 station for
2260 * MSDU packets or MMPDU frames */
2261 u32 rx_frag_cnt;
2262
2263 /* The number of MPDU frames received by the 802.11 station for
2264 * MSDU packets or MMPDU frames when a promiscuous packet filter
2265 * was enabled */
2266 u32 promiscuous_rx_frag_cnt;
2267
2268 /* The receiver input sensitivity referenced to a FER of 8% at an
2269 * MPDU length of 1024 bytes at the antenna connector. Each element
2270 * of the array shall correspond to a supported rate and the order
2271 * shall be the same as the supportedRates parameter. */
2272 u32 rx_input_sensitivity;
2273
2274 /* The maximum transmit power in dBm, up to one decimal place. For
2275 * example, if it is 10.5 dBm, the value would be 105 */
2276 u32 max_pwr;
2277
2278 /* Number of times the receiver failed to synchronize with the
2279 * incoming signal after detecting the sync in the preamble of the
2280 * transmitted PLCP protocol data unit. */
2281 u32 sync_fail_cnt;
2282
2283 /* Legacy transmit rate, in units of 500 kbit/sec, for the most
2284 * recently transmitted frame */
2285 u32 tx_rate;
2286
2287 /* mcs index for HT20 and HT40 rates */
2288 u32 mcs_index;
2289
2290 /* to differentiate between HT20 and HT40 rates; short and long
2291 * guard interval */
2292 u32 tx_rate_flags;
2293};
2294
2295struct ani_global_security_stats {
2296 /* The number of unencrypted received MPDU frames that the MAC
2297 * layer discarded when the IEEE 802.11 dot11ExcludeUnencrypted
2298 * management information base (MIB) object is enabled */
2299 u32 rx_wep_unencrypted_frm_cnt;
2300
2301 /* The number of received MSDU packets that the 802.11 station
2302 * discarded because of MIC failures */
2303 u32 rx_mic_fail_cnt;
2304
2305 /* The number of encrypted MPDU frames that the 802.11 station
2306 * failed to decrypt because of a TKIP ICV error */
2307 u32 tkip_icv_err;
2308
2309 /* The number of received MPDU frames that the 802.11 station discarded
2310 * because of an invalid AES-CCMP format */
2311 u32 aes_ccmp_format_err;
2312
2313 /* The number of received MPDU frames that the 802.11 station
2314 * discarded because of the AES-CCMP replay protection procedure */
2315 u32 aes_ccmp_replay_cnt;
2316
2317 /* The number of received MPDU frames that the 802.11 station
2318 * discarded because of errors detected by the AES-CCMP decryption
2319 * algorithm */
2320 u32 aes_ccmp_decrpt_err;
2321
2322 /* The number of encrypted MPDU frames received for which a WEP
2323 * decryption key was not available on the 802.11 station */
2324 u32 wep_undecryptable_cnt;
2325
2326 /* The number of encrypted MPDU frames that the 802.11 station
2327 * failed to decrypt because of a WEP ICV error */
2328 u32 wep_icv_err;
2329
2330 /* The number of received encrypted packets that the 802.11 station
2331 * successfully decrypted */
2332 u32 rx_decrypt_succ_cnt;
2333
2334 /* The number of encrypted packets that the 802.11 station failed
2335 * to decrypt */
2336 u32 rx_decrypt_fail_cnt;
2337};
2338
2339struct ani_global_class_b_stats_info {
2340 struct ani_global_security_stats uc_stats;
2341 struct ani_global_security_stats mc_bc_stats;
2342};
2343
2344struct ani_global_class_c_stats_info {
2345 /* This counter shall be incremented for a received A-MSDU frame
2346 * with the station's MAC address in the address 1 field or an
2347 * A-MSDU frame with a group address in the address 1 field */
2348 u32 rx_amsdu_cnt;
2349
2350 /* This counter shall be incremented when the MAC receives an AMPDU
2351 * from the PHY */
2352 u32 rx_ampdu_cnt;
2353
2354 /* This counter shall be incremented when a Frame is transmitted
2355 * only on the primary channel */
2356 u32 tx_20_frm_cnt;
2357
2358 /* This counter shall be incremented when a Frame is received only
2359 * on the primary channel */
2360 u32 rx_20_frm_cnt;
2361
2362 /* This counter shall be incremented by the number of MPDUs
2363 * received in the A-MPDU when an A-MPDU is received */
2364 u32 rx_mpdu_in_ampdu_cnt;
2365
2366 /* This counter shall be incremented when an MPDU delimiter has a
2367 * CRC error when this is the first CRC error in the received AMPDU
2368 * or when the previous delimiter has been decoded correctly */
2369 u32 ampdu_delimiter_crc_err;
2370};
2371
2372struct ani_per_sta_stats_info {
2373 /* The number of MPDU frames that the 802.11 station transmitted
2374 * and acknowledged through a received 802.11 ACK frame */
2375 u32 tx_frag_cnt[4];
2376
2377 /* This counter shall be incremented when an A-MPDU is transmitted */
2378 u32 tx_ampdu_cnt;
2379
2380 /* This counter shall increment by the number of MPDUs in the AMPDU
2381 * when an A-MPDU is transmitted */
2382 u32 tx_mpdu_in_ampdu_cnt;
2383};
2384
2385struct wcn36xx_hal_stats_rsp_msg {
2386 struct wcn36xx_hal_msg_header header;
2387
2388 /* Success or Failure */
2389 u32 status;
2390
2391 /* STA Idx */
2392 u32 sta_index;
2393
2394 /* Categories of STATS being returned as per eHalStatsMask */
2395 u32 stats_mask;
2396
2397 /* message type is same as the request type */
2398 u16 msg_type;
2399
2400 /* length of the entire request, includes the pStatsBuf length too */
2401 u16 msg_len;
2402};
2403
2404struct wcn36xx_hal_set_link_state_req_msg {
2405 struct wcn36xx_hal_msg_header header;
2406
2407 u8 bssid[ETH_ALEN];
2408 enum wcn36xx_hal_link_state state;
2409 u8 self_mac_addr[ETH_ALEN];
2410
2411} __packed;
2412
2413struct set_link_state_rsp_msg {
2414 struct wcn36xx_hal_msg_header header;
2415
2416 /* success or failure */
2417 u32 status;
2418};
2419
2420/* TSPEC Params */
2421struct wcn36xx_hal_ts_info_tfc {
2422#ifndef ANI_LITTLE_BIT_ENDIAN
2423 u16 ackPolicy:2;
2424 u16 userPrio:3;
2425 u16 psb:1;
2426 u16 aggregation:1;
2427 u16 accessPolicy:2;
2428 u16 direction:2;
2429 u16 tsid:4;
2430 u16 trafficType:1;
2431#else
2432 u16 trafficType:1;
2433 u16 tsid:4;
2434 u16 direction:2;
2435 u16 accessPolicy:2;
2436 u16 aggregation:1;
2437 u16 psb:1;
2438 u16 userPrio:3;
2439 u16 ackPolicy:2;
2440#endif
2441};
2442
2443/* Flag to schedule the traffic type */
2444struct wcn36xx_hal_ts_info_sch {
2445#ifndef ANI_LITTLE_BIT_ENDIAN
2446 u8 rsvd:7;
2447 u8 schedule:1;
2448#else
2449 u8 schedule:1;
2450 u8 rsvd:7;
2451#endif
2452};
2453
2454/* Traffic and scheduling info */
2455struct wcn36xx_hal_ts_info {
2456 struct wcn36xx_hal_ts_info_tfc traffic;
2457 struct wcn36xx_hal_ts_info_sch schedule;
2458};
2459
2460/* Information elements */
2461struct wcn36xx_hal_tspec_ie {
2462 u8 type;
2463 u8 length;
2464 struct wcn36xx_hal_ts_info ts_info;
2465 u16 nom_msdu_size;
2466 u16 max_msdu_size;
2467 u32 min_svc_interval;
2468 u32 max_svc_interval;
2469 u32 inact_interval;
2470 u32 suspend_interval;
2471 u32 svc_start_time;
2472 u32 min_data_rate;
2473 u32 mean_data_rate;
2474 u32 peak_data_rate;
2475 u32 max_burst_sz;
2476 u32 delay_bound;
2477 u32 min_phy_rate;
2478 u16 surplus_bw;
2479 u16 medium_time;
2480};
2481
2482struct add_ts_req_msg {
2483 struct wcn36xx_hal_msg_header header;
2484
2485 /* Station Index */
2486 u16 sta_index;
2487
2488 /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS */
2489 u16 tspec_index;
2490
2491 /* To program TPE with required parameters */
2492 struct wcn36xx_hal_tspec_ie tspec;
2493
2494 /* U-APSD Flags: 1b per AC. Encoded as follows:
2495 b7 b6 b5 b4 b3 b2 b1 b0 =
2496 X X X X BE BK VI VO */
2497 u8 uapsd;
2498
2499 /* These parameters are for all the access categories */
2500
2501 /* Service Interval */
2502 u32 service_interval[WCN36XX_HAL_MAX_AC];
2503
2504 /* Suspend Interval */
2505 u32 suspend_interval[WCN36XX_HAL_MAX_AC];
2506
2507 /* Delay Interval */
2508 u32 delay_interval[WCN36XX_HAL_MAX_AC];
2509};
2510
2511struct add_rs_rsp_msg {
2512 struct wcn36xx_hal_msg_header header;
2513
2514 /* success or failure */
2515 u32 status;
2516};
2517
2518struct del_ts_req_msg {
2519 struct wcn36xx_hal_msg_header header;
2520
2521 /* Station Index */
2522 u16 sta_index;
2523
2524 /* TSPEC identifier uniquely identifying a TSPEC for a STA in a BSS */
2525 u16 tspec_index;
2526
2527 /* To lookup station id using the mac address */
2528 u8 bssid[ETH_ALEN];
2529};
2530
2531struct del_ts_rsp_msg {
2532 struct wcn36xx_hal_msg_header header;
2533
2534 /* success or failure */
2535 u32 status;
2536};
2537
2538/* End of TSpec Parameters */
2539
2540/* Start of BLOCK ACK related Parameters */
2541
2542struct wcn36xx_hal_add_ba_session_req_msg {
2543 struct wcn36xx_hal_msg_header header;
2544
2545 /* Station Index */
2546 u16 sta_index;
2547
2548 /* Peer MAC Address */
2549 u8 mac_addr[ETH_ALEN];
2550
2551 /* ADDBA Action Frame dialog token
2552 HAL will not interpret this object */
2553 u8 dialog_token;
2554
2555 /* TID for which the BA is being setup
2556 This identifies the TC or TS of interest */
2557 u8 tid;
2558
2559 /* 0 - Delayed BA (Not supported)
2560 1 - Immediate BA */
2561 u8 policy;
2562
2563 /* Indicates the number of buffers for this TID (baTID)
2564 NOTE - This is the requested buffer size. When this
2565 is processed by HAL and subsequently by HDD, it is
2566 possible that HDD may change this buffer size. Any
2567 change in the buffer size should be noted by PE and
2568 advertised appropriately in the ADDBA response */
2569 u16 buffer_size;
2570
2571 /* BA timeout in TUs; 0 means no timeout will occur */
2572 u16 timeout;
2573
2574 /* b0..b3 - Fragment Number - Always set to 0
2575 b4..b15 - Starting Sequence Number of first MSDU
2576 for which this BA is setup */
2577 u16 ssn;
2578
2579 /* ADDBA direction
2580 1 - Originator
2581 0 - Recipient */
2582 u8 direction;
2583} __packed;
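/*
 * Illustrative sketch, not part of the original header: packing the
 * 'ssn' field described above, with the fragment number (b0..b3) forced
 * to 0 and the 12-bit starting sequence number placed in b4..b15. The
 * helper name is hypothetical.
 */
static inline u16 wcn36xx_hal_pack_ba_ssn_example(u16 start_seq_num)
{
	return (u16)((start_seq_num & 0x0fff) << 4);
}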
2584
2585struct wcn36xx_hal_add_ba_session_rsp_msg {
2586 struct wcn36xx_hal_msg_header header;
2587
2588 /* success or failure */
2589 u32 status;
2590
2591 /* Dialog token */
2592 u8 dialog_token;
2593
2594 /* TID for which the BA session has been setup */
2595 u8 ba_tid;
2596
2597 /* BA Buffer Size allocated for the current BA session */
2598 u8 ba_buffer_size;
2599
2600 u8 ba_session_id;
2601
2602 /* Reordering Window buffer */
2603 u8 win_size;
2604
2605 /* Station index identifying the STA */
2606 u8 sta_index;
2607
2608 /* Starting Sequence Number */
2609 u16 ssn;
2610} __packed;
2611
2612struct wcn36xx_hal_add_ba_req_msg {
2613 struct wcn36xx_hal_msg_header header;
2614
2615 /* Session Id */
2616 u8 session_id;
2617
2618 /* Reorder Window Size */
2619 u8 win_size;
2620/* Old FW 1.2.2.4 does not support this */
2621#ifdef FEATURE_ON_CHIP_REORDERING
2622 u8 reordering_done_on_chip;
2623#endif
2624} __packed;
2625
2626struct wcn36xx_hal_add_ba_rsp_msg {
2627 struct wcn36xx_hal_msg_header header;
2628
2629 /* success or failure */
2630 u32 status;
2631
2632 /* Dialog token */
2633 u8 dialog_token;
2634} __packed;
2635
2636struct add_ba_info {
2637 u16 ba_enable:1;
2638 u16 starting_seq_num:12;
2639 u16 reserved:3;
2640};
2641
2642struct wcn36xx_hal_trigger_ba_rsp_candidate {
2643 u8 sta_addr[ETH_ALEN];
2644 struct add_ba_info ba_info[STACFG_MAX_TC];
2645} __packed;
2646
2647struct wcn36xx_hal_trigget_ba_req_candidate {
2648 u8 sta_index;
2649 u8 tid_bitmap;
2650} __packed;
2651
2652struct wcn36xx_hal_trigger_ba_req_msg {
2653 struct wcn36xx_hal_msg_header header;
2654
2655 /* Session Id */
2656 u8 session_id;
2657
2658 /* baCandidateCnt is followed by trigger BA
2659 * Candidate List(tTriggerBaCandidate)
2660 */
2661 u16 candidate_cnt;
2662
2663} __packed;
2664
2665struct wcn36xx_hal_trigger_ba_rsp_msg {
2666 struct wcn36xx_hal_msg_header header;
2667
2668 /* TO SUPPORT BT-AMP */
2669 u8 bssid[ETH_ALEN];
2670
2671 /* success or failure */
2672 u32 status;
2673
2674 /* baCandidateCnt is followed by trigger BA
2675 * Rsp Candidate List(tTriggerRspBaCandidate)
2676 */
2677 u16 candidate_cnt;
2678} __packed;
2679
2680struct wcn36xx_hal_del_ba_req_msg {
2681 struct wcn36xx_hal_msg_header header;
2682
2683 /* Station Index */
2684 u16 sta_index;
2685
2686 /* TID for which the BA session is being deleted */
2687 u8 tid;
2688
2689 /* DELBA direction
2690 1 - Originator
2691 0 - Recipient */
2692 u8 direction;
2693} __packed;
2694
2695struct wcn36xx_hal_del_ba_rsp_msg {
2696 struct wcn36xx_hal_msg_header header;
2697
2698 /* success or failure */
2699 u32 status;
2700} __packed;
2701
2702struct tsm_stats_req_msg {
2703 struct wcn36xx_hal_msg_header header;
2704
2705 /* Traffic Id */
2706 u8 tid;
2707
2708 u8 bssid[ETH_ALEN];
2709};
2710
2711struct tsm_stats_rsp_msg {
2712 struct wcn36xx_hal_msg_header header;
2713
2714 /*success or failure */
2715 u32 status;
2716
2717 /* Uplink Packet Queue delay */
2718 u16 uplink_pkt_queue_delay;
2719
2720 /* Uplink Packet Queue delay histogram */
2721 u16 uplink_pkt_queue_delay_hist[4];
2722
2723 /* Uplink Packet Transmit delay */
2724 u32 uplink_pkt_tx_delay;
2725
2726 /* Uplink Packet loss */
2727 u16 uplink_pkt_loss;
2728
2729 /* Uplink Packet count */
2730 u16 uplink_pkt_count;
2731
2732 /* Roaming count */
2733 u8 roaming_count;
2734
2735 /* Roaming Delay */
2736 u16 roaming_delay;
2737};
2738
2739struct set_key_done_msg {
2740 struct wcn36xx_hal_msg_header header;
2741
2742 /*bssid of the keys */
2743 u8 bssidx;
2744 u8 enc_type;
2745};
2746
2747struct wcn36xx_hal_nv_img_download_req_msg {
2748 /* Note: The length specified in wcn36xx_hal_nv_img_download_req_msg
2749 * messages should be
2750 * header.len = sizeof(wcn36xx_hal_nv_img_download_req_msg) +
2751 * nv_img_buffer_size */
2752 struct wcn36xx_hal_msg_header header;
2753
2754 /* Fragment sequence number of the NV Image. Note that NV Image
2755 * might not fit into one message due to size limitation of the SMD
2756 * channel FIFO. UMAC can hence choose to chop the NV blob into
2757 * multiple fragments starting with sequence number 0, 1, 2 etc.
2758 * The last fragment MUST be indicated by marking the
2759 * isLastFragment field to 1. Note that all the NV blobs would be
2760 * concatenated together by HAL without any padding bytes in
2761 * between.*/
2762 u16 frag_number;
2763
2764 /* Is this the last fragment? When set to 1 it indicates that no
2765 * more fragments will be sent by UMAC and HAL can concatenate all
2766 * the NV blobs rcvd & proceed with the parsing. HAL would generate
2767 * a WCN36XX_HAL_DOWNLOAD_NV_RSP to the WCN36XX_HAL_DOWNLOAD_NV_REQ
2768 * after it receives each fragment */
2769 u16 last_fragment;
2770
2771 /* NV Image size (number of bytes) */
2772 u32 nv_img_buffer_size;
2773
2774 /* Following the 'nv_img_buffer_size', there should be
2775 * nv_img_buffer_size bytes of NV Image i.e.
2776 * u8[nv_img_buffer_size] */
2777} __packed;
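/*
 * Illustrative sketch, not part of the original header: sizing one NV
 * download fragment according to the note above, i.e. header.len covers
 * the fixed message plus the fragment payload that follows it. The
 * helper name and its parameters are hypothetical.
 */
static inline void wcn36xx_hal_nv_frag_init_example(
		struct wcn36xx_hal_nv_img_download_req_msg *msg,
		u16 frag_number, bool is_last, u32 frag_size)
{
	msg->frag_number = frag_number;
	msg->last_fragment = is_last ? 1 : 0;
	msg->nv_img_buffer_size = frag_size;
	msg->header.len = sizeof(*msg) + frag_size;
}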
2778
2779struct wcn36xx_hal_nv_img_download_rsp_msg {
2780 struct wcn36xx_hal_msg_header header;
2781
2782 /* Success or Failure. HAL would generate a
2783 * WCN36XX_HAL_DOWNLOAD_NV_RSP after each fragment */
2784 u32 status;
2785} __packed;
2786
2787struct wcn36xx_hal_nv_store_ind {
2788 /* Note: The length specified in tHalNvStoreInd messages should be
2789 * header.msgLen = sizeof(tHalNvStoreInd) + nvBlobSize */
2790 struct wcn36xx_hal_msg_header header;
2791
2792 /* NV Item */
2793 u32 table_id;
2794
2795 /* Size of NV Blob */
2796 u32 nv_blob_size;
2797
2798 /* Following the 'nvBlobSize', there should be nvBlobSize bytes of
2799 * NV blob i.e. u8[nvBlobSize] */
2800};
2801
2802/* End of Block Ack Related Parameters */
2803
2804#define WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE 6
2805
2806/* Definition for MIC failure indication. MAC reports this each time a MIC
2807 * failure occurs on an Rx TKIP packet
2808 */
2809struct mic_failure_ind_msg {
2810 struct wcn36xx_hal_msg_header header;
2811
2812 u8 bssid[ETH_ALEN];
2813
2814 /* address used to compute MIC */
2815 u8 src_addr[ETH_ALEN];
2816
2817 /* transmitter address */
2818 u8 ta_addr[ETH_ALEN];
2819
2820 u8 dst_addr[ETH_ALEN];
2821
2822 u8 multicast;
2823
2824 /* first byte of IV */
2825 u8 iv1;
2826
2827 /* second byte of IV */
2828 u8 key_id;
2829
2830 /* sequence number */
2831 u8 tsc[WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE];
2832
2833 /* receive address */
2834 u8 rx_addr[ETH_ALEN];
2835};
2836
2837struct update_vht_op_mode_req_msg {
2838 struct wcn36xx_hal_msg_header header;
2839
2840 u16 op_mode;
2841 u16 sta_id;
2842};
2843
2844struct update_vht_op_mode_params_rsp_msg {
2845 struct wcn36xx_hal_msg_header header;
2846
2847 u32 status;
2848};
2849
2850struct update_beacon_req_msg {
2851 struct wcn36xx_hal_msg_header header;
2852
2853 u8 bss_index;
2854
2855 /* shortPreamble mode. HAL should update all the STA rates when it
2856 * receives this message */
2857 u8 short_preamble;
2858
2859 /* short Slot time. */
2860 u8 short_slot_time;
2861
2862 /* Beacon Interval */
2863 u16 beacon_interval;
2864
2865 /* Protection related */
2866 u8 lla_coexist;
2867 u8 llb_coexist;
2868 u8 llg_coexist;
2869 u8 ht20_coexist;
2870 u8 lln_non_gf_coexist;
2871 u8 lsig_tx_op_protection_full_support;
2872 u8 rifs_mode;
2873
2874 u16 param_change_bitmap;
2875};
2876
2877struct update_beacon_rsp_msg {
2878 struct wcn36xx_hal_msg_header header;
2879 u32 status;
2880};
2881
2882struct wcn36xx_hal_send_beacon_req_msg {
2883 struct wcn36xx_hal_msg_header header;
2884
2885 /* length of the template. */
2886 u32 beacon_length;
2887
2888 /* Beacon data. */
2889 u8 beacon[BEACON_TEMPLATE_SIZE];
2890
2891 u8 bssid[ETH_ALEN];
2892
2893 /* TIM IE offset from the beginning of the template. */
2894 u32 tim_ie_offset;
2895
2896 /* P2P IE offset from the beginning of the template */
2897 u16 p2p_ie_offset;
2898} __packed;
2899
2900struct send_beacon_rsp_msg {
2901 struct wcn36xx_hal_msg_header header;
2902 u32 status;
2903} __packed;
2904
2905struct enable_radar_req_msg {
2906 struct wcn36xx_hal_msg_header header;
2907
2908 u8 bssid[ETH_ALEN];
2909 u8 channel;
2910};
2911
2912struct enable_radar_rsp_msg {
2913 struct wcn36xx_hal_msg_header header;
2914
2915 /* Link Parameters */
2916 u8 bssid[ETH_ALEN];
2917
2918 /* success or failure */
2919 u32 status;
2920};
2921
2922struct radar_detect_intr_ind_msg {
2923 struct wcn36xx_hal_msg_header header;
2924
2925 u8 radar_det_channel;
2926};
2927
2928struct radar_detect_ind_msg {
2929 struct wcn36xx_hal_msg_header header;
2930
2931 /* channel number on which the RADAR was detected */
2932 u8 channel_number;
2933
2934 /* RADAR pulse width in microseconds */
2935 u16 radar_pulse_width;
2936
2937 /* Number of RADAR pulses */
2938 u16 num_radar_pulse;
2939};
2940
2941struct wcn36xx_hal_get_tpc_report_req_msg {
2942 struct wcn36xx_hal_msg_header header;
2943
2944 u8 sta[ETH_ALEN];
2945 u8 dialog_token;
2946 u8 txpower;
2947};
2948
2949struct wcn36xx_hal_get_tpc_report_rsp_msg {
2950 struct wcn36xx_hal_msg_header header;
2951
2952 /* success or failure */
2953 u32 status;
2954};
2955
2956struct wcn36xx_hal_send_probe_resp_req_msg {
2957 struct wcn36xx_hal_msg_header header;
2958
2959 u8 probe_resp_template[BEACON_TEMPLATE_SIZE];
2960 u32 probe_resp_template_len;
2961 u32 proxy_probe_req_valid_ie_bmap[8];
2962 u8 bssid[ETH_ALEN];
2963};
2964
2965struct send_probe_resp_rsp_msg {
2966 struct wcn36xx_hal_msg_header header;
2967
2968 /* success or failure */
2969 u32 status;
2970};
2971
2972struct send_unknown_frame_rx_ind_msg {
2973 struct wcn36xx_hal_msg_header header;
2974
2975 /* success or failure */
2976 u32 status;
2977};
2978
2979struct wcn36xx_hal_delete_sta_context_ind_msg {
2980 struct wcn36xx_hal_msg_header header;
2981
2982 u16 aid;
2983 u16 sta_id;
2984
2985 /* TO SUPPORT BT-AMP */
2986 u8 bssid[ETH_ALEN];
2987
2988 /* HAL copies bssid from the sta table. */
2989 u8 addr2[ETH_ALEN];
2990
2991 /* To unify the keepalive / unknown A2 / tim-based disassociation */
2992 u16 reason_code;
2993} __packed;
2994
2995struct indicate_del_sta {
2996 struct wcn36xx_hal_msg_header header;
2997 u8 aid;
2998 u8 sta_index;
2999 u8 bss_index;
3000 u8 reason_code;
3001 u32 status;
3002};
3003
3004struct bt_amp_event_msg {
3005 struct wcn36xx_hal_msg_header header;
3006
3007 enum bt_amp_event_type btAmpEventType;
3008};
3009
3010struct bt_amp_event_rsp {
3011 struct wcn36xx_hal_msg_header header;
3012
3013 /* success or failure */
3014 u32 status;
3015};
3016
3017struct tl_hal_flush_ac_req_msg {
3018 struct wcn36xx_hal_msg_header header;
3019
3020 /* Station Index. originates from HAL */
3021 u8 sta_id;
3022
3023 /* TID for which the transmit queue is being flushed */
3024 u8 tid;
3025};
3026
3027struct tl_hal_flush_ac_rsp_msg {
3028 struct wcn36xx_hal_msg_header header;
3029
3030 /* Station Index. originates from HAL */
3031 u8 sta_id;
3032
3033 /* TID for which the transmit queue is being flushed */
3034 u8 tid;
3035
3036 /* success or failure */
3037 u32 status;
3038};
3039
3040struct wcn36xx_hal_enter_imps_req_msg {
3041 struct wcn36xx_hal_msg_header header;
3042};
3043
3044struct wcn36xx_hal_exit_imps_req {
3045 struct wcn36xx_hal_msg_header header;
3046};
3047
3048struct wcn36xx_hal_enter_bmps_req_msg {
3049 struct wcn36xx_hal_msg_header header;
3050
3051 u8 bss_index;
3052
3053 /* TBTT value derived from the last beacon */
3054#ifndef BUILD_QWPTTSTATIC
3055 u64 tbtt;
3056#endif
3057 u8 dtim_count;
3058
3059 /* DTIM period given to HAL during association may not be valid, if
3060 * association is based on ProbeRsp instead of beacon. */
3061 u8 dtim_period;
3062
3063 /* For CCX and 11R Roaming */
3064 u32 rssi_filter_period;
3065
3066 u32 num_beacon_per_rssi_average;
3067 u8 rssi_filter_enable;
3068} __packed;
3069
3070struct wcn36xx_hal_exit_bmps_req_msg {
3071 struct wcn36xx_hal_msg_header header;
3072
3073 u8 send_data_null;
3074 u8 bss_index;
3075} __packed;
3076
3077struct wcn36xx_hal_missed_beacon_ind_msg {
3078 struct wcn36xx_hal_msg_header header;
3079
3080 u8 bss_index;
3081} __packed;
3082
3083/* Beacon Filtering data structures */
3084
3085/* The above structure would be followed by multiple instances of the
3086 * structure mentioned below
3087 */
3088struct beacon_filter_ie {
3089 u8 element_id;
3090 u8 check_ie_presence;
3091 u8 offset;
3092 u8 value;
3093 u8 bitmask;
3094 u8 ref;
3095};
3096
3097struct wcn36xx_hal_add_bcn_filter_req_msg {
3098 struct wcn36xx_hal_msg_header header;
3099
3100 u16 capability_info;
3101 u16 capability_mask;
3102 u16 beacon_interval;
3103 u16 ie_num;
3104 u8 bss_index;
3105 u8 reserved;
3106};
3107
3108struct wcn36xx_hal_rem_bcn_filter_req {
3109 struct wcn36xx_hal_msg_header header;
3110
3111 u8 ie_Count;
3112 u8 rem_ie_id[1];
3113};
3114
3115#define WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD 0
3116#define WCN36XX_HAL_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD 1
3117#define WCN36XX_HAL_IPV6_NS_OFFLOAD 2
3118#define WCN36XX_HAL_IPV6_ADDR_LEN 16
3119#define WCN36XX_HAL_OFFLOAD_DISABLE 0
3120#define WCN36XX_HAL_OFFLOAD_ENABLE 1
3121#define WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE 0x2
3122#define WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE \
3123 (WCN36XX_HAL_OFFLOAD_ENABLE|WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE)
3124
3125struct wcn36xx_hal_ns_offload_params {
3126 u8 src_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
3127 u8 self_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
3128
3129 /* Only support 2 possible Network Advertisement IPv6 address */
3130 u8 target_ipv6_addr1[WCN36XX_HAL_IPV6_ADDR_LEN];
3131 u8 target_ipv6_addr2[WCN36XX_HAL_IPV6_ADDR_LEN];
3132
3133 u8 self_addr[ETH_ALEN];
3134 u8 src_ipv6_addr_valid:1;
3135 u8 target_ipv6_addr1_valid:1;
3136 u8 target_ipv6_addr2_valid:1;
3137 u8 reserved1:5;
3138
3139 /* make it DWORD aligned */
3140 u8 reserved2;
3141
3142 /* slot index for this offload */
3143 u32 slot_index;
3144 u8 bss_index;
3145};
3146
3147struct wcn36xx_hal_host_offload_req {
3148 u8 offload_Type;
3149
3150 /* enable or disable */
3151 u8 enable;
3152
3153 union {
3154 u8 host_ipv4_addr[4];
3155 u8 host_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
3156 } u;
3157};
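/*
 * Illustrative sketch, not part of the original header: requesting IPv4
 * ARP reply offload for one host address using the offload type and
 * enable defines above. The helper name is hypothetical.
 */
static inline void wcn36xx_hal_arp_offload_example(
		struct wcn36xx_hal_host_offload_req *req, const u8 ipv4[4])
{
	int i;

	req->offload_Type = WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD;
	req->enable = WCN36XX_HAL_OFFLOAD_ENABLE;
	for (i = 0; i < 4; i++)
		req->u.host_ipv4_addr[i] = ipv4[i];
}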
3158
3159struct wcn36xx_hal_host_offload_req_msg {
3160 struct wcn36xx_hal_msg_header header;
3161 struct wcn36xx_hal_host_offload_req host_offload_params;
3162 struct wcn36xx_hal_ns_offload_params ns_offload_params;
3163};
3164
3165/* Packet Types. */
3166#define WCN36XX_HAL_KEEP_ALIVE_NULL_PKT 1
3167#define WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP 2
3168
3169/* Enable or disable keep alive */
3170#define WCN36XX_HAL_KEEP_ALIVE_DISABLE 0
3171#define WCN36XX_HAL_KEEP_ALIVE_ENABLE 1
3172#define WCN36XX_KEEP_ALIVE_TIME_PERIOD 30 /* unit: s */
3173
3174/* Keep Alive request. */
3175struct wcn36xx_hal_keep_alive_req_msg {
3176 struct wcn36xx_hal_msg_header header;
3177
3178 u8 packet_type;
3179 u32 time_period;
3180 u8 host_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
3181 u8 dest_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
3182 u8 dest_addr[ETH_ALEN];
3183 u8 bss_index;
3184} __packed;
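/*
 * Illustrative sketch, not part of the original header: configuring a
 * NULL-frame keep-alive on one BSS using the packet type and period
 * defines above. The helper name is hypothetical; the IPv4/destination
 * address fields, which the unsolicited ARP response variant uses, are
 * not filled in by this sketch.
 */
static inline void wcn36xx_hal_keep_alive_null_example(
		struct wcn36xx_hal_keep_alive_req_msg *msg, u8 bss_index)
{
	msg->packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
	msg->time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;	/* seconds */
	msg->bss_index = bss_index;
}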
3185
3186struct wcn36xx_hal_rssi_threshold_req_msg {
3187 struct wcn36xx_hal_msg_header header;
3188
3189 s8 threshold1:8;
3190 s8 threshold2:8;
3191 s8 threshold3:8;
3192 u8 thres1_pos_notify:1;
3193 u8 thres1_neg_notify:1;
3194 u8 thres2_pos_notify:1;
3195 u8 thres2_neg_notify:1;
3196 u8 thres3_pos_notify:1;
3197 u8 thres3_neg_notify:1;
3198 u8 reserved10:2;
3199};
3200
3201struct wcn36xx_hal_enter_uapsd_req_msg {
3202 struct wcn36xx_hal_msg_header header;
3203
3204 u8 bk_delivery:1;
3205 u8 be_delivery:1;
3206 u8 vi_delivery:1;
3207 u8 vo_delivery:1;
3208 u8 bk_trigger:1;
3209 u8 be_trigger:1;
3210 u8 vi_trigger:1;
3211 u8 vo_trigger:1;
3212 u8 bss_index;
3213};
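/*
 * Illustrative sketch, not part of the original header: entering U-APSD
 * with delivery and trigger enabled on all four access categories for
 * one BSS, using the bitfields above. The helper name is hypothetical.
 */
static inline void wcn36xx_hal_enter_uapsd_all_ac_example(
		struct wcn36xx_hal_enter_uapsd_req_msg *msg, u8 bss_index)
{
	msg->bk_delivery = 1;
	msg->be_delivery = 1;
	msg->vi_delivery = 1;
	msg->vo_delivery = 1;
	msg->bk_trigger = 1;
	msg->be_trigger = 1;
	msg->vi_trigger = 1;
	msg->vo_trigger = 1;
	msg->bss_index = bss_index;
}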
3214
3215struct wcn36xx_hal_exit_uapsd_req_msg {
3216 struct wcn36xx_hal_msg_header header;
3217 u8 bss_index;
3218};
3219
3220#define WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE 128
3221#define WCN36XX_HAL_WOWL_BCAST_MAX_NUM_PATTERNS 16
3222
3223struct wcn36xx_hal_wowl_add_bcast_ptrn_req_msg {
3224 struct wcn36xx_hal_msg_header header;
3225
3226 /* Pattern ID */
3227 u8 id;
3228
3229 /* Pattern byte offset from beginning of the 802.11 packet to start
3230 * of the wake-up pattern */
3231 u8 byte_Offset;
3232
3233 /* Non-Zero Pattern size */
3234 u8 size;
3235
3236 /* Pattern */
3237 u8 pattern[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
3238
3239 /* Non-zero pattern mask size */
3240 u8 mask_size;
3241
3242 /* Pattern mask */
3243 u8 mask[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
3244
3245 /* Extra pattern */
3246 u8 extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
3247
3248 /* Extra pattern mask */
3249 u8 mask_extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
3250
3251 u8 bss_index;
3252};
3253
3254struct wcn36xx_hal_wow_del_bcast_ptrn_req_msg {
3255 struct wcn36xx_hal_msg_header header;
3256
3257 /* Pattern ID of the wakeup pattern to be deleted */
3258 u8 id;
3259 u8 bss_index;
3260};
3261
3262struct wcn36xx_hal_wowl_enter_req_msg {
3263 struct wcn36xx_hal_msg_header header;
3264
3265 /* Enables/disables magic packet filtering */
3266 u8 magic_packet_enable;
3267
3268 /* Magic pattern */
3269 u8 magic_pattern[ETH_ALEN];
3270
3271 /* Enables/disables packet pattern filtering in firmware. Enabling
3272 * this flag enables broadcast pattern matching in Firmware. If
3273 * unicast pattern matching is also desired,
3274 * ucUcastPatternFilteringEnable flag must be set to true as well
3275 */
3276 u8 pattern_filtering_enable;
3277
3278 /* Enables/disables unicast packet pattern filtering. This flag
3279 * specifies whether we want to do pattern match on unicast packets
3280 * as well and not just broadcast packets. This flag has no effect
3281 * if the ucPatternFilteringEnable (main controlling flag) is set
3282 * to false
3283 */
3284 u8 ucast_pattern_filtering_enable;
3285
3286 /* This configuration is valid only when magicPktEnable=1. It
3287 * requests hardware to wake up when it receives the Channel Switch
3288 * Action Frame.
3289 */
3290 u8 wow_channel_switch_receive;
3291
3292 /* This configuration is valid only when magicPktEnable=1. It
3293 * requests hardware to wake up when it receives the
3294 * Deauthentication Frame.
3295 */
3296 u8 wow_deauth_receive;
3297
3298 /* This configuration is valid only when magicPktEnable=1. It
3299 * requests hardware to wake up when it receives the Disassociation
3300 * Frame.
3301 */
3302 u8 wow_disassoc_receive;
3303
3304 /* This configuration is valid only when magicPktEnable=1. It
3305 * requests hardware to wake up when it has missed consecutive
3306 * beacons. This is a hardware register configuration (NOT a
3307 * firmware configuration).
3308 */
3309 u8 wow_max_missed_beacons;
3310
3311 /* This configuration is valid only when magicPktEnable=1. This is
3312 * a timeout value in units of microseconds. It requests hardware to
3313 * unconditionally wake up after it has stayed in WoWLAN mode for
3314 * some time. Set 0 to disable this feature.
3315 */
3316 u8 wow_max_sleep;
3317
3318 /* This configuration directs the WoW packet filtering to look for
3319 * EAP-ID requests embedded in EAPOL frames and use this as a wake
3320 * source.
3321 */
3322 u8 wow_eap_id_request_enable;
3323
3324 /* This configuration directs the WoW packet filtering to look for
3325 * EAPOL-4WAY requests and use this as a wake source.
3326 */
3327 u8 wow_eapol_4way_enable;
3328
3329 /* This configuration allows a host wakeup on a network scan
3330 * offload match.
3331 */
3332 u8 wow_net_scan_offload_match;
3333
3334 /* This configuration allows a host wakeup on any GTK rekeying
3335 * error.
3336 */
3337 u8 wow_gtk_rekey_error;
3338
3339 /* This configuration allows a host wakeup on BSS connection loss.
3340 */
3341 u8 wow_bss_connection_loss;
3342
3343 u8 bss_index;
3344};
3345
3346struct wcn36xx_hal_wowl_exit_req_msg {
3347 struct wcn36xx_hal_msg_header header;
3348
3349 u8 bss_index;
3350};
3351
3352struct wcn36xx_hal_get_rssi_req_msg {
3353 struct wcn36xx_hal_msg_header header;
3354};
3355
3356struct wcn36xx_hal_get_roam_rssi_req_msg {
3357 struct wcn36xx_hal_msg_header header;
3358
3359 /* Valid STA Idx for per STA stats request */
3360 u32 sta_id;
3361};
3362
3363struct wcn36xx_hal_set_uapsd_ac_params_req_msg {
3364 struct wcn36xx_hal_msg_header header;
3365
3366 /* STA index */
3367 u8 sta_idx;
3368
3369 /* Access Category */
3370 u8 ac;
3371
3372 /* User Priority */
3373 u8 up;
3374
3375 /* Service Interval */
3376 u32 service_interval;
3377
3378 /* Suspend Interval */
3379 u32 suspend_interval;
3380
3381 /* Delay Interval */
3382 u32 delay_interval;
3383};
3384
3385struct wcn36xx_hal_configure_rxp_filter_req_msg {
3386 struct wcn36xx_hal_msg_header header;
3387
3388 u8 set_mcst_bcst_filter_setting;
3389 u8 set_mcst_bcst_filter;
3390};
3391
3392struct wcn36xx_hal_enter_imps_rsp_msg {
3393 struct wcn36xx_hal_msg_header header;
3394
3395 /* success or failure */
3396 u32 status;
3397};
3398
3399struct wcn36xx_hal_exit_imps_rsp_msg {
3400 struct wcn36xx_hal_msg_header header;
3401
3402 /* success or failure */
3403 u32 status;
3404};
3405
3406struct wcn36xx_hal_enter_bmps_rsp_msg {
3407 struct wcn36xx_hal_msg_header header;
3408
3409 /* success or failure */
3410 u32 status;
3411
3412 u8 bss_index;
3413} __packed;
3414
3415struct wcn36xx_hal_exit_bmps_rsp_msg {
3416 struct wcn36xx_hal_msg_header header;
3417
3418 /* success or failure */
3419 u32 status;
3420
3421 u8 bss_index;
3422} __packed;
3423
3424struct wcn36xx_hal_enter_uapsd_rsp_msg {
3425 struct wcn36xx_hal_msg_header header;
3426
3427 /* success or failure */
3428 u32 status;
3429
3430 u8 bss_index;
3431};
3432
3433struct wcn36xx_hal_exit_uapsd_rsp_msg {
3434 struct wcn36xx_hal_msg_header header;
3435
3436 /* success or failure */
3437 u32 status;
3438
3439 u8 bss_index;
3440};
3441
3442struct wcn36xx_hal_rssi_notification_ind_msg {
3443 struct wcn36xx_hal_msg_header header;
3444
3445 u32 rssi_thres1_pos_cross:1;
3446 u32 rssi_thres1_neg_cross:1;
3447 u32 rssi_thres2_pos_cross:1;
3448 u32 rssi_thres2_neg_cross:1;
3449 u32 rssi_thres3_pos_cross:1;
3450 u32 rssi_thres3_neg_cross:1;
3451 u32 avg_rssi:8;
3452 u32 reserved:18;
3453
3454};
3455
3456struct wcn36xx_hal_get_rssio_rsp_msg {
3457 struct wcn36xx_hal_msg_header header;
3458
3459 /* success or failure */
3460 u32 status;
3461 s8 rssi;
3462
3463};
3464
3465struct wcn36xx_hal_get_roam_rssi_rsp_msg {
3466 struct wcn36xx_hal_msg_header header;
3467
3468 /* success or failure */
3469 u32 status;
3470
3471 u8 sta_id;
3472 s8 rssi;
3473};
3474
3475struct wcn36xx_hal_wowl_enter_rsp_msg {
3476 struct wcn36xx_hal_msg_header header;
3477
3478 /* success or failure */
3479 u32 status;
3480 u8 bss_index;
3481};
3482
3483struct wcn36xx_hal_wowl_exit_rsp_msg {
3484 struct wcn36xx_hal_msg_header header;
3485
3486 /* success or failure */
3487 u32 status;
3488 u8 bss_index;
3489};
3490
3491struct wcn36xx_hal_add_bcn_filter_rsp_msg {
3492 struct wcn36xx_hal_msg_header header;
3493
3494 /* success or failure */
3495 u32 status;
3496};
3497
3498struct wcn36xx_hal_rem_bcn_filter_rsp_msg {
3499 struct wcn36xx_hal_msg_header header;
3500
3501 /* success or failure */
3502 u32 status;
3503};
3504
3505struct wcn36xx_hal_add_wowl_bcast_ptrn_rsp_msg {
3506 struct wcn36xx_hal_msg_header header;
3507
3508 /* success or failure */
3509 u32 status;
3510 u8 bss_index;
3511};
3512
3513struct wcn36xx_hal_del_wowl_bcast_ptrn_rsp_msg {
3514 struct wcn36xx_hal_msg_header header;
3515
3516 /* success or failure */
3517 u32 status;
3518 u8 bss_index;
3519};
3520
3521struct wcn36xx_hal_host_offload_rsp_msg {
3522 struct wcn36xx_hal_msg_header header;
3523
3524 /* success or failure */
3525 u32 status;
3526};
3527
3528struct wcn36xx_hal_keep_alive_rsp_msg {
3529 struct wcn36xx_hal_msg_header header;
3530
3531 /* success or failure */
3532 u32 status;
3533};
3534
3535struct wcn36xx_hal_set_rssi_thresh_rsp_msg {
3536 struct wcn36xx_hal_msg_header header;
3537
3538 /* success or failure */
3539 u32 status;
3540};
3541
3542struct wcn36xx_hal_set_uapsd_ac_params_rsp_msg {
3543 struct wcn36xx_hal_msg_header header;
3544
3545 /* success or failure */
3546 u32 status;
3547};
3548
3549struct wcn36xx_hal_configure_rxp_filter_rsp_msg {
3550 struct wcn36xx_hal_msg_header header;
3551
3552 /* success or failure */
3553 u32 status;
3554};
3555
3556struct set_max_tx_pwr_req {
3557 struct wcn36xx_hal_msg_header header;
3558
3559 /* BSSID is needed to identify which session issued this request.
3560 * As the request has power constraints, this should be applied
3561 * only to that session */
3562 u8 bssid[ETH_ALEN];
3563
3564 u8 self_addr[ETH_ALEN];
3565
3566 /* In request, power == MaxTx power to be used. */
3567 u8 power;
3568};
3569
3570struct set_max_tx_pwr_rsp_msg {
3571 struct wcn36xx_hal_msg_header header;
3572
3573 /* power == tx power used for management frames */
3574 u8 power;
3575
3576 /* success or failure */
3577 u32 status;
3578};
3579
3580struct set_tx_pwr_req_msg {
3581 struct wcn36xx_hal_msg_header header;
3582
3583 /* TX Power in milli watts */
3584 u32 tx_power;
3585
3586 u8 bss_index;
3587};
3588
3589struct set_tx_pwr_rsp_msg {
3590 struct wcn36xx_hal_msg_header header;
3591
3592 /* success or failure */
3593 u32 status;
3594};
3595
3596struct get_tx_pwr_req_msg {
3597 struct wcn36xx_hal_msg_header header;
3598
3599 u8 sta_id;
3600};
3601
3602struct get_tx_pwr_rsp_msg {
3603 struct wcn36xx_hal_msg_header header;
3604
3605 /* success or failure */
3606 u32 status;
3607
3608 /* TX Power in milli watts */
3609 u32 tx_power;
3610};
3611
3612struct set_p2p_gonoa_req_msg {
3613 struct wcn36xx_hal_msg_header header;
3614
3615 u8 opp_ps;
3616 u32 ct_window;
3617 u8 count;
3618 u32 duration;
3619 u32 interval;
3620 u32 single_noa_duration;
3621 u8 ps_selection;
3622};
3623
3624struct set_p2p_gonoa_rsp_msg {
3625 struct wcn36xx_hal_msg_header header;
3626
3627 /* success or failure */
3628 u32 status;
3629};
3630
3631struct wcn36xx_hal_add_sta_self_req {
3632 struct wcn36xx_hal_msg_header header;
3633
3634 u8 self_addr[ETH_ALEN];
3635 u32 status;
3636} __packed;
3637
3638struct wcn36xx_hal_add_sta_self_rsp_msg {
3639 struct wcn36xx_hal_msg_header header;
3640
3641 /* success or failure */
3642 u32 status;
3643
3644 /* Self STA Index */
3645 u8 self_sta_index;
3646
3647 /* DPU Index (IGTK, PTK, GTK all same) */
3648 u8 dpu_index;
3649
3650 /* DPU Signature */
3651 u8 dpu_signature;
3652} __packed;
3653
3654struct wcn36xx_hal_del_sta_self_req_msg {
3655 struct wcn36xx_hal_msg_header header;
3656
3657 u8 self_addr[ETH_ALEN];
3658} __packed;
3659
3660struct wcn36xx_hal_del_sta_self_rsp_msg {
3661 struct wcn36xx_hal_msg_header header;
3662
 3663	/* success or failure */
3664 u32 status;
3665
3666 u8 self_addr[ETH_ALEN];
3667} __packed;
3668
3669struct aggr_add_ts_req {
3670 struct wcn36xx_hal_msg_header header;
3671
3672 /* Station Index */
3673 u16 sta_idx;
3674
3675 /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS.
3676 * This will carry the bitmap with the bit positions representing
 3677	 * different ACs. */
3678 u16 tspec_index;
3679
3680 /* Tspec info per AC To program TPE with required parameters */
3681 struct wcn36xx_hal_tspec_ie tspec[WCN36XX_HAL_MAX_AC];
3682
3683 /* U-APSD Flags: 1b per AC. Encoded as follows:
3684 b7 b6 b5 b4 b3 b2 b1 b0 =
3685 X X X X BE BK VI VO */
3686 u8 uapsd;
3687
3688 /* These parameters are for all the access categories */
3689
3690 /* Service Interval */
3691 u32 service_interval[WCN36XX_HAL_MAX_AC];
3692
3693 /* Suspend Interval */
3694 u32 suspend_interval[WCN36XX_HAL_MAX_AC];
3695
3696 /* Delay Interval */
3697 u32 delay_interval[WCN36XX_HAL_MAX_AC];
3698};
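
The uapsd field above packs one flag per access category into the low nibble (bit 0 = VO, bit 1 = VI, bit 2 = BK, bit 3 = BE), per the bitmap comment in the struct. A minimal sketch of building that mask follows; the macro and function names are illustrative only and are not part of the HAL interface.

/* Sketch: build the per-AC U-APSD bitmap for aggr_add_ts_req.uapsd */
#define EXAMPLE_UAPSD_AC_VO	(1U << 0)
#define EXAMPLE_UAPSD_AC_VI	(1U << 1)
#define EXAMPLE_UAPSD_AC_BK	(1U << 2)
#define EXAMPLE_UAPSD_AC_BE	(1U << 3)

static u8 example_build_uapsd_mask(bool vo, bool vi, bool bk, bool be)
{
	u8 mask = 0;

	if (vo)
		mask |= EXAMPLE_UAPSD_AC_VO;
	if (vi)
		mask |= EXAMPLE_UAPSD_AC_VI;
	if (bk)
		mask |= EXAMPLE_UAPSD_AC_BK;
	if (be)
		mask |= EXAMPLE_UAPSD_AC_BE;

	return mask;
}

For example, enabling U-APSD on voice and video only would be example_build_uapsd_mask(true, true, false, false), i.e. 0x03.
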
3699
3700struct aggr_add_ts_rsp_msg {
3701 struct wcn36xx_hal_msg_header header;
3702
3703 /* success or failure */
3704 u32 status0;
3705
3706 /* FIXME PRIMA for future use for 11R */
3707 u32 status1;
3708};
3709
3710struct wcn36xx_hal_configure_apps_cpu_wakeup_state_req_msg {
3711 struct wcn36xx_hal_msg_header header;
3712
3713 u8 is_apps_cpu_awake;
3714};
3715
3716struct wcn36xx_hal_configure_apps_cpu_wakeup_state_rsp_msg {
3717 struct wcn36xx_hal_msg_header header;
3718
3719 /* success or failure */
3720 u32 status;
3721};
3722
3723struct wcn36xx_hal_dump_cmd_req_msg {
3724 struct wcn36xx_hal_msg_header header;
3725
3726 u32 arg1;
3727 u32 arg2;
3728 u32 arg3;
3729 u32 arg4;
3730 u32 arg5;
3731} __packed;
3732
3733struct wcn36xx_hal_dump_cmd_rsp_msg {
3734 struct wcn36xx_hal_msg_header header;
3735
3736 /* success or failure */
3737 u32 status;
3738
 3739	/* Length of the response message */
3740 u32 rsp_length;
3741
 3742	/* FIXME: Currently assuming the response will be less than
 3743	 * 100 bytes */
3744 u8 rsp_buffer[DUMPCMD_RSP_BUFFER];
3745} __packed;
3746
3747#define WLAN_COEX_IND_DATA_SIZE (4)
3748#define WLAN_COEX_IND_TYPE_DISABLE_HB_MONITOR (0)
3749#define WLAN_COEX_IND_TYPE_ENABLE_HB_MONITOR (1)
3750
3751struct coex_ind_msg {
3752 struct wcn36xx_hal_msg_header header;
3753
3754 /* Coex Indication Type */
3755 u32 type;
3756
3757 /* Coex Indication Data */
3758 u32 data[WLAN_COEX_IND_DATA_SIZE];
3759};
3760
3761struct wcn36xx_hal_tx_compl_ind_msg {
3762 struct wcn36xx_hal_msg_header header;
3763
3764 /* Tx Complete Indication Success or Failure */
3765 u32 status;
3766};
3767
3768struct wcn36xx_hal_wlan_host_suspend_ind_msg {
3769 struct wcn36xx_hal_msg_header header;
3770
3771 u32 configured_mcst_bcst_filter_setting;
3772 u32 active_session_count;
3773};
3774
3775struct wcn36xx_hal_wlan_exclude_unencrpted_ind_msg {
3776 struct wcn36xx_hal_msg_header header;
3777
3778 u8 dot11_exclude_unencrypted;
3779 u8 bssid[ETH_ALEN];
3780};
3781
3782struct noa_attr_ind_msg {
3783 struct wcn36xx_hal_msg_header header;
3784
3785 u8 index;
3786 u8 opp_ps_flag;
3787 u16 ctwin;
3788
3789 u16 noa1_interval_count;
3790 u16 bss_index;
3791 u32 noa1_duration;
3792 u32 noa1_interval;
3793 u32 noa1_starttime;
3794
3795 u16 noa2_interval_count;
3796 u16 reserved2;
3797 u32 noa2_duration;
3798 u32 noa2_interval;
3799 u32 noa2_start_time;
3800
3801 u32 status;
3802};
3803
3804struct noa_start_ind_msg {
3805 struct wcn36xx_hal_msg_header header;
3806
3807 u32 status;
3808 u32 bss_index;
3809};
3810
3811struct wcn36xx_hal_wlan_host_resume_req_msg {
3812 struct wcn36xx_hal_msg_header header;
3813
3814 u8 configured_mcst_bcst_filter_setting;
3815};
3816
3817struct wcn36xx_hal_host_resume_rsp_msg {
3818 struct wcn36xx_hal_msg_header header;
3819
3820 /* success or failure */
3821 u32 status;
3822};
3823
3824struct wcn36xx_hal_del_ba_ind_msg {
3825 struct wcn36xx_hal_msg_header header;
3826
3827 u16 sta_idx;
3828
3829 /* Peer MAC Address, whose BA session has timed out */
3830 u8 peer_addr[ETH_ALEN];
3831
3832 /* TID for which a BA session timeout is being triggered */
3833 u8 ba_tid;
3834
3835 /* DELBA direction
3836 * 1 - Originator
3837 * 0 - Recipient
3838 */
3839 u8 direction;
3840
3841 u32 reason_code;
3842
3843 /* TO SUPPORT BT-AMP */
3844 u8 bssid[ETH_ALEN];
3845};
3846
3847/* PNO Messages */
3848
3849/* Max number of channels that a network can be found on */
3850#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS 26
3851
3852/* Max number of channels that a network can be found on */
3853#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX 60
3854
3855/* Maximum numbers of networks supported by PNO */
3856#define WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS 16
3857
3858/* The number of scan time intervals that can be programmed into PNO */
3859#define WCN36XX_HAL_PNO_MAX_SCAN_TIMERS 10
3860
3861/* Maximum size of the probe template */
3862#define WCN36XX_HAL_PNO_MAX_PROBE_SIZE 450
3863
3864/* Type of PNO enabling:
3865 *
3866 * Immediate - scanning will start immediately and PNO procedure will be
3867 * repeated based on timer
3868 *
3869 * Suspend - scanning will start at suspend
3870 *
3871 * Resume - scanning will start on system resume
3872 */
3873enum pno_mode {
3874 PNO_MODE_IMMEDIATE,
3875 PNO_MODE_ON_SUSPEND,
3876 PNO_MODE_ON_RESUME,
3877 PNO_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
3878};
3879
3880/* Authentication type */
3881enum auth_type {
3882 AUTH_TYPE_ANY = 0,
3883 AUTH_TYPE_OPEN_SYSTEM = 1,
3884
3885 /* Upper layer authentication types */
3886 AUTH_TYPE_WPA = 2,
3887 AUTH_TYPE_WPA_PSK = 3,
3888
3889 AUTH_TYPE_RSN = 4,
3890 AUTH_TYPE_RSN_PSK = 5,
3891 AUTH_TYPE_FT_RSN = 6,
3892 AUTH_TYPE_FT_RSN_PSK = 7,
3893 AUTH_TYPE_WAPI_WAI_CERTIFICATE = 8,
3894 AUTH_TYPE_WAPI_WAI_PSK = 9,
3895
3896 AUTH_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
3897};
3898
3899/* Encryption type */
3900enum ed_type {
3901 ED_ANY = 0,
3902 ED_NONE = 1,
3903 ED_WEP = 2,
3904 ED_TKIP = 3,
3905 ED_CCMP = 4,
3906 ED_WPI = 5,
3907
3908 ED_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
3909};
3910
3911/* SSID broadcast type */
3912enum ssid_bcast_type {
3913 BCAST_UNKNOWN = 0,
3914 BCAST_NORMAL = 1,
3915 BCAST_HIDDEN = 2,
3916
3917 BCAST_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
3918};
3919
 3920/* The network description that PNO will have to look for */
3921struct network_type {
3922 /* SSID of the BSS */
3923 struct wcn36xx_hal_mac_ssid ssid;
3924
3925 /* Authentication type for the network */
3926 enum auth_type authentication;
3927
3928 /* Encryption type for the network */
3929 enum ed_type encryption;
3930
 3931	/* Indicates the channels on which the network can be found; 0 means
 3932	 * all channels */
3933 u8 channel_count;
3934 u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
3935
3936 /* Indicates the RSSI threshold for the network to be considered */
3937 u8 rssi_threshold;
3938};
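
A hedged sketch of filling one PNO network entry follows; the SSID field accessors (.length, .ssid) mirror the wcn36xx_hal_mac_ssid usage visible later in main.c, the helper name is hypothetical, and the RSSI threshold semantics are an assumption.

/* Sketch only: describe one RSN-PSK/CCMP network on channels 1, 6 and 11.
 * ssid_len is assumed to fit within the SSID buffer (<= 32 bytes). */
static void example_fill_pno_network(struct network_type *net,
				     const u8 *ssid, u8 ssid_len)
{
	memset(net, 0, sizeof(*net));

	net->ssid.length = ssid_len;
	memcpy(net->ssid.ssid, ssid, ssid_len);

	net->authentication = AUTH_TYPE_RSN_PSK;
	net->encryption = ED_CCMP;

	net->channel_count = 3;
	net->channels[0] = 1;
	net->channels[1] = 6;
	net->channels[2] = 11;

	/* 0 is assumed to mean "report at any RSSI"; firmware defined */
	net->rssi_threshold = 0;
}
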
3939
3940struct scan_timer {
 3941	/* How long it should wait */
3942 u32 value;
3943
 3944	/* How many times it should repeat that wait; value 0 - keep using
 3945	 * this timer until PNO is disabled */
3946 u32 repeat;
3947
 3948	/* e.g.: 2 3 4 0 - it will wait 2s between consecutive scans for 3
3949 * times - after that it will wait 4s between consecutive scans
3950 * until disabled */
3951};
3952
 3953/* The scan timer parameters to be sent to the PNO algorithm */
3954struct scan_timers_type {
3955 /* set to 0 if you wish for PNO to use its default telescopic timer */
3956 u8 count;
3957
 3958	/* A set value represents the amount of time that PNO will wait
 3959	 * between two consecutive scan procedures. If a uniform timer that
 3960	 * always fires at the exact same interval is desired, a single value
 3961	 * should be set. If a more complex, telescopic-like timer is desired,
 3962	 * multiple values can be set; once PNO reaches the end of the array
 3963	 * it will continue scanning at the interval given by the last
 3964	 * value */
3965 struct scan_timer values[WCN36XX_HAL_PNO_MAX_SCAN_TIMERS];
3966};
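
A minimal sketch of the telescopic example given in the scan_timer comment (wait 2 s between scans three times, then 4 s until PNO is disabled); the helper name is illustrative and the unit of .value (seconds) is taken from that comment.

static void example_fill_scan_timers(struct scan_timers_type *timers)
{
	timers->count = 2;

	timers->values[0].value = 2;	/* 2 s between consecutive scans... */
	timers->values[0].repeat = 3;	/* ...repeated 3 times */

	timers->values[1].value = 4;	/* then 4 s between scans... */
	timers->values[1].repeat = 0;	/* ...until PNO is disabled */
}
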
3967
3968/* Preferred network list request */
3969struct set_pref_netw_list_req {
3970 struct wcn36xx_hal_msg_header header;
3971
3972 /* Enable PNO */
3973 u32 enable;
3974
3975 /* Immediate, On Suspend, On Resume */
3976 enum pno_mode mode;
3977
3978 /* Number of networks sent for PNO */
3979 u32 networks_count;
3980
3981 /* The networks that PNO needs to look for */
3982 struct network_type networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
3983
3984 /* The scan timers required for PNO */
3985 struct scan_timers_type scan_timers;
3986
3987 /* Probe template for 2.4GHz band */
3988 u16 band_24g_probe_size;
3989 u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
3990
3991 /* Probe template for 5GHz band */
3992 u16 band_5g_probe_size;
3993 u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
3994};
3995
 3996/* The network description that PNO will have to look for */
3997struct network_type_new {
3998 /* SSID of the BSS */
3999 struct wcn36xx_hal_mac_ssid ssid;
4000
4001 /* Authentication type for the network */
4002 enum auth_type authentication;
4003
4004 /* Encryption type for the network */
4005 enum ed_type encryption;
4006
4007 /* SSID broadcast type, normal, hidden or unknown */
4008 enum ssid_bcast_type bcast_network_type;
4009
 4010	/* Indicates the channels on which the network can be found; 0 means
 4011	 * all channels */
4012 u8 channel_count;
4013 u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
4014
4015 /* Indicates the RSSI threshold for the network to be considered */
4016 u8 rssi_threshold;
4017};
4018
4019/* Preferred network list request new */
4020struct set_pref_netw_list_req_new {
4021 struct wcn36xx_hal_msg_header header;
4022
4023 /* Enable PNO */
4024 u32 enable;
4025
4026 /* Immediate, On Suspend, On Resume */
4027 enum pno_mode mode;
4028
4029 /* Number of networks sent for PNO */
4030 u32 networks_count;
4031
4032 /* The networks that PNO needs to look for */
4033 struct network_type_new networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
4034
4035 /* The scan timers required for PNO */
4036 struct scan_timers_type scan_timers;
4037
4038 /* Probe template for 2.4GHz band */
4039 u16 band_24g_probe_size;
4040 u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
4041
4042 /* Probe template for 5GHz band */
4043 u16 band_5g_probe_size;
4044 u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
4045};
4046
4047/* Preferred network list response */
4048struct set_pref_netw_list_resp {
4049 struct wcn36xx_hal_msg_header header;
4050
4051 /* status of the request - just to indicate that PNO has
4052 * acknowledged the request and will start scanning */
4053 u32 status;
4054};
4055
4056/* Preferred network found indication */
4057struct pref_netw_found_ind {
4058
4059 struct wcn36xx_hal_msg_header header;
4060
4061 /* Network that was found with the highest RSSI */
4062 struct wcn36xx_hal_mac_ssid ssid;
4063
4064 /* Indicates the RSSI */
4065 u8 rssi;
4066};
4067
4068/* RSSI Filter request */
4069struct set_rssi_filter_req {
4070 struct wcn36xx_hal_msg_header header;
4071
4072 /* RSSI Threshold */
4073 u8 rssi_threshold;
4074};
4075
4076/* Set RSSI filter resp */
4077struct set_rssi_filter_resp {
4078 struct wcn36xx_hal_msg_header header;
4079
4080 /* status of the request */
4081 u32 status;
4082};
4083
4084/* Update scan params - sent from host to PNO to be used during PNO
 4085 * scanning */
4086struct wcn36xx_hal_update_scan_params_req {
4087
4088 struct wcn36xx_hal_msg_header header;
4089
4090 /* Host setting for 11d */
4091 u8 dot11d_enabled;
4092
4093 /* Lets PNO know that host has determined the regulatory domain */
4094 u8 dot11d_resolved;
4095
4096 /* Channels on which PNO is allowed to scan */
4097 u8 channel_count;
4098 u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
4099
4100 /* Minimum channel time */
4101 u16 active_min_ch_time;
4102
4103 /* Maximum channel time */
4104 u16 active_max_ch_time;
4105
4106 /* Minimum channel time */
4107 u16 passive_min_ch_time;
4108
4109 /* Maximum channel time */
4110 u16 passive_max_ch_time;
4111
4112 /* Cb State */
4113 enum phy_chan_bond_state state;
4114} __packed;
4115
4116/* Update scan params - sent from host to PNO to be used during PNO
 4117 * scanning */
4118struct update_scan_params_req_ex {
4119
4120 struct wcn36xx_hal_msg_header header;
4121
4122 /* Host setting for 11d */
4123 u8 dot11d_enabled;
4124
4125 /* Lets PNO know that host has determined the regulatory domain */
4126 u8 dot11d_resolved;
4127
4128 /* Channels on which PNO is allowed to scan */
4129 u8 channel_count;
4130 u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
4131
4132 /* Minimum channel time */
4133 u16 active_min_ch_time;
4134
4135 /* Maximum channel time */
4136 u16 active_max_ch_time;
4137
4138 /* Minimum channel time */
4139 u16 passive_min_ch_time;
4140
4141 /* Maximum channel time */
4142 u16 passive_max_ch_time;
4143
4144 /* Cb State */
4145 enum phy_chan_bond_state state;
4146};
4147
4148/* Update scan params - sent from host to PNO to be used during PNO
 4149 * scanning */
4150struct wcn36xx_hal_update_scan_params_resp {
4151
4152 struct wcn36xx_hal_msg_header header;
4153
4154 /* status of the request */
4155 u32 status;
4156} __packed;
4157
4158struct wcn36xx_hal_set_tx_per_tracking_req_msg {
4159 struct wcn36xx_hal_msg_header header;
4160
4161 /* 0: disable, 1:enable */
4162 u8 tx_per_tracking_enable;
4163
4164 /* Check period, unit is sec. */
4165 u8 tx_per_tracking_period;
4166
4167 /* (Fail TX packet)/(Total TX packet) ratio, the unit is 10%. */
4168 u8 tx_per_tracking_ratio;
4169
 4170	/* A watermark of the check number; once the tx packet count exceeds
 4171	 * this number, we do the check. Default is 5 */
4172 u32 tx_per_tracking_watermark;
4173};
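
A hypothetical filling of the TX PER tracking request that matches the field comments above; the message header is assumed to be initialised by the caller, and the chosen values are just an example.

static void example_fill_tx_per_tracking(
		struct wcn36xx_hal_set_tx_per_tracking_req_msg *msg)
{
	msg->tx_per_tracking_enable = 1;	/* enable tracking */
	msg->tx_per_tracking_period = 5;	/* check every 5 seconds */
	msg->tx_per_tracking_ratio = 5;		/* alert at 50% failed TX (unit is 10%) */
	msg->tx_per_tracking_watermark = 5;	/* only after at least 5 TX packets */
}
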
4174
4175struct wcn36xx_hal_set_tx_per_tracking_rsp_msg {
4176 struct wcn36xx_hal_msg_header header;
4177
4178 /* success or failure */
4179 u32 status;
4180
4181};
4182
4183struct tx_per_hit_ind_msg {
4184 struct wcn36xx_hal_msg_header header;
4185};
4186
4187/* Packet Filtering Definitions Begin */
4188#define WCN36XX_HAL_PROTOCOL_DATA_LEN 8
4189#define WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS 240
4190#define WCN36XX_HAL_MAX_NUM_FILTERS 20
4191#define WCN36XX_HAL_MAX_CMP_PER_FILTER 10
4192
4193enum wcn36xx_hal_receive_packet_filter_type {
4194 HAL_RCV_FILTER_TYPE_INVALID,
4195 HAL_RCV_FILTER_TYPE_FILTER_PKT,
4196 HAL_RCV_FILTER_TYPE_BUFFER_PKT,
4197 HAL_RCV_FILTER_TYPE_MAX_ENUM_SIZE
4198};
4199
4200enum wcn36xx_hal_rcv_pkt_flt_protocol_type {
4201 HAL_FILTER_PROTO_TYPE_INVALID,
4202 HAL_FILTER_PROTO_TYPE_MAC,
4203 HAL_FILTER_PROTO_TYPE_ARP,
4204 HAL_FILTER_PROTO_TYPE_IPV4,
4205 HAL_FILTER_PROTO_TYPE_IPV6,
4206 HAL_FILTER_PROTO_TYPE_UDP,
4207 HAL_FILTER_PROTO_TYPE_MAX
4208};
4209
4210enum wcn36xx_hal_rcv_pkt_flt_cmp_flag_type {
4211 HAL_FILTER_CMP_TYPE_INVALID,
4212 HAL_FILTER_CMP_TYPE_EQUAL,
4213 HAL_FILTER_CMP_TYPE_MASK_EQUAL,
4214 HAL_FILTER_CMP_TYPE_NOT_EQUAL,
4215 HAL_FILTER_CMP_TYPE_MAX
4216};
4217
4218struct wcn36xx_hal_rcv_pkt_filter_params {
4219 u8 protocol_layer;
4220 u8 cmp_flag;
4221
4222 /* Length of the data to compare */
4223 u16 data_length;
4224
4225 /* from start of the respective frame header */
4226 u8 data_offset;
4227
4228 /* Reserved field */
4229 u8 reserved;
4230
4231 /* Data to compare */
4232 u8 compare_data[WCN36XX_HAL_PROTOCOL_DATA_LEN];
4233
4234 /* Mask to be applied on the received packet data before compare */
4235 u8 data_mask[WCN36XX_HAL_PROTOCOL_DATA_LEN];
4236};
4237
4238struct wcn36xx_hal_sessionized_rcv_pkt_filter_cfg_type {
4239 u8 id;
4240 u8 type;
4241 u8 params_count;
4242 u32 coleasce_time;
4243 u8 bss_index;
4244 struct wcn36xx_hal_rcv_pkt_filter_params params[1];
4245};
4246
4247struct wcn36xx_hal_set_rcv_pkt_filter_req_msg {
4248 struct wcn36xx_hal_msg_header header;
4249
4250 u8 id;
4251 u8 type;
4252 u8 params_count;
4253 u32 coalesce_time;
4254 struct wcn36xx_hal_rcv_pkt_filter_params params[1];
4255};
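
A sketch of one comparison parameter that matches IPv4 packets whose protocol byte (offset 9 from the start of the IPv4 header) equals UDP (17). The offset/length choice is an illustrative assumption rather than firmware documentation, and the helper name is hypothetical.

static void example_fill_udp_filter_param(
		struct wcn36xx_hal_rcv_pkt_filter_params *p)
{
	memset(p, 0, sizeof(*p));

	p->protocol_layer = HAL_FILTER_PROTO_TYPE_IPV4;
	p->cmp_flag = HAL_FILTER_CMP_TYPE_MASK_EQUAL;

	p->data_offset = 9;		/* protocol field of the IPv4 header */
	p->data_length = 1;		/* compare a single byte */

	p->compare_data[0] = 17;	/* IPPROTO_UDP */
	p->data_mask[0] = 0xff;		/* full-byte compare */
}
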
4256
4257struct wcn36xx_hal_rcv_flt_mc_addr_list_type {
4258 /* from start of the respective frame header */
4259 u8 data_offset;
4260
4261 u32 mc_addr_count;
4262 u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS];
4263 u8 bss_index;
4264};
4265
4266struct wcn36xx_hal_set_pkt_filter_rsp_msg {
4267 struct wcn36xx_hal_msg_header header;
4268
4269 /* success or failure */
4270 u32 status;
4271
4272 u8 bss_index;
4273};
4274
4275struct wcn36xx_hal_rcv_flt_pkt_match_cnt_req_msg {
4276 struct wcn36xx_hal_msg_header header;
4277
4278 u8 bss_index;
4279};
4280
4281struct wcn36xx_hal_rcv_flt_pkt_match_cnt {
4282 u8 id;
4283 u32 match_cnt;
4284};
4285
4286struct wcn36xx_hal_rcv_flt_pkt_match_cnt_rsp_msg {
4287 struct wcn36xx_hal_msg_header header;
4288
4289 /* Success or Failure */
4290 u32 status;
4291
4292 u32 match_count;
4293 struct wcn36xx_hal_rcv_flt_pkt_match_cnt
4294 matches[WCN36XX_HAL_MAX_NUM_FILTERS];
4295 u8 bss_index;
4296};
4297
4298struct wcn36xx_hal_rcv_flt_pkt_clear_param {
4299 /* only valid for response message */
4300 u32 status;
4301 u8 id;
4302 u8 bss_index;
4303};
4304
4305struct wcn36xx_hal_rcv_flt_pkt_clear_req_msg {
4306 struct wcn36xx_hal_msg_header header;
4307 struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
4308};
4309
4310struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg {
4311 struct wcn36xx_hal_msg_header header;
4312 struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
4313};
4314
4315struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
4316 struct wcn36xx_hal_msg_header header;
4317 struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
4318};
4319
4320struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
4321 struct wcn36xx_hal_msg_header header;
4322 u32 status;
4323 u8 bss_index;
4324};
4325
4326/* Packet Filtering Definitions End */
4327
4328struct wcn36xx_hal_set_power_params_req_msg {
4329 struct wcn36xx_hal_msg_header header;
4330
4331 /* Ignore DTIM */
4332 u32 ignore_dtim;
4333
4334 /* DTIM Period */
4335 u32 dtim_period;
4336
4337 /* Listen Interval */
4338 u32 listen_interval;
4339
4340 /* Broadcast Multicast Filter */
4341 u32 bcast_mcast_filter;
4342
4343 /* Beacon Early Termination */
4344 u32 enable_bet;
4345
4346 /* Beacon Early Termination Interval */
4347 u32 bet_interval;
4348} __packed;
4349
4350struct wcn36xx_hal_set_power_params_resp {
4351
4352 struct wcn36xx_hal_msg_header header;
4353
4354 /* status of the request */
4355 u32 status;
4356} __packed;
4357
4358/* Capability bitmap exchange definitions and macros starts */
4359
4360enum place_holder_in_cap_bitmap {
4361 MCC = 0,
4362 P2P = 1,
4363 DOT11AC = 2,
4364 SLM_SESSIONIZATION = 3,
4365 DOT11AC_OPMODE = 4,
4366 SAP32STA = 5,
4367 TDLS = 6,
4368 P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
4369 WLANACTIVE_OFFLOAD = 8,
4370 BEACON_OFFLOAD = 9,
4371 SCAN_OFFLOAD = 10,
4372 ROAM_OFFLOAD = 11,
4373 BCN_MISS_OFFLOAD = 12,
4374 STA_POWERSAVE = 13,
4375 STA_ADVANCED_PWRSAVE = 14,
4376 AP_UAPSD = 15,
4377 AP_DFS = 16,
4378 BLOCKACK = 17,
4379 PHY_ERR = 18,
4380 BCN_FILTER = 19,
4381 RTT = 20,
4382 RATECTRL = 21,
4383 WOW = 22,
4384 MAX_FEATURE_SUPPORTED = 128,
4385};
4386
4387struct wcn36xx_hal_feat_caps_msg {
4388
4389 struct wcn36xx_hal_msg_header header;
4390
4391 u32 feat_caps[4];
4392} __packed;
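
feat_caps is a 128-bit bitmap indexed by place_holder_in_cap_bitmap. A minimal sketch of setting and testing one bit follows; the helper names are illustrative only.

static void example_set_feat_cap(u32 *bitmap,
				 enum place_holder_in_cap_bitmap cap)
{
	if (cap >= MAX_FEATURE_SUPPORTED)
		return;
	bitmap[cap / 32] |= 1U << (cap % 32);
}

static bool example_get_feat_cap(const u32 *bitmap,
				 enum place_holder_in_cap_bitmap cap)
{
	if (cap >= MAX_FEATURE_SUPPORTED)
		return false;
	return !!(bitmap[cap / 32] & (1U << (cap % 32)));
}

Testing the DOT11AC bit of a received wcn36xx_hal_feat_caps_msg would then be example_get_feat_cap(msg->feat_caps, DOT11AC).
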
4393
4394/* status codes to help debug rekey failures */
4395enum gtk_rekey_status {
4396 WCN36XX_HAL_GTK_REKEY_STATUS_SUCCESS = 0,
4397
4398 /* rekey detected, but not handled */
4399 WCN36XX_HAL_GTK_REKEY_STATUS_NOT_HANDLED = 1,
4400
4401 /* MIC check error on M1 */
4402 WCN36XX_HAL_GTK_REKEY_STATUS_MIC_ERROR = 2,
4403
4404 /* decryption error on M1 */
4405 WCN36XX_HAL_GTK_REKEY_STATUS_DECRYPT_ERROR = 3,
4406
4407 /* M1 replay detected */
4408 WCN36XX_HAL_GTK_REKEY_STATUS_REPLAY_ERROR = 4,
4409
4410 /* missing GTK key descriptor in M1 */
4411 WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_KDE = 5,
4412
4413 /* missing iGTK key descriptor in M1 */
4414 WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_IGTK_KDE = 6,
4415
4416 /* key installation error */
4417 WCN36XX_HAL_GTK_REKEY_STATUS_INSTALL_ERROR = 7,
4418
4419 /* iGTK key installation error */
4420 WCN36XX_HAL_GTK_REKEY_STATUS_IGTK_INSTALL_ERROR = 8,
4421
4422 /* GTK rekey M2 response TX error */
4423 WCN36XX_HAL_GTK_REKEY_STATUS_RESP_TX_ERROR = 9,
4424
4425 /* non-specific general error */
4426 WCN36XX_HAL_GTK_REKEY_STATUS_GEN_ERROR = 255
4427};
4428
4429/* wake reason types */
4430enum wake_reason_type {
4431 WCN36XX_HAL_WAKE_REASON_NONE = 0,
4432
4433 /* magic packet match */
4434 WCN36XX_HAL_WAKE_REASON_MAGIC_PACKET = 1,
4435
4436 /* host defined pattern match */
4437 WCN36XX_HAL_WAKE_REASON_PATTERN_MATCH = 2,
4438
4439 /* EAP-ID frame detected */
4440 WCN36XX_HAL_WAKE_REASON_EAPID_PACKET = 3,
4441
4442 /* start of EAPOL 4-way handshake detected */
4443 WCN36XX_HAL_WAKE_REASON_EAPOL4WAY_PACKET = 4,
4444
4445 /* network scan offload match */
4446 WCN36XX_HAL_WAKE_REASON_NETSCAN_OFFL_MATCH = 5,
4447
4448 /* GTK rekey status wakeup (see status) */
4449 WCN36XX_HAL_WAKE_REASON_GTK_REKEY_STATUS = 6,
4450
4451 /* BSS connection lost */
4452 WCN36XX_HAL_WAKE_REASON_BSS_CONN_LOST = 7,
4453};
4454
4455/*
4456 Wake Packet which is saved at tWakeReasonParams.DataStart
 4457 This data is sent for any wake reasons that involve a packet-based wakeup:
4458
4459 WCN36XX_HAL_WAKE_REASON_TYPE_MAGIC_PACKET
4460 WCN36XX_HAL_WAKE_REASON_TYPE_PATTERN_MATCH
4461 WCN36XX_HAL_WAKE_REASON_TYPE_EAPID_PACKET
4462 WCN36XX_HAL_WAKE_REASON_TYPE_EAPOL4WAY_PACKET
4463 WCN36XX_HAL_WAKE_REASON_TYPE_GTK_REKEY_STATUS
4464
4465 The information is provided to the host for auditing and debug purposes
4466
4467*/
4468
4469/* Wake reason indication */
4470struct wcn36xx_hal_wake_reason_ind {
4471 struct wcn36xx_hal_msg_header header;
4472
4473 /* see tWakeReasonType */
4474 u32 reason;
4475
4476 /* argument specific to the reason type */
4477 u32 reason_arg;
4478
4479 /* length of optional data stored in this message, in case HAL
 4480	 * truncates the data (i.e. data packets), this length will be less
4481 * than the actual length */
4482 u32 stored_data_len;
4483
4484 /* actual length of data */
4485 u32 actual_data_len;
4486
4487 /* variable length start of data (length == storedDataLen) see
4488 * specific wake type */
4489 u8 data_start[1];
4490
4491 u32 bss_index:8;
4492 u32 reserved:24;
4493};
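
A hedged sketch of copying out the stored wake packet that trails the indication; data_start is treated as a variable-length buffer of stored_data_len bytes, as the comments above describe, and the helper name is hypothetical.

/* min_t comes from linux/kernel.h */
static size_t example_copy_wake_packet(const struct wcn36xx_hal_wake_reason_ind *ind,
				       u8 *buf, size_t buf_len)
{
	size_t len = min_t(size_t, ind->stored_data_len, buf_len);

	memcpy(buf, ind->data_start, len);
	return len;
}
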
4494
4495#define WCN36XX_HAL_GTK_KEK_BYTES 16
4496#define WCN36XX_HAL_GTK_KCK_BYTES 16
4497
4498#define WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE (1 << 0)
4499
4500#define GTK_SET_BSS_KEY_TAG 0x1234AA55
4501
4502struct wcn36xx_hal_gtk_offload_req_msg {
4503 struct wcn36xx_hal_msg_header header;
4504
4505 /* optional flags */
4506 u32 flags;
4507
4508 /* Key confirmation key */
4509 u8 kck[WCN36XX_HAL_GTK_KCK_BYTES];
4510
4511 /* key encryption key */
4512 u8 kek[WCN36XX_HAL_GTK_KEK_BYTES];
4513
4514 /* replay counter */
4515 u64 key_replay_counter;
4516
4517 u8 bss_index;
4518};
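
A sketch only of populating the GTK offload request; the caller is assumed to already hold the KCK/KEK material and a host-order replay counter (for example derived from mac80211 rekey data), and the helper name is hypothetical.

static void example_fill_gtk_offload(struct wcn36xx_hal_gtk_offload_req_msg *msg,
				     const u8 *kck, const u8 *kek,
				     u64 replay_counter, u8 bss_index)
{
	msg->flags = 0;		/* 0 = offload enabled; see the DISABLE flag above */
	memcpy(msg->kck, kck, WCN36XX_HAL_GTK_KCK_BYTES);
	memcpy(msg->kek, kek, WCN36XX_HAL_GTK_KEK_BYTES);
	msg->key_replay_counter = replay_counter;
	msg->bss_index = bss_index;
}
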
4519
4520struct wcn36xx_hal_gtk_offload_rsp_msg {
4521 struct wcn36xx_hal_msg_header header;
4522
4523 /* success or failure */
4524 u32 status;
4525
4526 u8 bss_index;
4527};
4528
4529struct wcn36xx_hal_gtk_offload_get_info_req_msg {
4530 struct wcn36xx_hal_msg_header header;
4531 u8 bss_index;
4532};
4533
4534struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
4535 struct wcn36xx_hal_msg_header header;
4536
4537 /* success or failure */
4538 u32 status;
4539
4540 /* last rekey status when the rekey was offloaded */
4541 u32 last_rekey_status;
4542
4543 /* current replay counter value */
4544 u64 key_replay_counter;
4545
4546 /* total rekey attempts */
4547 u32 total_rekey_count;
4548
4549 /* successful GTK rekeys */
4550 u32 gtk_rekey_count;
4551
4552 /* successful iGTK rekeys */
4553 u32 igtk_rekey_count;
4554
4555 u8 bss_index;
4556};
4557
4558struct dhcp_info {
 4559	/* Indicates the device mode associated with the DHCP activity */
4560 u8 device_mode;
4561
4562 u8 addr[ETH_ALEN];
4563};
4564
4565struct dhcp_ind_status {
4566 struct wcn36xx_hal_msg_header header;
4567
4568 /* success or failure */
4569 u32 status;
4570};
4571
4572/*
4573 * Thermal Mitigation mode of operation.
4574 *
 4575 * WCN36XX_HAL_THERMAL_MITIGATION_MODE_0 - Mitigation by disabling AMPDU aggregation
 4576 *
 4577 * WCN36XX_HAL_THERMAL_MITIGATION_MODE_1 - Mitigation by disabling AMPDU aggregation
 4578 * and reducing transmit power
4579 *
4580 * WCN36XX_HAL_THERMAL_MITIGATION_MODE_2 - Not supported */
4581enum wcn36xx_hal_thermal_mitigation_mode_type {
4582 HAL_THERMAL_MITIGATION_MODE_INVALID = -1,
4583 HAL_THERMAL_MITIGATION_MODE_0,
4584 HAL_THERMAL_MITIGATION_MODE_1,
4585 HAL_THERMAL_MITIGATION_MODE_2,
4586 HAL_THERMAL_MITIGATION_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
4587};
4588
4589
4590/*
4591 * Thermal Mitigation level.
 4592 * Note the levels are incremental, i.e. WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 =
4593 * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 +
4594 * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1
4595 *
4596 * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 - lowest level of thermal mitigation.
4597 * This level indicates normal mode of operation
4598 *
4599 * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1 - 1st level of thermal mitigation
4600 *
4601 * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 - 2nd level of thermal mitigation
4602 *
4603 * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_3 - 3rd level of thermal mitigation
4604 *
4605 * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_4 - 4th level of thermal mitigation
4606 */
4607enum wcn36xx_hal_thermal_mitigation_level_type {
4608 HAL_THERMAL_MITIGATION_LEVEL_INVALID = -1,
4609 HAL_THERMAL_MITIGATION_LEVEL_0,
4610 HAL_THERMAL_MITIGATION_LEVEL_1,
4611 HAL_THERMAL_MITIGATION_LEVEL_2,
4612 HAL_THERMAL_MITIGATION_LEVEL_3,
4613 HAL_THERMAL_MITIGATION_LEVEL_4,
4614 HAL_THERMAL_MITIGATION_LEVEL_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
4615};
4616
4617
4618/* WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ */
4619struct set_thermal_mitigation_req_msg {
4620 struct wcn36xx_hal_msg_header header;
4621
4622 /* Thermal Mitigation Operation Mode */
4623 enum wcn36xx_hal_thermal_mitigation_mode_type mode;
4624
4625 /* Thermal Mitigation Level */
4626 enum wcn36xx_hal_thermal_mitigation_level_type level;
4627};
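
A minimal sketch requesting mode 0 mitigation at level 2; as the level comment above notes, a higher level is cumulative over the lower ones. The helper name is illustrative and the header is assumed to be filled elsewhere.

static void example_fill_thermal_mitigation(
		struct set_thermal_mitigation_req_msg *msg)
{
	msg->mode = HAL_THERMAL_MITIGATION_MODE_0;	/* AMPDU-only mitigation */
	msg->level = HAL_THERMAL_MITIGATION_LEVEL_2;	/* implies levels 0 and 1 */
}
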
4628
4629struct set_thermal_mitigation_resp {
4630
4631 struct wcn36xx_hal_msg_header header;
4632
4633 /* status of the request */
4634 u32 status;
4635};
4636
4637/* Per STA Class B Statistics. Class B statistics are STA TX/RX stats
4638 * provided to FW from Host via periodic messages */
4639struct stats_class_b_ind {
4640 struct wcn36xx_hal_msg_header header;
4641
 4642	/* Duration over which these stats were collected */
4643 u32 duration;
4644
4645 /* Per STA Stats */
4646
4647 /* TX stats */
4648 u32 tx_bytes_pushed;
4649 u32 tx_packets_pushed;
4650
4651 /* RX stats */
4652 u32 rx_bytes_rcvd;
4653 u32 rx_packets_rcvd;
4654 u32 rx_time_total;
4655};
4656
4657#endif /* _HAL_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
new file mode 100644
index 000000000000..7839b31e4826
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -0,0 +1,1036 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include "wcn36xx.h"
22
23unsigned int wcn36xx_dbg_mask;
24module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
25MODULE_PARM_DESC(debug_mask, "Debugging mask");
26
27#define CHAN2G(_freq, _idx) { \
28 .band = IEEE80211_BAND_2GHZ, \
29 .center_freq = (_freq), \
30 .hw_value = (_idx), \
31 .max_power = 25, \
32}
33
34#define CHAN5G(_freq, _idx) { \
35 .band = IEEE80211_BAND_5GHZ, \
36 .center_freq = (_freq), \
37 .hw_value = (_idx), \
38 .max_power = 25, \
39}
40
 41/* The wcn firmware expects channel values to match
42 * their mnemonic values. So use these for .hw_value. */
43static struct ieee80211_channel wcn_2ghz_channels[] = {
44 CHAN2G(2412, 1), /* Channel 1 */
45 CHAN2G(2417, 2), /* Channel 2 */
46 CHAN2G(2422, 3), /* Channel 3 */
47 CHAN2G(2427, 4), /* Channel 4 */
48 CHAN2G(2432, 5), /* Channel 5 */
49 CHAN2G(2437, 6), /* Channel 6 */
50 CHAN2G(2442, 7), /* Channel 7 */
51 CHAN2G(2447, 8), /* Channel 8 */
52 CHAN2G(2452, 9), /* Channel 9 */
53 CHAN2G(2457, 10), /* Channel 10 */
54 CHAN2G(2462, 11), /* Channel 11 */
55 CHAN2G(2467, 12), /* Channel 12 */
56 CHAN2G(2472, 13), /* Channel 13 */
57 CHAN2G(2484, 14) /* Channel 14 */
58
59};
60
61static struct ieee80211_channel wcn_5ghz_channels[] = {
62 CHAN5G(5180, 36),
63 CHAN5G(5200, 40),
64 CHAN5G(5220, 44),
65 CHAN5G(5240, 48),
66 CHAN5G(5260, 52),
67 CHAN5G(5280, 56),
68 CHAN5G(5300, 60),
69 CHAN5G(5320, 64),
70 CHAN5G(5500, 100),
71 CHAN5G(5520, 104),
72 CHAN5G(5540, 108),
73 CHAN5G(5560, 112),
74 CHAN5G(5580, 116),
75 CHAN5G(5600, 120),
76 CHAN5G(5620, 124),
77 CHAN5G(5640, 128),
78 CHAN5G(5660, 132),
79 CHAN5G(5700, 140),
80 CHAN5G(5745, 149),
81 CHAN5G(5765, 153),
82 CHAN5G(5785, 157),
83 CHAN5G(5805, 161),
84 CHAN5G(5825, 165)
85};
86
87#define RATE(_bitrate, _hw_rate, _flags) { \
88 .bitrate = (_bitrate), \
89 .flags = (_flags), \
90 .hw_value = (_hw_rate), \
91 .hw_value_short = (_hw_rate) \
92}
93
94static struct ieee80211_rate wcn_2ghz_rates[] = {
95 RATE(10, HW_RATE_INDEX_1MBPS, 0),
96 RATE(20, HW_RATE_INDEX_2MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
97 RATE(55, HW_RATE_INDEX_5_5MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
98 RATE(110, HW_RATE_INDEX_11MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
99 RATE(60, HW_RATE_INDEX_6MBPS, 0),
100 RATE(90, HW_RATE_INDEX_9MBPS, 0),
101 RATE(120, HW_RATE_INDEX_12MBPS, 0),
102 RATE(180, HW_RATE_INDEX_18MBPS, 0),
103 RATE(240, HW_RATE_INDEX_24MBPS, 0),
104 RATE(360, HW_RATE_INDEX_36MBPS, 0),
105 RATE(480, HW_RATE_INDEX_48MBPS, 0),
106 RATE(540, HW_RATE_INDEX_54MBPS, 0)
107};
108
109static struct ieee80211_rate wcn_5ghz_rates[] = {
110 RATE(60, HW_RATE_INDEX_6MBPS, 0),
111 RATE(90, HW_RATE_INDEX_9MBPS, 0),
112 RATE(120, HW_RATE_INDEX_12MBPS, 0),
113 RATE(180, HW_RATE_INDEX_18MBPS, 0),
114 RATE(240, HW_RATE_INDEX_24MBPS, 0),
115 RATE(360, HW_RATE_INDEX_36MBPS, 0),
116 RATE(480, HW_RATE_INDEX_48MBPS, 0),
117 RATE(540, HW_RATE_INDEX_54MBPS, 0)
118};
119
120static struct ieee80211_supported_band wcn_band_2ghz = {
121 .channels = wcn_2ghz_channels,
122 .n_channels = ARRAY_SIZE(wcn_2ghz_channels),
123 .bitrates = wcn_2ghz_rates,
124 .n_bitrates = ARRAY_SIZE(wcn_2ghz_rates),
125 .ht_cap = {
126 .cap = IEEE80211_HT_CAP_GRN_FLD |
127 IEEE80211_HT_CAP_SGI_20 |
128 IEEE80211_HT_CAP_DSSSCCK40 |
129 IEEE80211_HT_CAP_LSIG_TXOP_PROT,
130 .ht_supported = true,
131 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
132 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
133 .mcs = {
134 .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
135 .rx_highest = cpu_to_le16(72),
136 .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
137 }
138 }
139};
140
141static struct ieee80211_supported_band wcn_band_5ghz = {
142 .channels = wcn_5ghz_channels,
143 .n_channels = ARRAY_SIZE(wcn_5ghz_channels),
144 .bitrates = wcn_5ghz_rates,
145 .n_bitrates = ARRAY_SIZE(wcn_5ghz_rates),
146 .ht_cap = {
147 .cap = IEEE80211_HT_CAP_GRN_FLD |
148 IEEE80211_HT_CAP_SGI_20 |
149 IEEE80211_HT_CAP_DSSSCCK40 |
150 IEEE80211_HT_CAP_LSIG_TXOP_PROT |
151 IEEE80211_HT_CAP_SGI_40 |
152 IEEE80211_HT_CAP_SUP_WIDTH_20_40,
153 .ht_supported = true,
154 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
155 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
156 .mcs = {
157 .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
158 .rx_highest = cpu_to_le16(72),
159 .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
160 }
161 }
162};
163
164#ifdef CONFIG_PM
165
166static const struct wiphy_wowlan_support wowlan_support = {
167 .flags = WIPHY_WOWLAN_ANY
168};
169
170#endif
171
172static inline u8 get_sta_index(struct ieee80211_vif *vif,
173 struct wcn36xx_sta *sta_priv)
174{
175 return NL80211_IFTYPE_STATION == vif->type ?
176 sta_priv->bss_sta_index :
177 sta_priv->sta_index;
178}
179
180static int wcn36xx_start(struct ieee80211_hw *hw)
181{
182 struct wcn36xx *wcn = hw->priv;
183 int ret;
184
185 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac start\n");
186
187 /* SMD initialization */
188 ret = wcn36xx_smd_open(wcn);
189 if (ret) {
190 wcn36xx_err("Failed to open smd channel: %d\n", ret);
191 goto out_err;
192 }
193
194 /* Allocate memory pools for Mgmt BD headers and Data BD headers */
195 ret = wcn36xx_dxe_allocate_mem_pools(wcn);
196 if (ret) {
197 wcn36xx_err("Failed to alloc DXE mempool: %d\n", ret);
198 goto out_smd_close;
199 }
200
201 ret = wcn36xx_dxe_alloc_ctl_blks(wcn);
202 if (ret) {
203 wcn36xx_err("Failed to alloc DXE ctl blocks: %d\n", ret);
204 goto out_free_dxe_pool;
205 }
206
207 wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
208 if (!wcn->hal_buf) {
209 wcn36xx_err("Failed to allocate smd buf\n");
210 ret = -ENOMEM;
211 goto out_free_dxe_ctl;
212 }
213
214 ret = wcn36xx_smd_load_nv(wcn);
215 if (ret) {
216 wcn36xx_err("Failed to push NV to chip\n");
217 goto out_free_smd_buf;
218 }
219
220 ret = wcn36xx_smd_start(wcn);
221 if (ret) {
222 wcn36xx_err("Failed to start chip\n");
223 goto out_free_smd_buf;
224 }
225
226 /* DMA channel initialization */
227 ret = wcn36xx_dxe_init(wcn);
228 if (ret) {
229 wcn36xx_err("DXE init failed\n");
230 goto out_smd_stop;
231 }
232
233 wcn36xx_debugfs_init(wcn);
234
235 if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
236 ret = wcn36xx_smd_feature_caps_exchange(wcn);
237 if (ret)
238 wcn36xx_warn("Exchange feature caps failed\n");
239 }
240 INIT_LIST_HEAD(&wcn->vif_list);
241 return 0;
242
243out_smd_stop:
244 wcn36xx_smd_stop(wcn);
245out_free_smd_buf:
246 kfree(wcn->hal_buf);
247out_free_dxe_pool:
248 wcn36xx_dxe_free_mem_pools(wcn);
249out_free_dxe_ctl:
250 wcn36xx_dxe_free_ctl_blks(wcn);
251out_smd_close:
252 wcn36xx_smd_close(wcn);
253out_err:
254 return ret;
255}
256
257static void wcn36xx_stop(struct ieee80211_hw *hw)
258{
259 struct wcn36xx *wcn = hw->priv;
260
261 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
262
263 wcn36xx_debugfs_exit(wcn);
264 wcn36xx_smd_stop(wcn);
265 wcn36xx_dxe_deinit(wcn);
266 wcn36xx_smd_close(wcn);
267
268 wcn36xx_dxe_free_mem_pools(wcn);
269 wcn36xx_dxe_free_ctl_blks(wcn);
270
271 kfree(wcn->hal_buf);
272}
273
274static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
275{
276 struct wcn36xx *wcn = hw->priv;
277 struct ieee80211_vif *vif = NULL;
278 struct wcn36xx_vif *tmp;
279
280 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
281
282 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
283 int ch = WCN36XX_HW_CHANNEL(wcn);
284 wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
285 ch);
286 list_for_each_entry(tmp, &wcn->vif_list, list) {
287 vif = container_of((void *)tmp,
288 struct ieee80211_vif,
289 drv_priv);
290 wcn36xx_smd_switch_channel(wcn, vif, ch);
291 }
292 }
293
294 return 0;
295}
296
297#define WCN36XX_SUPPORTED_FILTERS (0)
298
299static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
300 unsigned int changed,
301 unsigned int *total, u64 multicast)
302{
303 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
304
305 *total &= WCN36XX_SUPPORTED_FILTERS;
306}
307
308static void wcn36xx_tx(struct ieee80211_hw *hw,
309 struct ieee80211_tx_control *control,
310 struct sk_buff *skb)
311{
312 struct wcn36xx *wcn = hw->priv;
313 struct wcn36xx_sta *sta_priv = NULL;
314
315 if (control->sta)
316 sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
317
318 if (wcn36xx_start_tx(wcn, sta_priv, skb))
319 ieee80211_free_txskb(wcn->hw, skb);
320}
321
322static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
323 struct ieee80211_vif *vif,
324 struct ieee80211_sta *sta,
325 struct ieee80211_key_conf *key_conf)
326{
327 struct wcn36xx *wcn = hw->priv;
328 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
329 struct wcn36xx_sta *sta_priv = vif_priv->sta;
330 int ret = 0;
331 u8 key[WLAN_MAX_KEY_LEN];
332
333 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 set key\n");
334 wcn36xx_dbg(WCN36XX_DBG_MAC, "Key: cmd=0x%x algo:0x%x, id:%d, len:%d flags 0x%x\n",
335 cmd, key_conf->cipher, key_conf->keyidx,
336 key_conf->keylen, key_conf->flags);
337 wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "KEY: ",
338 key_conf->key,
339 key_conf->keylen);
340
341 switch (key_conf->cipher) {
342 case WLAN_CIPHER_SUITE_WEP40:
343 vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
344 break;
345 case WLAN_CIPHER_SUITE_WEP104:
346 vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
347 break;
348 case WLAN_CIPHER_SUITE_CCMP:
349 vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
350 break;
351 case WLAN_CIPHER_SUITE_TKIP:
352 vif_priv->encrypt_type = WCN36XX_HAL_ED_TKIP;
353 break;
354 default:
355 wcn36xx_err("Unsupported key type 0x%x\n",
356 key_conf->cipher);
357 ret = -EOPNOTSUPP;
358 goto out;
359 }
360
361 switch (cmd) {
362 case SET_KEY:
363 if (WCN36XX_HAL_ED_TKIP == vif_priv->encrypt_type) {
364 /*
365 * Supplicant is sending key in the wrong order:
366 * Temporal Key (16 b) - TX MIC (8 b) - RX MIC (8 b)
367 * but HW expects it to be in the order as described in
368 * IEEE 802.11 spec (see chapter 11.7) like this:
369 * Temporal Key (16 b) - RX MIC (8 b) - TX MIC (8 b)
370 */
371 memcpy(key, key_conf->key, 16);
372 memcpy(key + 16, key_conf->key + 24, 8);
373 memcpy(key + 24, key_conf->key + 16, 8);
374 } else {
375 memcpy(key, key_conf->key, key_conf->keylen);
376 }
377
378 if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
379 sta_priv->is_data_encrypted = true;
380 /* Reconfigure bss with encrypt_type */
381 if (NL80211_IFTYPE_STATION == vif->type)
382 wcn36xx_smd_config_bss(wcn,
383 vif,
384 sta,
385 sta->addr,
386 true);
387
388 wcn36xx_smd_set_stakey(wcn,
389 vif_priv->encrypt_type,
390 key_conf->keyidx,
391 key_conf->keylen,
392 key,
393 get_sta_index(vif, sta_priv));
394 } else {
395 wcn36xx_smd_set_bsskey(wcn,
396 vif_priv->encrypt_type,
397 key_conf->keyidx,
398 key_conf->keylen,
399 key);
400 if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
401 (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
402 sta_priv->is_data_encrypted = true;
403 wcn36xx_smd_set_stakey(wcn,
404 vif_priv->encrypt_type,
405 key_conf->keyidx,
406 key_conf->keylen,
407 key,
408 get_sta_index(vif, sta_priv));
409 }
410 }
411 break;
412 case DISABLE_KEY:
413 if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
414 wcn36xx_smd_remove_bsskey(wcn,
415 vif_priv->encrypt_type,
416 key_conf->keyidx);
417 } else {
418 sta_priv->is_data_encrypted = false;
419 /* do not remove key if disassociated */
420 if (sta_priv->aid)
421 wcn36xx_smd_remove_stakey(wcn,
422 vif_priv->encrypt_type,
423 key_conf->keyidx,
424 get_sta_index(vif, sta_priv));
425 }
426 break;
427 default:
428 wcn36xx_err("Unsupported key cmd 0x%x\n", cmd);
429 ret = -EOPNOTSUPP;
430 goto out;
431 break;
432 }
433
434out:
435 return ret;
436}
437
438static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw)
439{
440 struct wcn36xx *wcn = hw->priv;
441
442 wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
443 wcn36xx_smd_start_scan(wcn);
444}
445
446static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw)
447{
448 struct wcn36xx *wcn = hw->priv;
449
450 wcn36xx_smd_end_scan(wcn);
451 wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
452}
453
454static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
455 enum ieee80211_band band)
456{
457 int i, size;
458 u16 *rates_table;
459 struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
460 u32 rates = sta->supp_rates[band];
461
462 memset(&sta_priv->supported_rates, 0,
463 sizeof(sta_priv->supported_rates));
464 sta_priv->supported_rates.op_rate_mode = STA_11n;
465
466 size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
467 rates_table = sta_priv->supported_rates.dsss_rates;
468 if (band == IEEE80211_BAND_2GHZ) {
469 for (i = 0; i < size; i++) {
470 if (rates & 0x01) {
471 rates_table[i] = wcn_2ghz_rates[i].hw_value;
472 rates = rates >> 1;
473 }
474 }
475 }
476
477 size = ARRAY_SIZE(sta_priv->supported_rates.ofdm_rates);
478 rates_table = sta_priv->supported_rates.ofdm_rates;
479 for (i = 0; i < size; i++) {
480 if (rates & 0x01) {
481 rates_table[i] = wcn_5ghz_rates[i].hw_value;
482 rates = rates >> 1;
483 }
484 }
485
486 if (sta->ht_cap.ht_supported) {
487 BUILD_BUG_ON(sizeof(sta->ht_cap.mcs.rx_mask) >
488 sizeof(sta_priv->supported_rates.supported_mcs_set));
489 memcpy(sta_priv->supported_rates.supported_mcs_set,
490 sta->ht_cap.mcs.rx_mask,
491 sizeof(sta->ht_cap.mcs.rx_mask));
492 }
493}
494void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
495{
496 u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = {
497 HW_RATE_INDEX_6MBPS,
498 HW_RATE_INDEX_9MBPS,
499 HW_RATE_INDEX_12MBPS,
500 HW_RATE_INDEX_18MBPS,
501 HW_RATE_INDEX_24MBPS,
502 HW_RATE_INDEX_36MBPS,
503 HW_RATE_INDEX_48MBPS,
504 HW_RATE_INDEX_54MBPS
505 };
506 u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES] = {
507 HW_RATE_INDEX_1MBPS,
508 HW_RATE_INDEX_2MBPS,
509 HW_RATE_INDEX_5_5MBPS,
510 HW_RATE_INDEX_11MBPS
511 };
512
513 rates->op_rate_mode = STA_11n;
514 memcpy(rates->dsss_rates, dsss_rates,
515 sizeof(*dsss_rates) * WCN36XX_HAL_NUM_DSSS_RATES);
516 memcpy(rates->ofdm_rates, ofdm_rates,
517 sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES);
518 rates->supported_mcs_set[0] = 0xFF;
519}
520static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
521 struct ieee80211_vif *vif,
522 struct ieee80211_bss_conf *bss_conf,
523 u32 changed)
524{
525 struct wcn36xx *wcn = hw->priv;
526 struct sk_buff *skb = NULL;
527 u16 tim_off, tim_len;
528 enum wcn36xx_hal_link_state link_state;
529 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
530
531 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
532 vif, changed);
533
534 if (changed & BSS_CHANGED_BEACON_INFO) {
535 wcn36xx_dbg(WCN36XX_DBG_MAC,
536 "mac bss changed dtim period %d\n",
537 bss_conf->dtim_period);
538
539 vif_priv->dtim_period = bss_conf->dtim_period;
540 }
541
542 if (changed & BSS_CHANGED_PS) {
543 wcn36xx_dbg(WCN36XX_DBG_MAC,
544 "mac bss PS set %d\n",
545 bss_conf->ps);
546 if (bss_conf->ps) {
547 wcn36xx_pmc_enter_bmps_state(wcn, vif);
548 } else {
549 wcn36xx_pmc_exit_bmps_state(wcn, vif);
550 }
551 }
552
553 if (changed & BSS_CHANGED_BSSID) {
554 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
555 bss_conf->bssid);
556
557 if (!is_zero_ether_addr(bss_conf->bssid)) {
558 vif_priv->is_joining = true;
559 vif_priv->bss_index = 0xff;
560 wcn36xx_smd_join(wcn, bss_conf->bssid,
561 vif->addr, WCN36XX_HW_CHANNEL(wcn));
562 wcn36xx_smd_config_bss(wcn, vif, NULL,
563 bss_conf->bssid, false);
564 } else {
565 vif_priv->is_joining = false;
566 wcn36xx_smd_delete_bss(wcn, vif);
567 }
568 }
569
570 if (changed & BSS_CHANGED_SSID) {
571 wcn36xx_dbg(WCN36XX_DBG_MAC,
572 "mac bss changed ssid\n");
573 wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "ssid ",
574 bss_conf->ssid, bss_conf->ssid_len);
575
576 vif_priv->ssid.length = bss_conf->ssid_len;
577 memcpy(&vif_priv->ssid.ssid,
578 bss_conf->ssid,
579 bss_conf->ssid_len);
580 }
581
582 if (changed & BSS_CHANGED_ASSOC) {
583 vif_priv->is_joining = false;
584 if (bss_conf->assoc) {
585 struct ieee80211_sta *sta;
586 struct wcn36xx_sta *sta_priv;
587
588 wcn36xx_dbg(WCN36XX_DBG_MAC,
589 "mac assoc bss %pM vif %pM AID=%d\n",
590 bss_conf->bssid,
591 vif->addr,
592 bss_conf->aid);
593
594 rcu_read_lock();
595 sta = ieee80211_find_sta(vif, bss_conf->bssid);
596 if (!sta) {
597 wcn36xx_err("sta %pM is not found\n",
598 bss_conf->bssid);
599 rcu_read_unlock();
600 goto out;
601 }
602 sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
603
604 wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
605
606 wcn36xx_smd_set_link_st(wcn, bss_conf->bssid,
607 vif->addr,
608 WCN36XX_HAL_LINK_POSTASSOC_STATE);
609 wcn36xx_smd_config_bss(wcn, vif, sta,
610 bss_conf->bssid,
611 true);
612 sta_priv->aid = bss_conf->aid;
613 /*
 614			 * config_sta must be called from here because this is the
 615			 * place where the AID is available.
616 */
617 wcn36xx_smd_config_sta(wcn, vif, sta);
618 rcu_read_unlock();
619 } else {
620 wcn36xx_dbg(WCN36XX_DBG_MAC,
621 "disassociated bss %pM vif %pM AID=%d\n",
622 bss_conf->bssid,
623 vif->addr,
624 bss_conf->aid);
625 wcn36xx_smd_set_link_st(wcn,
626 bss_conf->bssid,
627 vif->addr,
628 WCN36XX_HAL_LINK_IDLE_STATE);
629 }
630 }
631
632 if (changed & BSS_CHANGED_AP_PROBE_RESP) {
633 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed ap probe resp\n");
634 skb = ieee80211_proberesp_get(hw, vif);
635 if (!skb) {
 636			wcn36xx_err("failed to alloc probe resp skb\n");
637 goto out;
638 }
639
640 wcn36xx_smd_update_proberesp_tmpl(wcn, vif, skb);
641 dev_kfree_skb(skb);
642 }
643
644 if (changed & BSS_CHANGED_BEACON_ENABLED) {
645 wcn36xx_dbg(WCN36XX_DBG_MAC,
646 "mac bss changed beacon enabled %d\n",
647 bss_conf->enable_beacon);
648
649 if (bss_conf->enable_beacon) {
650 vif_priv->bss_index = 0xff;
651 wcn36xx_smd_config_bss(wcn, vif, NULL,
652 vif->addr, false);
653 skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
654 &tim_len);
655 if (!skb) {
656 wcn36xx_err("failed to alloc beacon skb\n");
657 goto out;
658 }
659 wcn36xx_smd_send_beacon(wcn, vif, skb, tim_off, 0);
660 dev_kfree_skb(skb);
661
662 if (vif->type == NL80211_IFTYPE_ADHOC ||
663 vif->type == NL80211_IFTYPE_MESH_POINT)
664 link_state = WCN36XX_HAL_LINK_IBSS_STATE;
665 else
666 link_state = WCN36XX_HAL_LINK_AP_STATE;
667
668 wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
669 link_state);
670 } else {
671 wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
672 WCN36XX_HAL_LINK_IDLE_STATE);
673 wcn36xx_smd_delete_bss(wcn, vif);
674 }
675 }
676out:
677 return;
678}
679
680/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
681static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
682{
683 struct wcn36xx *wcn = hw->priv;
684 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
685
686 wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value);
687 return 0;
688}
689
690static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
691 struct ieee80211_vif *vif)
692{
693 struct wcn36xx *wcn = hw->priv;
694 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
695 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
696
697 list_del(&vif_priv->list);
698 wcn36xx_smd_delete_sta_self(wcn, vif->addr);
699}
700
701static int wcn36xx_add_interface(struct ieee80211_hw *hw,
702 struct ieee80211_vif *vif)
703{
704 struct wcn36xx *wcn = hw->priv;
705 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
706
707 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
708 vif, vif->type);
709
710 if (!(NL80211_IFTYPE_STATION == vif->type ||
711 NL80211_IFTYPE_AP == vif->type ||
712 NL80211_IFTYPE_ADHOC == vif->type ||
713 NL80211_IFTYPE_MESH_POINT == vif->type)) {
714 wcn36xx_warn("Unsupported interface type requested: %d\n",
715 vif->type);
716 return -EOPNOTSUPP;
717 }
718
719 list_add(&vif_priv->list, &wcn->vif_list);
720 wcn36xx_smd_add_sta_self(wcn, vif);
721
722 return 0;
723}
724
725static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
726 struct ieee80211_sta *sta)
727{
728 struct wcn36xx *wcn = hw->priv;
729 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
730 struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
731 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
732 vif, sta->addr);
733
734 vif_priv->sta = sta_priv;
735 sta_priv->vif = vif_priv;
736 /*
737 * For STA mode HW will be configured on BSS_CHANGED_ASSOC because
738 * at this stage AID is not available yet.
739 */
740 if (NL80211_IFTYPE_STATION != vif->type) {
741 wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
742 sta_priv->aid = sta->aid;
743 wcn36xx_smd_config_sta(wcn, vif, sta);
744 }
745 return 0;
746}
747
748static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
749 struct ieee80211_vif *vif,
750 struct ieee80211_sta *sta)
751{
752 struct wcn36xx *wcn = hw->priv;
753 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
754 struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
755
756 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
757 vif, sta->addr, sta_priv->sta_index);
758
759 wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
760 vif_priv->sta = NULL;
761 sta_priv->vif = NULL;
762 return 0;
763}
764
765#ifdef CONFIG_PM
766
767static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
768{
769 struct wcn36xx *wcn = hw->priv;
770
771 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac suspend\n");
772
773 flush_workqueue(wcn->hal_ind_wq);
774 wcn36xx_smd_set_power_params(wcn, true);
775 return 0;
776}
777
778static int wcn36xx_resume(struct ieee80211_hw *hw)
779{
780 struct wcn36xx *wcn = hw->priv;
781
782 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac resume\n");
783
784 flush_workqueue(wcn->hal_ind_wq);
785 wcn36xx_smd_set_power_params(wcn, false);
786 return 0;
787}
788
789#endif
790
791static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
792 struct ieee80211_vif *vif,
793 enum ieee80211_ampdu_mlme_action action,
794 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
795 u8 buf_size)
796{
797 struct wcn36xx *wcn = hw->priv;
798 struct wcn36xx_sta *sta_priv = NULL;
799
800 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
801 action, tid);
802
803 sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
804
805 switch (action) {
806 case IEEE80211_AMPDU_RX_START:
807 sta_priv->tid = tid;
808 wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
809 get_sta_index(vif, sta_priv));
810 wcn36xx_smd_add_ba(wcn);
811 wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
812 ieee80211_start_tx_ba_session(sta, tid, 0);
813 break;
814 case IEEE80211_AMPDU_RX_STOP:
815 wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
816 break;
817 case IEEE80211_AMPDU_TX_START:
818 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
819 break;
820 case IEEE80211_AMPDU_TX_OPERATIONAL:
821 wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
822 get_sta_index(vif, sta_priv));
823 break;
824 case IEEE80211_AMPDU_TX_STOP_FLUSH:
825 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
826 case IEEE80211_AMPDU_TX_STOP_CONT:
827 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
828 break;
829 default:
830 wcn36xx_err("Unknown AMPDU action\n");
831 }
832
833 return 0;
834}
835
836static const struct ieee80211_ops wcn36xx_ops = {
837 .start = wcn36xx_start,
838 .stop = wcn36xx_stop,
839 .add_interface = wcn36xx_add_interface,
840 .remove_interface = wcn36xx_remove_interface,
841#ifdef CONFIG_PM
842 .suspend = wcn36xx_suspend,
843 .resume = wcn36xx_resume,
844#endif
845 .config = wcn36xx_config,
846 .configure_filter = wcn36xx_configure_filter,
847 .tx = wcn36xx_tx,
848 .set_key = wcn36xx_set_key,
849 .sw_scan_start = wcn36xx_sw_scan_start,
850 .sw_scan_complete = wcn36xx_sw_scan_complete,
851 .bss_info_changed = wcn36xx_bss_info_changed,
852 .set_rts_threshold = wcn36xx_set_rts_threshold,
853 .sta_add = wcn36xx_sta_add,
854 .sta_remove = wcn36xx_sta_remove,
855 .ampdu_action = wcn36xx_ampdu_action,
856};
857
858static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
859{
860 int ret = 0;
861
862 static const u32 cipher_suites[] = {
863 WLAN_CIPHER_SUITE_WEP40,
864 WLAN_CIPHER_SUITE_WEP104,
865 WLAN_CIPHER_SUITE_TKIP,
866 WLAN_CIPHER_SUITE_CCMP,
867 };
868
869 wcn->hw->flags = IEEE80211_HW_SIGNAL_DBM |
870 IEEE80211_HW_HAS_RATE_CONTROL |
871 IEEE80211_HW_SUPPORTS_PS |
872 IEEE80211_HW_CONNECTION_MONITOR |
873 IEEE80211_HW_AMPDU_AGGREGATION |
874 IEEE80211_HW_TIMING_BEACON_ONLY;
875
876 wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
877 BIT(NL80211_IFTYPE_AP) |
878 BIT(NL80211_IFTYPE_ADHOC) |
879 BIT(NL80211_IFTYPE_MESH_POINT);
880
881 wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
882 wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
883
884 wcn->hw->wiphy->cipher_suites = cipher_suites;
885 wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
886
887 wcn->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
888
889#ifdef CONFIG_PM
890 wcn->hw->wiphy->wowlan = &wowlan_support;
891#endif
892
893 wcn->hw->max_listen_interval = 200;
894
895 wcn->hw->queues = 4;
896
897 SET_IEEE80211_DEV(wcn->hw, wcn->dev);
898
899 wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta);
900 wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif);
901
902 return ret;
903}
904
905static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
906 struct platform_device *pdev)
907{
908 struct resource *res;
909 /* Set TX IRQ */
910 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
911 "wcnss_wlantx_irq");
912 if (!res) {
913 wcn36xx_err("failed to get tx_irq\n");
914 return -ENOENT;
915 }
916 wcn->tx_irq = res->start;
917
918 /* Set RX IRQ */
919 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
920 "wcnss_wlanrx_irq");
921 if (!res) {
922 wcn36xx_err("failed to get rx_irq\n");
923 return -ENOENT;
924 }
925 wcn->rx_irq = res->start;
926
927 /* Map the memory */
928 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
929 "wcnss_mmio");
930 if (!res) {
931 wcn36xx_err("failed to get mmio\n");
932 return -ENOENT;
933 }
934 wcn->mmio = ioremap(res->start, resource_size(res));
935 if (!wcn->mmio) {
936 wcn36xx_err("failed to map io memory\n");
937 return -ENOMEM;
938 }
939 return 0;
940}
941
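/*
 * Probe allocates the mac80211 hw structure with struct wcn36xx as private
 * data, takes the control ops from platform_data, reads the permanent MAC
 * address through those ops, grabs the platform resources and finally
 * registers with mac80211.
 */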
942static int wcn36xx_probe(struct platform_device *pdev)
943{
944 struct ieee80211_hw *hw;
945 struct wcn36xx *wcn;
946 int ret;
947 u8 addr[ETH_ALEN];
948
949 wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
950
951 hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops);
952 if (!hw) {
953 wcn36xx_err("failed to alloc hw\n");
954 ret = -ENOMEM;
955 goto out_err;
956 }
957 platform_set_drvdata(pdev, hw);
958 wcn = hw->priv;
959 wcn->hw = hw;
960 wcn->dev = &pdev->dev;
961 wcn->ctrl_ops = pdev->dev.platform_data;
962
963 mutex_init(&wcn->hal_mutex);
964
965 if (!wcn->ctrl_ops->get_hw_mac(addr)) {
966 wcn36xx_info("mac address: %pM\n", addr);
967 SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
968 }
969
970 ret = wcn36xx_platform_get_resources(wcn, pdev);
971 if (ret)
972 goto out_wq;
973
974 wcn36xx_init_ieee80211(wcn);
975 ret = ieee80211_register_hw(wcn->hw);
976 if (ret)
977 goto out_unmap;
978
979 return 0;
980
981out_unmap:
982 iounmap(wcn->mmio);
983out_wq:
984 ieee80211_free_hw(hw);
985out_err:
986 return ret;
987}

 988static int wcn36xx_remove(struct platform_device *pdev)
989{
990 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
991 struct wcn36xx *wcn = hw->priv;
992 wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
993
 994	ieee80211_unregister_hw(hw);
 995	mutex_destroy(&wcn->hal_mutex);
 996
 997	iounmap(wcn->mmio);
 998	ieee80211_free_hw(hw);
999
1000 return 0;
1001}

1002static const struct platform_device_id wcn36xx_platform_id_table[] = {
1003 {
1004 .name = "wcn36xx",
1005 .driver_data = 0
1006 },
1007 {}
1008};
1009MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table);
1010
1011static struct platform_driver wcn36xx_driver = {
1012 .probe = wcn36xx_probe,
1013 .remove = wcn36xx_remove,
1014 .driver = {
1015 .name = "wcn36xx",
1016 .owner = THIS_MODULE,
1017 },
1018 .id_table = wcn36xx_platform_id_table,
1019};
1020
1021static int __init wcn36xx_init(void)
1022{
1023	return platform_driver_register(&wcn36xx_driver);
1025}
1026module_init(wcn36xx_init);
1027
1028static void __exit wcn36xx_exit(void)
1029{
1030 platform_driver_unregister(&wcn36xx_driver);
1031}
1032module_exit(wcn36xx_exit);
1033
1034MODULE_LICENSE("Dual BSD/GPL");
1035MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
1036MODULE_FIRMWARE(WLAN_NV_FILE);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
new file mode 100644
index 000000000000..28b515c81b0e
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include "wcn36xx.h"
20
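/*
 * BMPS (Beacon Mode Power Save) handling: ask the firmware to enter or
 * leave BMPS for the given vif and mirror the result in
 * vif_priv->pw_state so the rest of the driver knows the current power
 * state.
 */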
21int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
22 struct ieee80211_vif *vif)
23{
24 int ret = 0;
25 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
26	/* TODO: Make sure the TX chain is clean */
27 ret = wcn36xx_smd_enter_bmps(wcn, vif);
28 if (!ret) {
29 wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
30 vif_priv->pw_state = WCN36XX_BMPS;
31 } else {
32		/*
33		 * One reason the HW may refuse to enter BMPS is that the
34		 * driver tries to enter BMPS before the first beacon has
35		 * been received, right after authentication completes.
36		 */
37 wcn36xx_err("Can not enter BMPS!\n");
38 }
39 return ret;
40}
41
42int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
43 struct ieee80211_vif *vif)
44{
45 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
46
47 if (WCN36XX_BMPS != vif_priv->pw_state) {
48 wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
49 return -EINVAL;
50 }
51 wcn36xx_smd_exit_bmps(wcn, vif);
52 vif_priv->pw_state = WCN36XX_FULL_POWER;
53 return 0;
54}
55
56int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
57 struct ieee80211_vif *vif)
58{
59 wcn36xx_dbg(WCN36XX_DBG_PMC, "%s\n", __func__);
60 return wcn36xx_smd_keep_alive_req(wcn, vif,
61 WCN36XX_HAL_KEEP_ALIVE_NULL_PKT);
62}
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.h b/drivers/net/wireless/ath/wcn36xx/pmc.h
new file mode 100644
index 000000000000..f72ed68b5a07
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _WCN36XX_PMC_H_
18#define _WCN36XX_PMC_H_
19
20struct wcn36xx;
21
22enum wcn36xx_power_state {
23 WCN36XX_FULL_POWER,
24 WCN36XX_BMPS
25};
26
27int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
28 struct ieee80211_vif *vif);
29int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
30 struct ieee80211_vif *vif);
31int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
32 struct ieee80211_vif *vif);
33#endif /* _WCN36XX_PMC_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
new file mode 100644
index 000000000000..f8c3a10510c2
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -0,0 +1,2126 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/etherdevice.h>
20#include <linux/firmware.h>
21#include <linux/bitops.h>
22#include "smd.h"
23
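/*
 * Append one u32 configuration TLV (header + value) at offset *len inside
 * wcn->hal_buf and advance *len, refusing to overflow the HAL buffer.
 */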
24static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
25{
26 struct wcn36xx_hal_cfg *entry;
27 u32 *val;
28
29 if (*len + sizeof(*entry) + sizeof(u32) >= WCN36XX_HAL_BUF_SIZE) {
30 wcn36xx_err("Not enough room for TLV entry\n");
31 return -ENOMEM;
32 }
33
34 entry = (struct wcn36xx_hal_cfg *) (wcn->hal_buf + *len);
35 entry->id = id;
36 entry->len = sizeof(u32);
37 entry->pad_bytes = 0;
38 entry->reserve = 0;
39
40 val = (u32 *) (entry + 1);
41 *val = value;
42
43 *len += sizeof(*entry) + sizeof(u32);
44
45 return 0;
46}
47
48static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
49 struct ieee80211_sta *sta,
50 struct wcn36xx_hal_config_bss_params *bss_params)
51{
52 if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
53 bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
54 else if (sta && sta->ht_cap.ht_supported)
55 bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
56 else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
57 bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
58 else
59 bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
60}
61
62static inline u8 is_cap_supported(unsigned long caps, unsigned long flag)
63{
64 return caps & flag ? 1 : 0;
65}
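
/*
 * Translate the peer's mac80211 HT capabilities and the vif's HT operation
 * mode into the HAL BSS/STA HT fields used by the config requests below.
 */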
66static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
67 struct ieee80211_sta *sta,
68 struct wcn36xx_hal_config_bss_params *bss_params)
69{
70 if (sta && sta->ht_cap.ht_supported) {
71 unsigned long caps = sta->ht_cap.cap;
72 bss_params->ht = sta->ht_cap.ht_supported;
73 bss_params->tx_channel_width_set = is_cap_supported(caps,
74 IEEE80211_HT_CAP_SUP_WIDTH_20_40);
75 bss_params->lsig_tx_op_protection_full_support =
76 is_cap_supported(caps,
77 IEEE80211_HT_CAP_LSIG_TXOP_PROT);
78
79 bss_params->ht_oper_mode = vif->bss_conf.ht_operation_mode;
80 bss_params->lln_non_gf_coexist =
81 !!(vif->bss_conf.ht_operation_mode &
82 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
83 /* IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT */
84 bss_params->dual_cts_protection = 0;
85 /* IEEE80211_HT_OP_MODE_PROTECTION_20MHZ */
86 bss_params->ht20_coexist = 0;
87 }
88}
89
90static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
91 struct wcn36xx_hal_config_sta_params *sta_params)
92{
93 if (sta->ht_cap.ht_supported) {
94 unsigned long caps = sta->ht_cap.cap;
95 sta_params->ht_capable = sta->ht_cap.ht_supported;
96 sta_params->tx_channel_width_set = is_cap_supported(caps,
97 IEEE80211_HT_CAP_SUP_WIDTH_20_40);
98 sta_params->lsig_txop_protection = is_cap_supported(caps,
99 IEEE80211_HT_CAP_LSIG_TXOP_PROT);
100
101 sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
102 sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
103 sta_params->max_amsdu_size = is_cap_supported(caps,
104 IEEE80211_HT_CAP_MAX_AMSDU);
105 sta_params->sgi_20Mhz = is_cap_supported(caps,
106 IEEE80211_HT_CAP_SGI_20);
107 sta_params->sgi_40mhz = is_cap_supported(caps,
108 IEEE80211_HT_CAP_SGI_40);
109 sta_params->green_field_capable = is_cap_supported(caps,
110 IEEE80211_HT_CAP_GRN_FLD);
111 sta_params->delayed_ba_support = is_cap_supported(caps,
112 IEEE80211_HT_CAP_DELAY_BA);
113 sta_params->dsss_cck_mode_40mhz = is_cap_supported(caps,
114 IEEE80211_HT_CAP_DSSSCCK40);
115 }
116}
117
118static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
119 struct ieee80211_vif *vif,
120 struct ieee80211_sta *sta,
121 struct wcn36xx_hal_config_sta_params *sta_params)
122{
123 struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
124 struct wcn36xx_sta *priv_sta = NULL;
125 if (vif->type == NL80211_IFTYPE_ADHOC ||
126 vif->type == NL80211_IFTYPE_AP ||
127 vif->type == NL80211_IFTYPE_MESH_POINT) {
128 sta_params->type = 1;
129 sta_params->sta_index = 0xFF;
130 } else {
131 sta_params->type = 0;
132 sta_params->sta_index = 1;
133 }
134
135 sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
136
137	/*
138	 * In STA mode ieee80211_sta holds the bssid and ieee80211_vif holds
139	 * our mac address. In AP mode we are the bssid, so vif holds the
140	 * bssid and ieee80211_sta holds the peer's mac address.
141	 */
142 if (NL80211_IFTYPE_STATION == vif->type)
143 memcpy(&sta_params->mac, vif->addr, ETH_ALEN);
144 else
145 memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
146
147 sta_params->encrypt_type = priv_vif->encrypt_type;
148 sta_params->short_preamble_supported =
149 !(WCN36XX_FLAGS(wcn) &
150 IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE);
151
152 sta_params->rifs_mode = 0;
153 sta_params->rmf = 0;
154 sta_params->action = 0;
155 sta_params->uapsd = 0;
156 sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
157 sta_params->max_ampdu_duration = 0;
158 sta_params->bssid_index = priv_vif->bss_index;
159 sta_params->p2p = 0;
160
161 if (sta) {
162 priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
163 if (NL80211_IFTYPE_STATION == vif->type)
164 memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
165 else
166 memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
167 sta_params->wmm_enabled = sta->wme;
168 sta_params->max_sp_len = sta->max_sp;
169 sta_params->aid = priv_sta->aid;
170 wcn36xx_smd_set_sta_ht_params(sta, sta_params);
171 memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
172 sizeof(priv_sta->supported_rates));
173 } else {
174 wcn36xx_set_default_rates(&sta_params->supported_rates);
175 }
176}
177
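/*
 * Send the request that the caller has already serialized into wcn->hal_buf
 * down to the firmware via ctrl_ops->tx() and block on hal_rsp_compl until
 * the response arrives or HAL_MSG_TIMEOUT expires. The caller must hold
 * wcn->hal_mutex, which protects hal_buf for the whole request/response
 * round trip.
 */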
178static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
179{
180 int ret = 0;
181 wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
182
183 init_completion(&wcn->hal_rsp_compl);
184 ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
185 if (ret) {
186 wcn36xx_err("HAL TX failed\n");
187 goto out;
188 }
189 if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
190 msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
191		wcn36xx_err("Timeout while waiting for SMD response\n");
192 ret = -ETIME;
193 goto out;
194 }
195out:
196 return ret;
197}
198
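/*
 * INIT_HAL_MSG() zeroes a request body and fills in the common message
 * header (type, version 0 and length); PREPARE_HAL_BUF() then copies the
 * finished body into the shared HAL buffer handed to
 * wcn36xx_smd_send_and_wait().
 */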
199#define INIT_HAL_MSG(msg_body, type) \
200 do { \
201 memset(&msg_body, 0, sizeof(msg_body)); \
202 msg_body.header.msg_type = type; \
203 msg_body.header.msg_version = WCN36XX_HAL_MSG_VERSION0; \
204 msg_body.header.len = sizeof(msg_body); \
205	} while (0)
206
207#define PREPARE_HAL_BUF(send_buf, msg_body) \
208 do { \
209 memset(send_buf, 0, msg_body.header.len); \
210 memcpy(send_buf, &msg_body, sizeof(msg_body)); \
211	} while (0)
212
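/*
 * Validate the generic status-only response that most requests return.
 * Nearly every request helper below follows the same pattern:
 *
 *	mutex_lock(&wcn->hal_mutex);
 *	INIT_HAL_MSG(msg_body, WCN36XX_HAL_..._REQ);
 *	... fill in request specific fields ...
 *	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
 *	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
 *	if (!ret)
 *		ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
 *						   wcn->hal_rsp_len);
 *	mutex_unlock(&wcn->hal_mutex);
 */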
213static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
214{
215 struct wcn36xx_fw_msg_status_rsp *rsp;
216
217 if (len < sizeof(struct wcn36xx_hal_msg_header) +
218 sizeof(struct wcn36xx_fw_msg_status_rsp))
219 return -EIO;
220
221 rsp = (struct wcn36xx_fw_msg_status_rsp *)
222 (buf + sizeof(struct wcn36xx_hal_msg_header));
223
224 if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
225 return rsp->status;
226
227 return 0;
228}
229
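/*
 * Download the calibration/NV blob (WLAN_NV_FILE) to the firmware in
 * WCN36XX_NV_FRAGMENT_SIZE chunks, flagging the final fragment so the
 * firmware knows when the image is complete.
 */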
230int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
231{
232 const struct firmware *nv;
233 struct nv_data *nv_d;
234 struct wcn36xx_hal_nv_img_download_req_msg msg_body;
235 int fw_bytes_left;
236 int ret;
237 u16 fm_offset = 0;
238
239 ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
240 if (ret) {
241 wcn36xx_err("Failed to load nv file %s: %d\n",
242 WLAN_NV_FILE, ret);
243 goto out_free_nv;
244 }
245
246 nv_d = (struct nv_data *)nv->data;
247 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
248
249 msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
250
251 msg_body.frag_number = 0;
252 /* hal_buf must be protected with mutex */
253 mutex_lock(&wcn->hal_mutex);
254
255 do {
256 fw_bytes_left = nv->size - fm_offset - 4;
257 if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
258 msg_body.last_fragment = 0;
259 msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
260 } else {
261 msg_body.last_fragment = 1;
262 msg_body.nv_img_buffer_size = fw_bytes_left;
263
264			/* Do not forget to update the overall message length */
265 msg_body.header.len = sizeof(msg_body) + fw_bytes_left;
266
267 }
268
269 /* Add load NV request message header */
270 memcpy(wcn->hal_buf, &msg_body, sizeof(msg_body));
271
272 /* Add NV body itself */
273 memcpy(wcn->hal_buf + sizeof(msg_body),
274 &nv_d->table + fm_offset,
275 msg_body.nv_img_buffer_size);
276
277 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
278 if (ret)
279 goto out_unlock;
280 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
281 wcn->hal_rsp_len);
282 if (ret) {
283 wcn36xx_err("hal_load_nv response failed err=%d\n",
284 ret);
285 goto out_unlock;
286 }
287 msg_body.frag_number++;
288 fm_offset += WCN36XX_NV_FRAGMENT_SIZE;
289
290 } while (msg_body.last_fragment != 1);
291
292out_unlock:
293 mutex_unlock(&wcn->hal_mutex);
294out_free_nv:
295 release_firmware(nv);
296
297 return ret;
298}
299
300static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
301{
302 struct wcn36xx_hal_mac_start_rsp_msg *rsp;
303
304 if (len < sizeof(*rsp))
305 return -EIO;
306
307 rsp = (struct wcn36xx_hal_mac_start_rsp_msg *)buf;
308
309 if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->start_rsp_params.status)
310 return -EIO;
311
312 memcpy(wcn->crm_version, rsp->start_rsp_params.crm_version,
313 WCN36XX_HAL_VERSION_LENGTH);
314 memcpy(wcn->wlan_version, rsp->start_rsp_params.wlan_version,
315 WCN36XX_HAL_VERSION_LENGTH);
316
317 /* null terminate the strings, just in case */
318 wcn->crm_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
319 wcn->wlan_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
320
321 wcn->fw_revision = rsp->start_rsp_params.version.revision;
322 wcn->fw_version = rsp->start_rsp_params.version.version;
323 wcn->fw_minor = rsp->start_rsp_params.version.minor;
324 wcn->fw_major = rsp->start_rsp_params.version.major;
325
326 wcn36xx_info("firmware WLAN version '%s' and CRM version '%s'\n",
327 wcn->wlan_version, wcn->crm_version);
328
329 wcn36xx_info("firmware API %u.%u.%u.%u, %u stations, %u bssids\n",
330 wcn->fw_major, wcn->fw_minor,
331 wcn->fw_version, wcn->fw_revision,
332 rsp->start_rsp_params.stations,
333 rsp->start_rsp_params.bssids);
334
335 return 0;
336}
337
338int wcn36xx_smd_start(struct wcn36xx *wcn)
339{
340 struct wcn36xx_hal_mac_start_req_msg msg_body;
341 int ret = 0;
342
343 mutex_lock(&wcn->hal_mutex);
344 INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
345
346 msg_body.params.type = DRIVER_TYPE_PRODUCTION;
347 msg_body.params.len = 0;
348
349 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
350
351 wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n",
352 msg_body.params.type);
353
354 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
355 if (ret) {
356 wcn36xx_err("Sending hal_start failed\n");
357 goto out;
358 }
359
360 ret = wcn36xx_smd_start_rsp(wcn, wcn->hal_buf, wcn->hal_rsp_len);
361 if (ret) {
362 wcn36xx_err("hal_start response failed err=%d\n", ret);
363 goto out;
364 }
365
366out:
367 mutex_unlock(&wcn->hal_mutex);
368 return ret;
369}
370
371int wcn36xx_smd_stop(struct wcn36xx *wcn)
372{
373 struct wcn36xx_hal_mac_stop_req_msg msg_body;
374 int ret = 0;
375
376 mutex_lock(&wcn->hal_mutex);
377 INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
378
379 msg_body.stop_req_params.reason = HAL_STOP_TYPE_RF_KILL;
380
381 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
382
383 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
384 if (ret) {
385 wcn36xx_err("Sending hal_stop failed\n");
386 goto out;
387 }
388 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
389 if (ret) {
390 wcn36xx_err("hal_stop response failed err=%d\n", ret);
391 goto out;
392 }
393out:
394 mutex_unlock(&wcn->hal_mutex);
395 return ret;
396}
397
398int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
399{
400 struct wcn36xx_hal_init_scan_req_msg msg_body;
401 int ret = 0;
402
403 mutex_lock(&wcn->hal_mutex);
404 INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
405
406 msg_body.mode = mode;
407
408 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
409
410 wcn36xx_dbg(WCN36XX_DBG_HAL, "hal init scan mode %d\n", msg_body.mode);
411
412 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
413 if (ret) {
414 wcn36xx_err("Sending hal_init_scan failed\n");
415 goto out;
416 }
417 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
418 if (ret) {
419 wcn36xx_err("hal_init_scan response failed err=%d\n", ret);
420 goto out;
421 }
422out:
423 mutex_unlock(&wcn->hal_mutex);
424 return ret;
425}
426
427int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
428{
429 struct wcn36xx_hal_start_scan_req_msg msg_body;
430 int ret = 0;
431
432 mutex_lock(&wcn->hal_mutex);
433 INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
434
435 msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
436
437 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
438
439 wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start scan channel %d\n",
440 msg_body.scan_channel);
441
442 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
443 if (ret) {
444 wcn36xx_err("Sending hal_start_scan failed\n");
445 goto out;
446 }
447 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
448 if (ret) {
449 wcn36xx_err("hal_start_scan response failed err=%d\n", ret);
450 goto out;
451 }
452out:
453 mutex_unlock(&wcn->hal_mutex);
454 return ret;
455}
456
457int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
458{
459 struct wcn36xx_hal_end_scan_req_msg msg_body;
460 int ret = 0;
461
462 mutex_lock(&wcn->hal_mutex);
463 INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
464
465 msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
466
467 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
468
469 wcn36xx_dbg(WCN36XX_DBG_HAL, "hal end scan channel %d\n",
470 msg_body.scan_channel);
471
472 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
473 if (ret) {
474 wcn36xx_err("Sending hal_end_scan failed\n");
475 goto out;
476 }
477 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
478 if (ret) {
479 wcn36xx_err("hal_end_scan response failed err=%d\n", ret);
480 goto out;
481 }
482out:
483 mutex_unlock(&wcn->hal_mutex);
484 return ret;
485}
486
487int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
488 enum wcn36xx_hal_sys_mode mode)
489{
490 struct wcn36xx_hal_finish_scan_req_msg msg_body;
491 int ret = 0;
492
493 mutex_lock(&wcn->hal_mutex);
494 INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
495
496 msg_body.mode = mode;
497
498 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
499
500 wcn36xx_dbg(WCN36XX_DBG_HAL, "hal finish scan mode %d\n",
501 msg_body.mode);
502
503 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
504 if (ret) {
505 wcn36xx_err("Sending hal_finish_scan failed\n");
506 goto out;
507 }
508 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
509 if (ret) {
510 wcn36xx_err("hal_finish_scan response failed err=%d\n", ret);
511 goto out;
512 }
513out:
514 mutex_unlock(&wcn->hal_mutex);
515 return ret;
516}
517
518static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
519{
520 struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
521 int ret = 0;
522
523 ret = wcn36xx_smd_rsp_status_check(buf, len);
524 if (ret)
525 return ret;
526 rsp = (struct wcn36xx_hal_switch_channel_rsp_msg *)buf;
527 wcn36xx_dbg(WCN36XX_DBG_HAL, "channel switched to: %d, status: %d\n",
528 rsp->channel_number, rsp->status);
529 return ret;
530}
531
532int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
533 struct ieee80211_vif *vif, int ch)
534{
535 struct wcn36xx_hal_switch_channel_req_msg msg_body;
536 int ret = 0;
537
538 mutex_lock(&wcn->hal_mutex);
539 INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
540
541 msg_body.channel_number = (u8)ch;
542 msg_body.tx_mgmt_power = 0xbf;
543 msg_body.max_tx_power = 0xbf;
544 memcpy(msg_body.self_sta_mac_addr, vif->addr, ETH_ALEN);
545
546 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
547
548 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
549 if (ret) {
550 wcn36xx_err("Sending hal_switch_channel failed\n");
551 goto out;
552 }
553 ret = wcn36xx_smd_switch_channel_rsp(wcn->hal_buf, wcn->hal_rsp_len);
554 if (ret) {
555 wcn36xx_err("hal_switch_channel response failed err=%d\n", ret);
556 goto out;
557 }
558out:
559 mutex_unlock(&wcn->hal_mutex);
560 return ret;
561}
562
563static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len)
564{
565 struct wcn36xx_hal_update_scan_params_resp *rsp;
566
567 rsp = (struct wcn36xx_hal_update_scan_params_resp *)buf;
568
569 /* Remove the PNO version bit */
570 rsp->status &= (~(WCN36XX_FW_MSG_PNO_VERSION_MASK));
571
572 if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status) {
573 wcn36xx_warn("error response from update scan\n");
574 return rsp->status;
575 }
576
577 return 0;
578}
579
580int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn)
581{
582 struct wcn36xx_hal_update_scan_params_req msg_body;
583 int ret = 0;
584
585 mutex_lock(&wcn->hal_mutex);
586 INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
587
588 msg_body.dot11d_enabled = 0;
589 msg_body.dot11d_resolved = 0;
590 msg_body.channel_count = 26;
591 msg_body.active_min_ch_time = 60;
592 msg_body.active_max_ch_time = 120;
593 msg_body.passive_min_ch_time = 60;
594 msg_body.passive_max_ch_time = 110;
595 msg_body.state = 0;
596
597 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
598
599 wcn36xx_dbg(WCN36XX_DBG_HAL,
600 "hal update scan params channel_count %d\n",
601 msg_body.channel_count);
602
603 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
604 if (ret) {
605 wcn36xx_err("Sending hal_update_scan_params failed\n");
606 goto out;
607 }
608 ret = wcn36xx_smd_update_scan_params_rsp(wcn->hal_buf,
609 wcn->hal_rsp_len);
610 if (ret) {
611 wcn36xx_err("hal_update_scan_params response failed err=%d\n",
612 ret);
613 goto out;
614 }
615out:
616 mutex_unlock(&wcn->hal_mutex);
617 return ret;
618}
619
620static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
621 struct ieee80211_vif *vif,
622 void *buf,
623 size_t len)
624{
625 struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
626 struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
627
628 if (len < sizeof(*rsp))
629 return -EINVAL;
630
631 rsp = (struct wcn36xx_hal_add_sta_self_rsp_msg *)buf;
632
633 if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
634 wcn36xx_warn("hal add sta self failure: %d\n",
635 rsp->status);
636 return rsp->status;
637 }
638
639 wcn36xx_dbg(WCN36XX_DBG_HAL,
640 "hal add sta self status %d self_sta_index %d dpu_index %d\n",
641 rsp->status, rsp->self_sta_index, rsp->dpu_index);
642
643 priv_vif->self_sta_index = rsp->self_sta_index;
644 priv_vif->self_dpu_desc_index = rsp->dpu_index;
645
646 return 0;
647}
648
649int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
650{
651 struct wcn36xx_hal_add_sta_self_req msg_body;
652 int ret = 0;
653
654 mutex_lock(&wcn->hal_mutex);
655 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
656
657 memcpy(&msg_body.self_addr, vif->addr, ETH_ALEN);
658
659 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
660
661 wcn36xx_dbg(WCN36XX_DBG_HAL,
662 "hal add sta self self_addr %pM status %d\n",
663 msg_body.self_addr, msg_body.status);
664
665 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
666 if (ret) {
667 wcn36xx_err("Sending hal_add_sta_self failed\n");
668 goto out;
669 }
670 ret = wcn36xx_smd_add_sta_self_rsp(wcn,
671 vif,
672 wcn->hal_buf,
673 wcn->hal_rsp_len);
674 if (ret) {
675 wcn36xx_err("hal_add_sta_self response failed err=%d\n", ret);
676 goto out;
677 }
678out:
679 mutex_unlock(&wcn->hal_mutex);
680 return ret;
681}
682
683int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
684{
685 struct wcn36xx_hal_del_sta_self_req_msg msg_body;
686 int ret = 0;
687
688 mutex_lock(&wcn->hal_mutex);
689 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
690
691 memcpy(&msg_body.self_addr, addr, ETH_ALEN);
692
693 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
694
695 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
696 if (ret) {
697 wcn36xx_err("Sending hal_delete_sta_self failed\n");
698 goto out;
699 }
700 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
701 if (ret) {
702 wcn36xx_err("hal_delete_sta_self response failed err=%d\n",
703 ret);
704 goto out;
705 }
706out:
707 mutex_unlock(&wcn->hal_mutex);
708 return ret;
709}
710
711int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
712{
713 struct wcn36xx_hal_delete_sta_req_msg msg_body;
714 int ret = 0;
715
716 mutex_lock(&wcn->hal_mutex);
717 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
718
719 msg_body.sta_index = sta_index;
720
721 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
722
723 wcn36xx_dbg(WCN36XX_DBG_HAL,
724 "hal delete sta sta_index %d\n",
725 msg_body.sta_index);
726
727 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
728 if (ret) {
729 wcn36xx_err("Sending hal_delete_sta failed\n");
730 goto out;
731 }
732 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
733 if (ret) {
734 wcn36xx_err("hal_delete_sta response failed err=%d\n", ret);
735 goto out;
736 }
737out:
738 mutex_unlock(&wcn->hal_mutex);
739 return ret;
740}
741
742static int wcn36xx_smd_join_rsp(void *buf, size_t len)
743{
744 struct wcn36xx_hal_join_rsp_msg *rsp;
745
746 if (wcn36xx_smd_rsp_status_check(buf, len))
747 return -EIO;
748
749 rsp = (struct wcn36xx_hal_join_rsp_msg *)buf;
750
751 wcn36xx_dbg(WCN36XX_DBG_HAL,
752 "hal rsp join status %d tx_mgmt_power %d\n",
753 rsp->status, rsp->tx_mgmt_power);
754
755 return 0;
756}
757
758int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
759{
760 struct wcn36xx_hal_join_req_msg msg_body;
761 int ret = 0;
762
763 mutex_lock(&wcn->hal_mutex);
764 INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
765
766 memcpy(&msg_body.bssid, bssid, ETH_ALEN);
767 memcpy(&msg_body.self_sta_mac_addr, vif, ETH_ALEN);
768 msg_body.channel = ch;
769
770 if (conf_is_ht40_minus(&wcn->hw->conf))
771 msg_body.secondary_channel_offset =
772 PHY_DOUBLE_CHANNEL_HIGH_PRIMARY;
773 else if (conf_is_ht40_plus(&wcn->hw->conf))
774 msg_body.secondary_channel_offset =
775 PHY_DOUBLE_CHANNEL_LOW_PRIMARY;
776 else
777 msg_body.secondary_channel_offset =
778 PHY_SINGLE_CHANNEL_CENTERED;
779
780 msg_body.link_state = WCN36XX_HAL_LINK_PREASSOC_STATE;
781
782 msg_body.max_tx_power = 0xbf;
783 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
784
785 wcn36xx_dbg(WCN36XX_DBG_HAL,
786 "hal join req bssid %pM self_sta_mac_addr %pM channel %d link_state %d\n",
787 msg_body.bssid, msg_body.self_sta_mac_addr,
788 msg_body.channel, msg_body.link_state);
789
790 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
791 if (ret) {
792 wcn36xx_err("Sending hal_join failed\n");
793 goto out;
794 }
795 ret = wcn36xx_smd_join_rsp(wcn->hal_buf, wcn->hal_rsp_len);
796 if (ret) {
797 wcn36xx_err("hal_join response failed err=%d\n", ret);
798 goto out;
799 }
800out:
801 mutex_unlock(&wcn->hal_mutex);
802 return ret;
803}
804
805int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
806 const u8 *sta_mac,
807 enum wcn36xx_hal_link_state state)
808{
809 struct wcn36xx_hal_set_link_state_req_msg msg_body;
810 int ret = 0;
811
812 mutex_lock(&wcn->hal_mutex);
813 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
814
815 memcpy(&msg_body.bssid, bssid, ETH_ALEN);
816 memcpy(&msg_body.self_mac_addr, sta_mac, ETH_ALEN);
817 msg_body.state = state;
818
819 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
820
821 wcn36xx_dbg(WCN36XX_DBG_HAL,
822 "hal set link state bssid %pM self_mac_addr %pM state %d\n",
823 msg_body.bssid, msg_body.self_mac_addr, msg_body.state);
824
825 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
826 if (ret) {
827 wcn36xx_err("Sending hal_set_link_st failed\n");
828 goto out;
829 }
830 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
831 if (ret) {
832 wcn36xx_err("hal_set_link_st response failed err=%d\n", ret);
833 goto out;
834 }
835out:
836 mutex_unlock(&wcn->hal_mutex);
837 return ret;
838}
839
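/*
 * Firmware builds that do not report version 1.2.2.24 (see the
 * wcn36xx_is_fw_version() checks below) expect the older "v1" layout of the
 * STA and BSS config structures, so the full parameter set is
 * down-converted before sending.
 */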
840static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
841 const struct wcn36xx_hal_config_sta_params *orig,
842 struct wcn36xx_hal_config_sta_params_v1 *v1)
843{
844 /* convert orig to v1 format */
845 memcpy(&v1->bssid, orig->bssid, ETH_ALEN);
846 memcpy(&v1->mac, orig->mac, ETH_ALEN);
847 v1->aid = orig->aid;
848 v1->type = orig->type;
849 v1->listen_interval = orig->listen_interval;
850 v1->ht_capable = orig->ht_capable;
851
852 v1->max_ampdu_size = orig->max_ampdu_size;
853 v1->max_ampdu_density = orig->max_ampdu_density;
854 v1->sgi_40mhz = orig->sgi_40mhz;
855 v1->sgi_20Mhz = orig->sgi_20Mhz;
856
857 memcpy(&v1->supported_rates, &orig->supported_rates,
858 sizeof(orig->supported_rates));
859 v1->sta_index = orig->sta_index;
860}
861
862static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
863 struct ieee80211_sta *sta,
864 void *buf,
865 size_t len)
866{
867 struct wcn36xx_hal_config_sta_rsp_msg *rsp;
868 struct config_sta_rsp_params *params;
869 struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
870
871 if (len < sizeof(*rsp))
872 return -EINVAL;
873
874 rsp = (struct wcn36xx_hal_config_sta_rsp_msg *)buf;
875 params = &rsp->params;
876
877 if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
878 wcn36xx_warn("hal config sta response failure: %d\n",
879 params->status);
880 return -EIO;
881 }
882
883 sta_priv->sta_index = params->sta_index;
884 sta_priv->dpu_desc_index = params->dpu_index;
885
886 wcn36xx_dbg(WCN36XX_DBG_HAL,
887 "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
888 params->status, params->sta_index, params->bssid_index,
889 params->p2p);
890
891 return 0;
892}
893
894static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn,
895 const struct wcn36xx_hal_config_sta_req_msg *orig)
896{
897 struct wcn36xx_hal_config_sta_req_msg_v1 msg_body;
898 struct wcn36xx_hal_config_sta_params_v1 *sta = &msg_body.sta_params;
899
900 INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
901
902 wcn36xx_smd_convert_sta_to_v1(wcn, &orig->sta_params,
903 &msg_body.sta_params);
904
905 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
906
907 wcn36xx_dbg(WCN36XX_DBG_HAL,
908 "hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
909 sta->action, sta->sta_index, sta->bssid_index,
910 sta->bssid, sta->type, sta->mac, sta->aid);
911
912 return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
913}
914
915int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
916 struct ieee80211_sta *sta)
917{
918 struct wcn36xx_hal_config_sta_req_msg msg;
919 struct wcn36xx_hal_config_sta_params *sta_params;
920 int ret = 0;
921
922 mutex_lock(&wcn->hal_mutex);
923 INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
924
925 sta_params = &msg.sta_params;
926
927 wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
928
929 if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
930 ret = wcn36xx_smd_config_sta_v1(wcn, &msg);
931 } else {
932 PREPARE_HAL_BUF(wcn->hal_buf, msg);
933
934 wcn36xx_dbg(WCN36XX_DBG_HAL,
935 "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
936 sta_params->action, sta_params->sta_index,
937 sta_params->bssid_index, sta_params->bssid,
938 sta_params->type, sta_params->mac, sta_params->aid);
939
940 ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
941 }
942 if (ret) {
943 wcn36xx_err("Sending hal_config_sta failed\n");
944 goto out;
945 }
946 ret = wcn36xx_smd_config_sta_rsp(wcn,
947 sta,
948 wcn->hal_buf,
949 wcn->hal_rsp_len);
950 if (ret) {
951 wcn36xx_err("hal_config_sta response failed err=%d\n", ret);
952 goto out;
953 }
954out:
955 mutex_unlock(&wcn->hal_mutex);
956 return ret;
957}
958
959static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
960 const struct wcn36xx_hal_config_bss_req_msg *orig)
961{
962 struct wcn36xx_hal_config_bss_req_msg_v1 msg_body;
963 struct wcn36xx_hal_config_bss_params_v1 *bss = &msg_body.bss_params;
964 struct wcn36xx_hal_config_sta_params_v1 *sta = &bss->sta;
965
966 INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_BSS_REQ);
967
968 /* convert orig to v1 */
969 memcpy(&msg_body.bss_params.bssid,
970 &orig->bss_params.bssid, ETH_ALEN);
971 memcpy(&msg_body.bss_params.self_mac_addr,
972 &orig->bss_params.self_mac_addr, ETH_ALEN);
973
974 msg_body.bss_params.bss_type = orig->bss_params.bss_type;
975 msg_body.bss_params.oper_mode = orig->bss_params.oper_mode;
976 msg_body.bss_params.nw_type = orig->bss_params.nw_type;
977
978 msg_body.bss_params.short_slot_time_supported =
979 orig->bss_params.short_slot_time_supported;
980 msg_body.bss_params.lla_coexist = orig->bss_params.lla_coexist;
981 msg_body.bss_params.llb_coexist = orig->bss_params.llb_coexist;
982 msg_body.bss_params.llg_coexist = orig->bss_params.llg_coexist;
983 msg_body.bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
984 msg_body.bss_params.lln_non_gf_coexist =
985 orig->bss_params.lln_non_gf_coexist;
986
987 msg_body.bss_params.lsig_tx_op_protection_full_support =
988 orig->bss_params.lsig_tx_op_protection_full_support;
989 msg_body.bss_params.rifs_mode = orig->bss_params.rifs_mode;
990 msg_body.bss_params.beacon_interval = orig->bss_params.beacon_interval;
991 msg_body.bss_params.dtim_period = orig->bss_params.dtim_period;
992 msg_body.bss_params.tx_channel_width_set =
993 orig->bss_params.tx_channel_width_set;
994 msg_body.bss_params.oper_channel = orig->bss_params.oper_channel;
995 msg_body.bss_params.ext_channel = orig->bss_params.ext_channel;
996
997 msg_body.bss_params.reserved = orig->bss_params.reserved;
998
999 memcpy(&msg_body.bss_params.ssid,
1000 &orig->bss_params.ssid,
1001 sizeof(orig->bss_params.ssid));
1002
1003 msg_body.bss_params.action = orig->bss_params.action;
1004 msg_body.bss_params.rateset = orig->bss_params.rateset;
1005 msg_body.bss_params.ht = orig->bss_params.ht;
1006 msg_body.bss_params.obss_prot_enabled =
1007 orig->bss_params.obss_prot_enabled;
1008 msg_body.bss_params.rmf = orig->bss_params.rmf;
1009 msg_body.bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
1010 msg_body.bss_params.dual_cts_protection =
1011 orig->bss_params.dual_cts_protection;
1012
1013 msg_body.bss_params.max_probe_resp_retry_limit =
1014 orig->bss_params.max_probe_resp_retry_limit;
1015 msg_body.bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
1016 msg_body.bss_params.proxy_probe_resp =
1017 orig->bss_params.proxy_probe_resp;
1018 msg_body.bss_params.edca_params_valid =
1019 orig->bss_params.edca_params_valid;
1020
1021 memcpy(&msg_body.bss_params.acbe,
1022 &orig->bss_params.acbe,
1023 sizeof(orig->bss_params.acbe));
1024 memcpy(&msg_body.bss_params.acbk,
1025 &orig->bss_params.acbk,
1026 sizeof(orig->bss_params.acbk));
1027 memcpy(&msg_body.bss_params.acvi,
1028 &orig->bss_params.acvi,
1029 sizeof(orig->bss_params.acvi));
1030 memcpy(&msg_body.bss_params.acvo,
1031 &orig->bss_params.acvo,
1032 sizeof(orig->bss_params.acvo));
1033
1034 msg_body.bss_params.ext_set_sta_key_param_valid =
1035 orig->bss_params.ext_set_sta_key_param_valid;
1036
1037 memcpy(&msg_body.bss_params.ext_set_sta_key_param,
1038 &orig->bss_params.ext_set_sta_key_param,
1039		sizeof(orig->bss_params.ext_set_sta_key_param));
1040
1041 msg_body.bss_params.wcn36xx_hal_persona =
1042 orig->bss_params.wcn36xx_hal_persona;
1043 msg_body.bss_params.spectrum_mgt_enable =
1044 orig->bss_params.spectrum_mgt_enable;
1045 msg_body.bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
1046 msg_body.bss_params.max_tx_power = orig->bss_params.max_tx_power;
1047
1048 wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta,
1049 &msg_body.bss_params.sta);
1050
1051 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1052
1053 wcn36xx_dbg(WCN36XX_DBG_HAL,
1054 "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
1055 bss->bssid, bss->self_mac_addr, bss->bss_type,
1056 bss->oper_mode, bss->nw_type);
1057
1058 wcn36xx_dbg(WCN36XX_DBG_HAL,
1059 "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
1060 sta->bssid, sta->action, sta->sta_index,
1061 sta->bssid_index, sta->aid, sta->type, sta->mac);
1062
1063 return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1064}
1065
1066
1067static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
1068 struct ieee80211_vif *vif,
1069 void *buf,
1070 size_t len)
1071{
1072 struct wcn36xx_hal_config_bss_rsp_msg *rsp;
1073 struct wcn36xx_hal_config_bss_rsp_params *params;
1074 struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
1075
1076 if (len < sizeof(*rsp))
1077 return -EINVAL;
1078
1079 rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
1080 params = &rsp->bss_rsp_params;
1081
1082 if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
1083 wcn36xx_warn("hal config bss response failure: %d\n",
1084 params->status);
1085 return -EIO;
1086 }
1087
1088 wcn36xx_dbg(WCN36XX_DBG_HAL,
1089 "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
1090 " sta_idx %d self_idx %d bcast_idx %d mac %pM"
1091 " power %d ucast_dpu_signature %d\n",
1092 params->status, params->bss_index, params->dpu_desc_index,
1093 params->bss_sta_index, params->bss_self_sta_index,
1094 params->bss_bcast_sta_idx, params->mac,
1095 params->tx_mgmt_power, params->ucast_dpu_signature);
1096
1097 priv_vif->bss_index = params->bss_index;
1098
1099 if (priv_vif->sta) {
1100 priv_vif->sta->bss_sta_index = params->bss_sta_index;
1101 priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
1102 }
1103
1104 priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
1105
1106 return 0;
1107}
1108
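/*
 * Build the CONFIG_BSS request: bss_type, oper_mode and persona depend on
 * the interface type, HT parameters come from the peer/vif, the SSID is
 * taken from the vif private data, and the request is sent either in the
 * current or in the v1 layout depending on the firmware version.
 */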
1109int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
1110 struct ieee80211_sta *sta, const u8 *bssid,
1111 bool update)
1112{
1113 struct wcn36xx_hal_config_bss_req_msg msg;
1114 struct wcn36xx_hal_config_bss_params *bss;
1115 struct wcn36xx_hal_config_sta_params *sta_params;
1116 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
1117 int ret = 0;
1118
1119 mutex_lock(&wcn->hal_mutex);
1120 INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
1121
1122 bss = &msg.bss_params;
1123 sta_params = &bss->sta;
1124
1125 WARN_ON(is_zero_ether_addr(bssid));
1126
1127 memcpy(&bss->bssid, bssid, ETH_ALEN);
1128
1129 memcpy(bss->self_mac_addr, vif->addr, ETH_ALEN);
1130
1131 if (vif->type == NL80211_IFTYPE_STATION) {
1132 bss->bss_type = WCN36XX_HAL_INFRASTRUCTURE_MODE;
1133
1134 /* STA */
1135 bss->oper_mode = 1;
1136 bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
1137 } else if (vif->type == NL80211_IFTYPE_AP) {
1138 bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
1139
1140 /* AP */
1141 bss->oper_mode = 0;
1142 bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
1143 } else if (vif->type == NL80211_IFTYPE_ADHOC ||
1144 vif->type == NL80211_IFTYPE_MESH_POINT) {
1145 bss->bss_type = WCN36XX_HAL_IBSS_MODE;
1146
1147 /* STA */
1148 bss->oper_mode = 1;
1149 } else {
1150 wcn36xx_warn("Unknown type for bss config: %d\n", vif->type);
1151 }
1152
1153 if (vif->type == NL80211_IFTYPE_STATION)
1154 wcn36xx_smd_set_bss_nw_type(wcn, sta, bss);
1155 else
1156 bss->nw_type = WCN36XX_HAL_11N_NW_TYPE;
1157
1158 bss->short_slot_time_supported = vif->bss_conf.use_short_slot;
1159 bss->lla_coexist = 0;
1160 bss->llb_coexist = 0;
1161 bss->llg_coexist = 0;
1162 bss->rifs_mode = 0;
1163 bss->beacon_interval = vif->bss_conf.beacon_int;
1164 bss->dtim_period = vif_priv->dtim_period;
1165
1166 wcn36xx_smd_set_bss_ht_params(vif, sta, bss);
1167
1168 bss->oper_channel = WCN36XX_HW_CHANNEL(wcn);
1169
1170 if (conf_is_ht40_minus(&wcn->hw->conf))
1171 bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1172 else if (conf_is_ht40_plus(&wcn->hw->conf))
1173 bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1174 else
1175 bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE;
1176
1177 bss->reserved = 0;
1178 wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
1179
1180	/* vif_priv->ssid is only valid in AP and IBSS mode */
1181 bss->ssid.length = vif_priv->ssid.length;
1182 memcpy(bss->ssid.ssid, vif_priv->ssid.ssid, vif_priv->ssid.length);
1183
1184 bss->obss_prot_enabled = 0;
1185 bss->rmf = 0;
1186 bss->max_probe_resp_retry_limit = 0;
1187 bss->hidden_ssid = vif->bss_conf.hidden_ssid;
1188 bss->proxy_probe_resp = 0;
1189 bss->edca_params_valid = 0;
1190
1191 /* FIXME: set acbe, acbk, acvi and acvo */
1192
1193 bss->ext_set_sta_key_param_valid = 0;
1194
1195 /* FIXME: set ext_set_sta_key_param */
1196
1197 bss->spectrum_mgt_enable = 0;
1198 bss->tx_mgmt_power = 0;
1199 bss->max_tx_power = WCN36XX_MAX_POWER(wcn);
1200
1201 bss->action = update;
1202
1203 wcn36xx_dbg(WCN36XX_DBG_HAL,
1204 "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
1205 bss->bssid, bss->self_mac_addr, bss->bss_type,
1206 bss->oper_mode, bss->nw_type);
1207
1208 wcn36xx_dbg(WCN36XX_DBG_HAL,
1209 "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
1210 sta_params->bssid, sta_params->action,
1211 sta_params->sta_index, sta_params->bssid_index,
1212 sta_params->aid, sta_params->type,
1213 sta_params->mac);
1214
1215 if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
1216 ret = wcn36xx_smd_config_bss_v1(wcn, &msg);
1217 } else {
1218 PREPARE_HAL_BUF(wcn->hal_buf, msg);
1219
1220 ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
1221 }
1222 if (ret) {
1223 wcn36xx_err("Sending hal_config_bss failed\n");
1224 goto out;
1225 }
1226 ret = wcn36xx_smd_config_bss_rsp(wcn,
1227 vif,
1228 wcn->hal_buf,
1229 wcn->hal_rsp_len);
1230 if (ret) {
1231 wcn36xx_err("hal_config_bss response failed err=%d\n", ret);
1232 goto out;
1233 }
1234out:
1235 mutex_unlock(&wcn->hal_mutex);
1236 return ret;
1237}
1238
1239int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
1240{
1241 struct wcn36xx_hal_delete_bss_req_msg msg_body;
1242 struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
1243 int ret = 0;
1244
1245 mutex_lock(&wcn->hal_mutex);
1246 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
1247
1248 msg_body.bss_index = priv_vif->bss_index;
1249
1250 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1251
1252 wcn36xx_dbg(WCN36XX_DBG_HAL, "hal delete bss %d\n", msg_body.bss_index);
1253
1254 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1255 if (ret) {
1256 wcn36xx_err("Sending hal_delete_bss failed\n");
1257 goto out;
1258 }
1259 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1260 if (ret) {
1261 wcn36xx_err("hal_delete_bss response failed err=%d\n", ret);
1262 goto out;
1263 }
1264out:
1265 mutex_unlock(&wcn->hal_mutex);
1266 return ret;
1267}
1268
1269int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
1270 struct sk_buff *skb_beacon, u16 tim_off,
1271 u16 p2p_off)
1272{
1273 struct wcn36xx_hal_send_beacon_req_msg msg_body;
1274 int ret = 0;
1275
1276 mutex_lock(&wcn->hal_mutex);
1277 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
1278
1279	/* TODO: find out why the extra 6 bytes are needed here */
1280 msg_body.beacon_length = skb_beacon->len + 6;
1281
1282 if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
1283 memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
1284 memcpy(&(msg_body.beacon[4]), skb_beacon->data,
1285 skb_beacon->len);
1286 } else {
1287		wcn36xx_err("Beacon is too big: beacon size=%d\n",
1288			msg_body.beacon_length);
1289		ret = -ENOMEM;
		goto out;
1290	}
1291 memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
1292
1293	/* TODO: find out why the TIM IE offset needs the extra 4 bytes */
1294 msg_body.tim_ie_offset = tim_off+4;
1295 msg_body.p2p_ie_offset = p2p_off;
1296 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1297
1298 wcn36xx_dbg(WCN36XX_DBG_HAL,
1299 "hal send beacon beacon_length %d\n",
1300 msg_body.beacon_length);
1301
1302 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1303 if (ret) {
1304 wcn36xx_err("Sending hal_send_beacon failed\n");
1305 goto out;
1306 }
1307 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1308 if (ret) {
1309 wcn36xx_err("hal_send_beacon response failed err=%d\n", ret);
1310 goto out;
1311 }
1312out:
1313 mutex_unlock(&wcn->hal_mutex);
1314 return ret;
1315}
1316
1317int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
1318 struct ieee80211_vif *vif,
1319 struct sk_buff *skb)
1320{
1321 struct wcn36xx_hal_send_probe_resp_req_msg msg;
1322 int ret = 0;
1323
1324 mutex_lock(&wcn->hal_mutex);
1325 INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
1326
1327 if (skb->len > BEACON_TEMPLATE_SIZE) {
1328 wcn36xx_warn("probe response template is too big: %d\n",
1329 skb->len);
1330		ret = -E2BIG;
		goto out;
1331	}
1332
1333 msg.probe_resp_template_len = skb->len;
1334 memcpy(&msg.probe_resp_template, skb->data, skb->len);
1335
1336 memcpy(msg.bssid, vif->addr, ETH_ALEN);
1337
1338 PREPARE_HAL_BUF(wcn->hal_buf, msg);
1339
1340 wcn36xx_dbg(WCN36XX_DBG_HAL,
1341 "hal update probe rsp len %d bssid %pM\n",
1342 msg.probe_resp_template_len, msg.bssid);
1343
1344 ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
1345 if (ret) {
1346 wcn36xx_err("Sending hal_update_proberesp_tmpl failed\n");
1347 goto out;
1348 }
1349 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1350 if (ret) {
1351 wcn36xx_err("hal_update_proberesp_tmpl response failed err=%d\n",
1352 ret);
1353 goto out;
1354 }
1355out:
1356 mutex_unlock(&wcn->hal_mutex);
1357 return ret;
1358}
1359
1360int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
1361 enum ani_ed_type enc_type,
1362 u8 keyidx,
1363 u8 keylen,
1364 u8 *key,
1365 u8 sta_index)
1366{
1367 struct wcn36xx_hal_set_sta_key_req_msg msg_body;
1368 int ret = 0;
1369
1370 mutex_lock(&wcn->hal_mutex);
1371 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
1372
1373 msg_body.set_sta_key_params.sta_index = sta_index;
1374 msg_body.set_sta_key_params.enc_type = enc_type;
1375
1376 msg_body.set_sta_key_params.key[0].id = keyidx;
1377 msg_body.set_sta_key_params.key[0].unicast = 1;
1378 msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
1379 msg_body.set_sta_key_params.key[0].pae_role = 0;
1380 msg_body.set_sta_key_params.key[0].length = keylen;
1381 memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
1382 msg_body.set_sta_key_params.single_tid_rc = 1;
1383
1384 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1385
1386 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1387 if (ret) {
1388 wcn36xx_err("Sending hal_set_stakey failed\n");
1389 goto out;
1390 }
1391 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1392 if (ret) {
1393 wcn36xx_err("hal_set_stakey response failed err=%d\n", ret);
1394 goto out;
1395 }
1396out:
1397 mutex_unlock(&wcn->hal_mutex);
1398 return ret;
1399}
1400
1401int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
1402 enum ani_ed_type enc_type,
1403 u8 keyidx,
1404 u8 keylen,
1405 u8 *key)
1406{
1407 struct wcn36xx_hal_set_bss_key_req_msg msg_body;
1408 int ret = 0;
1409
1410 mutex_lock(&wcn->hal_mutex);
1411 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
1412 msg_body.bss_idx = 0;
1413 msg_body.enc_type = enc_type;
1414 msg_body.num_keys = 1;
1415 msg_body.keys[0].id = keyidx;
1416 msg_body.keys[0].unicast = 0;
1417 msg_body.keys[0].direction = WCN36XX_HAL_RX_ONLY;
1418 msg_body.keys[0].pae_role = 0;
1419 msg_body.keys[0].length = keylen;
1420 memcpy(msg_body.keys[0].key, key, keylen);
1421
1422 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1423
1424 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1425 if (ret) {
1426 wcn36xx_err("Sending hal_set_bsskey failed\n");
1427 goto out;
1428 }
1429 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1430 if (ret) {
1431 wcn36xx_err("hal_set_bsskey response failed err=%d\n", ret);
1432 goto out;
1433 }
1434out:
1435 mutex_unlock(&wcn->hal_mutex);
1436 return ret;
1437}
1438
1439int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
1440 enum ani_ed_type enc_type,
1441 u8 keyidx,
1442 u8 sta_index)
1443{
1444 struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
1445 int ret = 0;
1446
1447 mutex_lock(&wcn->hal_mutex);
1448 INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
1449
1450 msg_body.sta_idx = sta_index;
1451 msg_body.enc_type = enc_type;
1452 msg_body.key_id = keyidx;
1453
1454 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1455
1456 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1457 if (ret) {
1458 wcn36xx_err("Sending hal_remove_stakey failed\n");
1459 goto out;
1460 }
1461 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1462 if (ret) {
1463 wcn36xx_err("hal_remove_stakey response failed err=%d\n", ret);
1464 goto out;
1465 }
1466out:
1467 mutex_unlock(&wcn->hal_mutex);
1468 return ret;
1469}
1470
1471int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
1472 enum ani_ed_type enc_type,
1473 u8 keyidx)
1474{
1475 struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
1476 int ret = 0;
1477
1478 mutex_lock(&wcn->hal_mutex);
1479 INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
1480 msg_body.bss_idx = 0;
1481 msg_body.enc_type = enc_type;
1482 msg_body.key_id = keyidx;
1483
1484 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1485
1486 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1487 if (ret) {
1488 wcn36xx_err("Sending hal_remove_bsskey failed\n");
1489 goto out;
1490 }
1491 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1492 if (ret) {
1493 wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
1494 goto out;
1495 }
1496out:
1497 mutex_unlock(&wcn->hal_mutex);
1498 return ret;
1499}
1500
1501int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
1502{
1503 struct wcn36xx_hal_enter_bmps_req_msg msg_body;
1504 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
1505 int ret = 0;
1506
1507 mutex_lock(&wcn->hal_mutex);
1508 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
1509
1510 msg_body.bss_index = vif_priv->bss_index;
1511 msg_body.tbtt = vif->bss_conf.sync_tsf;
1512 msg_body.dtim_period = vif_priv->dtim_period;
1513
1514 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1515
1516 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1517 if (ret) {
1518 wcn36xx_err("Sending hal_enter_bmps failed\n");
1519 goto out;
1520 }
1521 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1522 if (ret) {
1523 wcn36xx_err("hal_enter_bmps response failed err=%d\n", ret);
1524 goto out;
1525 }
1526out:
1527 mutex_unlock(&wcn->hal_mutex);
1528 return ret;
1529}
1530
1531int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
1532{
1533	struct wcn36xx_hal_exit_bmps_req_msg msg_body;
1534 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
1535 int ret = 0;
1536
1537 mutex_lock(&wcn->hal_mutex);
1538 INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
1539
1540 msg_body.bss_index = vif_priv->bss_index;
1541
1542 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1543
1544 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1545 if (ret) {
1546 wcn36xx_err("Sending hal_exit_bmps failed\n");
1547 goto out;
1548 }
1549 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1550 if (ret) {
1551 wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
1552 goto out;
1553 }
1554out:
1555 mutex_unlock(&wcn->hal_mutex);
1556 return ret;
1557}

1558int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
1559{
1560 struct wcn36xx_hal_set_power_params_req_msg msg_body;
1561 int ret = 0;
1562
1563 mutex_lock(&wcn->hal_mutex);
1564 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
1565
1566	/*
1567	 * When the host is down, ignore every second DTIM.
1568	 */
1569 if (ignore_dtim) {
1570 msg_body.ignore_dtim = 1;
1571 msg_body.dtim_period = 2;
1572 }
1573 msg_body.listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
1574
1575 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1576
1577 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1578 if (ret) {
1579 wcn36xx_err("Sending hal_set_power_params failed\n");
1580 goto out;
1581 }
1582
1583out:
1584 mutex_unlock(&wcn->hal_mutex);
1585 return ret;
1586}

1587/* Note: this function should only be called after association; otherwise
1588 * the request is invalid.
1589 */
1590int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
1591 struct ieee80211_vif *vif,
1592 int packet_type)
1593{
1594 struct wcn36xx_hal_keep_alive_req_msg msg_body;
1595 struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
1596 int ret = 0;
1597
1598 mutex_lock(&wcn->hal_mutex);
1599 INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
1600
1601 if (packet_type == WCN36XX_HAL_KEEP_ALIVE_NULL_PKT) {
1602 msg_body.bss_index = vif_priv->bss_index;
1603 msg_body.packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
1604 msg_body.time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
1605 } else if (packet_type == WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP) {
1606		/* TODO: the firmware also supports the ARP response type */
1607	} else {
1608		wcn36xx_warn("unknown keep alive packet type %d\n", packet_type);
1609		ret = -EINVAL;
		goto out;
1610	}
1611
1612 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1613
1614 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1615 if (ret) {
1616		wcn36xx_err("Sending hal_keep_alive failed\n");
1617 goto out;
1618 }
1619 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1620 if (ret) {
1621		wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
1622 goto out;
1623 }
1624out:
1625 mutex_unlock(&wcn->hal_mutex);
1626 return ret;
1627}
1628
1629int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
1630 u32 arg3, u32 arg4, u32 arg5)
1631{
1632 struct wcn36xx_hal_dump_cmd_req_msg msg_body;
1633 int ret = 0;
1634
1635 mutex_lock(&wcn->hal_mutex);
1636 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
1637
1638 msg_body.arg1 = arg1;
1639 msg_body.arg2 = arg2;
1640 msg_body.arg3 = arg3;
1641 msg_body.arg4 = arg4;
1642 msg_body.arg5 = arg5;
1643
1644 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1645
1646 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1647 if (ret) {
1648 wcn36xx_err("Sending hal_dump_cmd failed\n");
1649 goto out;
1650 }
1651 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1652 if (ret) {
1653 wcn36xx_err("hal_dump_cmd response failed err=%d\n", ret);
1654 goto out;
1655 }
1656out:
1657 mutex_unlock(&wcn->hal_mutex);
1658 return ret;
1659}
1660
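/*
 * The firmware capability bitmap spans 128 bits stored as four u32 words;
 * the helpers below map a capability index to its word and bit position.
 */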
1661static inline void set_feat_caps(u32 *bitmap,
1662 enum place_holder_in_cap_bitmap cap)
1663{
1664 int arr_idx, bit_idx;
1665
1666 if (cap < 0 || cap > 127) {
1667 wcn36xx_warn("error cap idx %d\n", cap);
1668 return;
1669 }
1670
1671 arr_idx = cap / 32;
1672 bit_idx = cap % 32;
1673 bitmap[arr_idx] |= (1 << bit_idx);
1674}
1675
1676static inline int get_feat_caps(u32 *bitmap,
1677 enum place_holder_in_cap_bitmap cap)
1678{
1679 int arr_idx, bit_idx;
1680 int ret = 0;
1681
1682 if (cap < 0 || cap > 127) {
1683 wcn36xx_warn("error cap idx %d\n", cap);
1684 return -EINVAL;
1685 }
1686
1687 arr_idx = cap / 32;
1688 bit_idx = cap % 32;
1689 ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
1690 return ret;
1691}
1692
1693static inline void clear_feat_caps(u32 *bitmap,
1694 enum place_holder_in_cap_bitmap cap)
1695{
1696 int arr_idx, bit_idx;
1697
1698 if (cap < 0 || cap > 127) {
1699 wcn36xx_warn("error cap idx %d\n", cap);
1700 return;
1701 }
1702
1703 arr_idx = cap / 32;
1704 bit_idx = cap % 32;
1705 bitmap[arr_idx] &= ~(1 << bit_idx);
1706}
1707
1708int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
1709{
1710 struct wcn36xx_hal_feat_caps_msg msg_body;
1711 int ret = 0;
1712
1713 mutex_lock(&wcn->hal_mutex);
1714 INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
1715
1716 set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
1717
1718 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1719
1720 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1721 if (ret) {
1722 wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
1723 goto out;
1724 }
1725 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1726 if (ret) {
1727 wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
1728 ret);
1729 goto out;
1730 }
1731out:
1732 mutex_unlock(&wcn->hal_mutex);
1733 return ret;
1734}
1735
1736int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
1737 struct ieee80211_sta *sta,
1738 u16 tid,
1739 u16 *ssn,
1740 u8 direction,
1741 u8 sta_index)
1742{
1743 struct wcn36xx_hal_add_ba_session_req_msg msg_body;
1744 int ret = 0;
1745
1746 mutex_lock(&wcn->hal_mutex);
1747 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
1748
1749 msg_body.sta_index = sta_index;
1750 memcpy(&msg_body.mac_addr, sta->addr, ETH_ALEN);
1751 msg_body.dialog_token = 0x10;
1752 msg_body.tid = tid;
1753
1754 /* Immediate BA because Delayed BA is not supported */
1755 msg_body.policy = 1;
1756 msg_body.buffer_size = WCN36XX_AGGR_BUFFER_SIZE;
1757 msg_body.timeout = 0;
1758 if (ssn)
1759 msg_body.ssn = *ssn;
1760 msg_body.direction = direction;
1761
1762 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1763
1764 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1765 if (ret) {
1766 wcn36xx_err("Sending hal_add_ba_session failed\n");
1767 goto out;
1768 }
1769 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1770 if (ret) {
1771 wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
1772 goto out;
1773 }
1774out:
1775 mutex_unlock(&wcn->hal_mutex);
1776 return ret;
1777}
1778
1779int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
1780{
1781 struct wcn36xx_hal_add_ba_req_msg msg_body;
1782 int ret = 0;
1783
1784 mutex_lock(&wcn->hal_mutex);
1785 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
1786
1787 msg_body.session_id = 0;
1788 msg_body.win_size = WCN36XX_AGGR_BUFFER_SIZE;
1789
1790 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1791
1792 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1793 if (ret) {
1794 wcn36xx_err("Sending hal_add_ba failed\n");
1795 goto out;
1796 }
1797 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1798 if (ret) {
1799 wcn36xx_err("hal_add_ba response failed err=%d\n", ret);
1800 goto out;
1801 }
1802out:
1803 mutex_unlock(&wcn->hal_mutex);
1804 return ret;
1805}
1806
1807int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
1808{
1809 struct wcn36xx_hal_del_ba_req_msg msg_body;
1810 int ret = 0;
1811
1812 mutex_lock(&wcn->hal_mutex);
1813 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
1814
1815 msg_body.sta_index = sta_index;
1816 msg_body.tid = tid;
1817 msg_body.direction = 0;
1818 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1819
1820 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1821 if (ret) {
1822 wcn36xx_err("Sending hal_del_ba failed\n");
1823 goto out;
1824 }
1825 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1826 if (ret) {
1827 wcn36xx_err("hal_del_ba response failed err=%d\n", ret);
1828 goto out;
1829 }
1830out:
1831 mutex_unlock(&wcn->hal_mutex);
1832 return ret;
1833}
1834
1835int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
1836{
1837 struct wcn36xx_hal_trigger_ba_req_msg msg_body;
1838 struct wcn36xx_hal_trigget_ba_req_candidate *candidate;
1839 int ret = 0;
1840
1841 mutex_lock(&wcn->hal_mutex);
1842 INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
1843
1844 msg_body.session_id = 0;
1845 msg_body.candidate_cnt = 1;
1846 msg_body.header.len += sizeof(*candidate);
1847 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1848
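 /* The BA candidate record follows the fixed request header in the HAL
 * buffer, hence the header length adjustment above.
 */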
1849 candidate = (struct wcn36xx_hal_trigget_ba_req_candidate *)
1850 (wcn->hal_buf + sizeof(msg_body));
1851 candidate->sta_index = sta_index;
1852 candidate->tid_bitmap = 1;
1853
1854 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1855 if (ret) {
1856 wcn36xx_err("Sending hal_trigger_ba failed\n");
1857 goto out;
1858 }
1859 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1860 if (ret) {
1861 wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
1862 goto out;
1863 }
1864out:
1865 mutex_unlock(&wcn->hal_mutex);
1866 return ret;
1867}
1868
1869static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
1870{
1871 struct wcn36xx_hal_tx_compl_ind_msg *rsp = buf;
1872
1873 if (len != sizeof(*rsp)) {
1874 wcn36xx_warn("Bad TX complete indication\n");
1875 return -EIO;
1876 }
1877
1878 wcn36xx_dxe_tx_ack_ind(wcn, rsp->status);
1879
1880 return 0;
1881}
1882
1883static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
1884 void *buf,
1885 size_t len)
1886{
1887 struct wcn36xx_hal_missed_beacon_ind_msg *rsp = buf;
1888 struct ieee80211_vif *vif = NULL;
1889 struct wcn36xx_vif *tmp;
1890
1891 /* Old FW does not have bss index */
1892 if (wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
1893 list_for_each_entry(tmp, &wcn->vif_list, list) {
1894 wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
1895 tmp->bss_index);
1896 vif = container_of((void *)tmp,
1897 struct ieee80211_vif,
1898 drv_priv);
1899 ieee80211_connection_loss(vif);
1900 }
1901 return 0;
1902 }
1903
1904 if (len != sizeof(*rsp)) {
1905 wcn36xx_warn("Corrupted missed beacon indication\n");
1906 return -EIO;
1907 }
1908
1909 list_for_each_entry(tmp, &wcn->vif_list, list) {
1910 if (tmp->bss_index == rsp->bss_index) {
1911 wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
1912 rsp->bss_index);
1913 vif = container_of((void *)tmp,
1914 struct ieee80211_vif,
1915 drv_priv);
1916 ieee80211_connection_loss(vif);
1917 return 0;
1918 }
1919 }
1920
1921 wcn36xx_warn("BSS index %d not found\n", rsp->bss_index);
1922 return -ENOENT;
1923}
1924
1925static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
1926 void *buf,
1927 size_t len)
1928{
1929 struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
1930 struct wcn36xx_vif *tmp;
1931 struct ieee80211_sta *sta = NULL;
1932
1933 if (len != sizeof(*rsp)) {
1934 wcn36xx_warn("Corrupted delete sta indication\n");
1935 return -EIO;
1936 }
1937
1938 list_for_each_entry(tmp, &wcn->vif_list, list) {
1939 if (tmp->sta && (tmp->sta->sta_index == rsp->sta_id)) {
1940 sta = container_of((void *)tmp->sta,
1941 struct ieee80211_sta,
1942 drv_priv);
1943 wcn36xx_dbg(WCN36XX_DBG_HAL,
1944 "delete station indication %pM index %d\n",
1945 rsp->addr2,
1946 rsp->sta_id);
1947 ieee80211_report_low_ack(sta, 0);
1948 return 0;
1949 }
1950 }
1951
1952 wcn36xx_warn("STA with addr %pM and index %d not found\n",
1953 rsp->addr2,
1954 rsp->sta_id);
1955 return -ENOENT;
1956}
1957
1958int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
1959{
1960 struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
1961 size_t len;
1962 int ret = 0;
1963
1964 mutex_lock(&wcn->hal_mutex);
1965 INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
1966
1967 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
1968
1969 body = (struct wcn36xx_hal_update_cfg_req_msg *) wcn->hal_buf;
1970 len = msg_body.header.len;
1971
1972 put_cfg_tlv_u32(wcn, &len, cfg_id, value);
1973 body->header.len = len;
1974 body->len = len - sizeof(*body);
1975
1976 ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
1977 if (ret) {
1978 wcn36xx_err("Sending hal_update_cfg failed\n");
1979 goto out;
1980 }
1981 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1982 if (ret) {
1983 wcn36xx_err("hal_update_cfg response failed err=%d\n", ret);
1984 goto out;
1985 }
1986out:
1987 mutex_unlock(&wcn->hal_mutex);
1988 return ret;
1989}
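/*
 * SMD receive callback: solicited responses are copied into hal_buf and
 * complete the caller waiting in wcn36xx_smd_send_and_wait(), while
 * indications are queued for deferred handling in wcn36xx_ind_smd_work().
 */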
1990static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
1991{
1992 struct wcn36xx_hal_msg_header *msg_header = buf;
1993 struct wcn36xx_hal_ind_msg *msg_ind;
1994 wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
1995
1996 switch (msg_header->msg_type) {
1997 case WCN36XX_HAL_START_RSP:
1998 case WCN36XX_HAL_CONFIG_STA_RSP:
1999 case WCN36XX_HAL_CONFIG_BSS_RSP:
2000 case WCN36XX_HAL_ADD_STA_SELF_RSP:
2001 case WCN36XX_HAL_STOP_RSP:
2002 case WCN36XX_HAL_DEL_STA_SELF_RSP:
2003 case WCN36XX_HAL_DELETE_STA_RSP:
2004 case WCN36XX_HAL_INIT_SCAN_RSP:
2005 case WCN36XX_HAL_START_SCAN_RSP:
2006 case WCN36XX_HAL_END_SCAN_RSP:
2007 case WCN36XX_HAL_FINISH_SCAN_RSP:
2008 case WCN36XX_HAL_DOWNLOAD_NV_RSP:
2009 case WCN36XX_HAL_DELETE_BSS_RSP:
2010 case WCN36XX_HAL_SEND_BEACON_RSP:
2011 case WCN36XX_HAL_SET_LINK_ST_RSP:
2012 case WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP:
2013 case WCN36XX_HAL_SET_BSSKEY_RSP:
2014 case WCN36XX_HAL_SET_STAKEY_RSP:
2015 case WCN36XX_HAL_RMV_STAKEY_RSP:
2016 case WCN36XX_HAL_RMV_BSSKEY_RSP:
2017 case WCN36XX_HAL_ENTER_BMPS_RSP:
2018 case WCN36XX_HAL_SET_POWER_PARAMS_RSP:
2019 case WCN36XX_HAL_EXIT_BMPS_RSP:
2020 case WCN36XX_HAL_KEEP_ALIVE_RSP:
2021 case WCN36XX_HAL_DUMP_COMMAND_RSP:
2022 case WCN36XX_HAL_ADD_BA_SESSION_RSP:
2023 case WCN36XX_HAL_ADD_BA_RSP:
2024 case WCN36XX_HAL_DEL_BA_RSP:
2025 case WCN36XX_HAL_TRIGGER_BA_RSP:
2026 case WCN36XX_HAL_UPDATE_CFG_RSP:
2027 case WCN36XX_HAL_JOIN_RSP:
2028 case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
2029 case WCN36XX_HAL_CH_SWITCH_RSP:
2030 case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
2031 memcpy(wcn->hal_buf, buf, len);
2032 wcn->hal_rsp_len = len;
2033 complete(&wcn->hal_rsp_compl);
2034 break;
2035
2036 case WCN36XX_HAL_OTA_TX_COMPL_IND:
2037 case WCN36XX_HAL_MISSED_BEACON_IND:
2038 case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
2039 mutex_lock(&wcn->hal_ind_mutex);
2040 msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
2041 msg_ind->msg_len = len;
2042 msg_ind->msg = kmalloc(len, GFP_KERNEL);
2043 memcpy(msg_ind->msg, buf, len);
2044 list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
2045 queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
2046 wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
2047 mutex_unlock(&wcn->hal_ind_mutex);
2048 break;
2049 default:
2050 wcn36xx_err("SMD_EVENT (%d) not supported\n",
2051 msg_header->msg_type);
2052 }
2053}
2054static void wcn36xx_ind_smd_work(struct work_struct *work)
2055{
2056 struct wcn36xx *wcn =
2057 container_of(work, struct wcn36xx, hal_ind_work);
2058 struct wcn36xx_hal_msg_header *msg_header;
2059 struct wcn36xx_hal_ind_msg *hal_ind_msg;
2060
2061 mutex_lock(&wcn->hal_ind_mutex);
2062
2063 hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
2064 struct wcn36xx_hal_ind_msg,
2065 list);
2066
2067 msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
2068
2069 switch (msg_header->msg_type) {
2070 case WCN36XX_HAL_OTA_TX_COMPL_IND:
2071 wcn36xx_smd_tx_compl_ind(wcn,
2072 hal_ind_msg->msg,
2073 hal_ind_msg->msg_len);
2074 break;
2075 case WCN36XX_HAL_MISSED_BEACON_IND:
2076 wcn36xx_smd_missed_beacon_ind(wcn,
2077 hal_ind_msg->msg,
2078 hal_ind_msg->msg_len);
2079 break;
2080 case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
2081 wcn36xx_smd_delete_sta_context_ind(wcn,
2082 hal_ind_msg->msg,
2083 hal_ind_msg->msg_len);
2084 break;
2085 default:
2086 wcn36xx_err("SMD_EVENT (%d) not supported\n",
2087 msg_header->msg_type);
2088 }
2089 list_del(wcn->hal_ind_queue.next);
2090 kfree(hal_ind_msg->msg);
2091 kfree(hal_ind_msg);
2092 mutex_unlock(&wcn->hal_ind_mutex);
2093}
2094int wcn36xx_smd_open(struct wcn36xx *wcn)
2095{
2096 int ret = 0;
2097 wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
2098 if (!wcn->hal_ind_wq) {
2099 wcn36xx_err("failed to allocate wq\n");
2100 ret = -ENOMEM;
2101 goto out;
2102 }
2103 INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work);
2104 INIT_LIST_HEAD(&wcn->hal_ind_queue);
2105 mutex_init(&wcn->hal_ind_mutex);
2106
2107 ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process);
2108 if (ret) {
2109 wcn36xx_err("failed to open control channel\n");
2110 goto free_wq;
2111 }
2112
2113 return ret;
2114
2115free_wq:
2116 destroy_workqueue(wcn->hal_ind_wq);
2117out:
2118 return ret;
2119}
2120
2121void wcn36xx_smd_close(struct wcn36xx *wcn)
2122{
2123 wcn->ctrl_ops->close();
2124 destroy_workqueue(wcn->hal_ind_wq);
2125 mutex_destroy(&wcn->hal_ind_mutex);
2126}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
new file mode 100644
index 000000000000..e7c39019c6f1
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -0,0 +1,127 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _SMD_H_
18#define _SMD_H_
19
20#include "wcn36xx.h"
21
 22/* Max shared size is 4k but we take less. */
23#define WCN36XX_NV_FRAGMENT_SIZE 3072
24
25#define WCN36XX_HAL_BUF_SIZE 4096
26
27#define HAL_MSG_TIMEOUT 200
28#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
29#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
 30/* The PNO version info is contained in the rsp msg */
31#define WCN36XX_FW_MSG_PNO_VERSION_MASK 0x8000
32
33enum wcn36xx_fw_msg_result {
34 WCN36XX_FW_MSG_RESULT_SUCCESS = 0,
35 WCN36XX_FW_MSG_RESULT_SUCCESS_SYNC = 1,
36
37 WCN36XX_FW_MSG_RESULT_MEM_FAIL = 5,
38};
39
40/******************************/
41/* SMD requests and responses */
42/******************************/
43struct wcn36xx_fw_msg_status_rsp {
44 u32 status;
45} __packed;
46
47struct wcn36xx_hal_ind_msg {
48 struct list_head list;
49 u8 *msg;
50 size_t msg_len;
51};
52
53struct wcn36xx;
54
55int wcn36xx_smd_open(struct wcn36xx *wcn);
56void wcn36xx_smd_close(struct wcn36xx *wcn);
57
58int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
59int wcn36xx_smd_start(struct wcn36xx *wcn);
60int wcn36xx_smd_stop(struct wcn36xx *wcn);
61int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
62int wcn36xx_smd_start_scan(struct wcn36xx *wcn);
63int wcn36xx_smd_end_scan(struct wcn36xx *wcn);
64int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
65 enum wcn36xx_hal_sys_mode mode);
66int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn);
67int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
68int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
69int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
70int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch);
71int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
72 const u8 *sta_mac,
73 enum wcn36xx_hal_link_state state);
74int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
75 struct ieee80211_sta *sta, const u8 *bssid,
76 bool update);
77int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif);
78int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
79 struct ieee80211_sta *sta);
80int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
81 struct sk_buff *skb_beacon, u16 tim_off,
82 u16 p2p_off);
83int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
84 struct ieee80211_vif *vif, int ch);
85int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
86 struct ieee80211_vif *vif,
87 struct sk_buff *skb);
88int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
89 enum ani_ed_type enc_type,
90 u8 keyidx,
91 u8 keylen,
92 u8 *key,
93 u8 sta_index);
94int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
95 enum ani_ed_type enc_type,
96 u8 keyidx,
97 u8 keylen,
98 u8 *key);
99int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
100 enum ani_ed_type enc_type,
101 u8 keyidx,
102 u8 sta_index);
103int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
104 enum ani_ed_type enc_type,
105 u8 keyidx);
106int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
107int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
108int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim);
109int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
110 struct ieee80211_vif *vif,
111 int packet_type);
112int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
113 u32 arg3, u32 arg4, u32 arg5);
114int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
115
116int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
117 struct ieee80211_sta *sta,
118 u16 tid,
119 u16 *ssn,
120 u8 direction,
121 u8 sta_index);
122int wcn36xx_smd_add_ba(struct wcn36xx *wcn);
123int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
124int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
125
126int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
127#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
new file mode 100644
index 000000000000..b2b60e30caaf
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -0,0 +1,284 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include "txrx.h"
20
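/* The top byte of phy_stat0 carries the RSSI indication; the caller negates
 * the value returned here when filling ieee80211_rx_status.signal.
 */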
21static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
22{
23 return 100 - ((bd->phy_stat0 >> 24) & 0xff);
24}
25
26int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
27{
28 struct ieee80211_rx_status status;
29 struct ieee80211_hdr *hdr;
30 struct wcn36xx_rx_bd *bd;
31 u16 fc, sn;
32
 33 /*
 34 * All status fields must be zeroed, otherwise stale values can lead
 35 * to unexpected behaviour in mac80211.
 36 */
37 memset(&status, 0, sizeof(status));
38
39 bd = (struct wcn36xx_rx_bd *)skb->data;
40 buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
41 wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP,
42 "BD <<< ", (char *)bd,
43 sizeof(struct wcn36xx_rx_bd));
44
45 skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
46 skb_pull(skb, bd->pdu.mpdu_header_off);
47
48 status.mactime = 10;
49 status.freq = WCN36XX_CENTER_FREQ(wcn);
50 status.band = WCN36XX_BAND(wcn);
51 status.signal = -get_rssi0(bd);
52 status.antenna = 1;
53 status.rate_idx = 1;
54 status.flag = 0;
55 status.rx_flags = 0;
56 status.flag |= RX_FLAG_IV_STRIPPED |
57 RX_FLAG_MMIC_STRIPPED |
58 RX_FLAG_DECRYPTED;
59
60 wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
61 status.flag, status.vendor_radiotap_len);
62
63 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
64
65 hdr = (struct ieee80211_hdr *) skb->data;
66 fc = __le16_to_cpu(hdr->frame_control);
67 sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
68
69 if (ieee80211_is_beacon(hdr->frame_control)) {
70 wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n",
71 skb, skb->len, fc, sn);
72 wcn36xx_dbg_dump(WCN36XX_DBG_BEACON_DUMP, "SKB <<< ",
73 (char *)skb->data, skb->len);
74 } else {
75 wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %04x sn %d\n",
76 skb, skb->len, fc, sn);
77 wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
78 (char *)skb->data, skb->len);
79 }
80
81 ieee80211_rx_irqsafe(wcn->hw, skb);
82
83 return 0;
84}
85
86static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
87 u32 mpdu_header_len,
88 u32 len,
89 u16 tid)
90{
91 bd->pdu.mpdu_header_len = mpdu_header_len;
92 bd->pdu.mpdu_header_off = sizeof(*bd);
93 bd->pdu.mpdu_data_off = bd->pdu.mpdu_header_len +
94 bd->pdu.mpdu_header_off;
95 bd->pdu.mpdu_len = len;
96 bd->pdu.tid = tid;
97}
98
99static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
100 u8 *addr)
101{
102 struct wcn36xx_vif *vif_priv = NULL;
103 struct ieee80211_vif *vif = NULL;
104 list_for_each_entry(vif_priv, &wcn->vif_list, list) {
105 vif = container_of((void *)vif_priv,
106 struct ieee80211_vif,
107 drv_priv);
108 if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
109 return vif_priv;
110 }
111 wcn36xx_warn("vif %pM not found\n", addr);
112 return NULL;
113}
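/*
 * Fill the TX BD for data frames: use the station's indices when a station
 * entry is available, otherwise fall back to the self-STA of the vif that
 * owns the transmitter address.
 */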
114static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
115 struct wcn36xx *wcn,
116 struct wcn36xx_vif **vif_priv,
117 struct wcn36xx_sta *sta_priv,
118 struct ieee80211_hdr *hdr,
119 bool bcast)
120{
121 struct ieee80211_vif *vif = NULL;
122 struct wcn36xx_vif *__vif_priv = NULL;
123 bd->bd_rate = WCN36XX_BD_RATE_DATA;
124
125 /*
126 * For non-unicast frames mac80211 does not set the sta pointer, so use
127 * the self_sta_index instead.
128 */
129 if (sta_priv) {
130 __vif_priv = sta_priv->vif;
131 vif = container_of((void *)__vif_priv,
132 struct ieee80211_vif,
133 drv_priv);
134
135 if (vif->type == NL80211_IFTYPE_STATION) {
136 bd->sta_index = sta_priv->bss_sta_index;
137 bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
138 } else if (vif->type == NL80211_IFTYPE_AP ||
139 vif->type == NL80211_IFTYPE_ADHOC ||
140 vif->type == NL80211_IFTYPE_MESH_POINT) {
141 bd->sta_index = sta_priv->sta_index;
142 bd->dpu_desc_idx = sta_priv->dpu_desc_index;
143 }
144 } else {
145 __vif_priv = get_vif_by_addr(wcn, hdr->addr2);
146 bd->sta_index = __vif_priv->self_sta_index;
147 bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
148 }
149
150 bd->dpu_sign = __vif_priv->ucast_dpu_signature;
151
152 if (ieee80211_is_nullfunc(hdr->frame_control) ||
153 (sta_priv && !sta_priv->is_data_encrypted))
154 bd->dpu_ne = 1;
155
156 if (bcast) {
157 bd->ub = 1;
158 bd->ack_policy = 1;
159 }
160 *vif_priv = __vif_priv;
161}
162
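/*
 * Fill the TX BD for management and control frames; these are always sent
 * through the self-STA of the transmitting vif.
 */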
163static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
164 struct wcn36xx *wcn,
165 struct wcn36xx_vif **vif_priv,
166 struct ieee80211_hdr *hdr,
167 bool bcast)
168{
169 struct wcn36xx_vif *__vif_priv =
170 get_vif_by_addr(wcn, hdr->addr2);
171 bd->sta_index = __vif_priv->self_sta_index;
172 bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
173 bd->dpu_ne = 1;
174
175 /* default rate for unicast */
176 if (ieee80211_is_mgmt(hdr->frame_control))
177 bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
178 WCN36XX_BD_RATE_CTRL :
179 WCN36XX_BD_RATE_MGMT;
180 else if (ieee80211_is_ctl(hdr->frame_control))
181 bd->bd_rate = WCN36XX_BD_RATE_CTRL;
182 else
183 wcn36xx_warn("frame control type unknown\n");
184
185 /*
186 * While joining, trick the hardware into sending the probe request as
187 * unicast even if the address is broadcast.
188 */
189 if (__vif_priv->is_joining &&
190 ieee80211_is_probe_req(hdr->frame_control))
191 bcast = false;
192
193 if (bcast) {
194 /* broadcast */
195 bd->ub = 1;
196 /* No ack needed for non-unicast frames */
197 bd->ack_policy = 1;
198 bd->queue_id = WCN36XX_TX_B_WQ_ID;
199 } else
200 bd->queue_id = WCN36XX_TX_U_WQ_ID;
201 *vif_priv = __vif_priv;
202}
203
204int wcn36xx_start_tx(struct wcn36xx *wcn,
205 struct wcn36xx_sta *sta_priv,
206 struct sk_buff *skb)
207{
208 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
209 struct wcn36xx_vif *vif_priv = NULL;
210 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
211 unsigned long flags;
212 bool is_low = ieee80211_is_data(hdr->frame_control);
213 bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
214 is_multicast_ether_addr(hdr->addr1);
215 struct wcn36xx_tx_bd *bd = wcn36xx_dxe_get_next_bd(wcn, is_low);
216
217 if (!bd) {
218 /*
219 * TX DXE descriptors are used in pairs: one for the BD and one for
220 * the actual frame. The BD DXE has a preallocated buffer while the
221 * skb one does not. If this is not the case something is really
222 * weird. TODO: recover from this situation
223 */
224
225 wcn36xx_err("bd address may not be NULL for BD DXE\n");
226 return -EINVAL;
227 }
228
229 memset(bd, 0, sizeof(*bd));
230
231 wcn36xx_dbg(WCN36XX_DBG_TX,
232 "tx skb %p len %d fc %04x sn %d %s %s\n",
233 skb, skb->len, __le16_to_cpu(hdr->frame_control),
234 IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
235 is_low ? "low" : "high", bcast ? "bcast" : "ucast");
236
237 wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len);
238
239 bd->dpu_rf = WCN36XX_BMU_WQ_TX;
240
241 bd->tx_comp = info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS;
242 if (bd->tx_comp) {
243 wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
244 spin_lock_irqsave(&wcn->dxe_lock, flags);
245 if (wcn->tx_ack_skb) {
246 spin_unlock_irqrestore(&wcn->dxe_lock, flags);
247 wcn36xx_warn("tx_ack_skb already set\n");
248 return -EINVAL;
249 }
250
251 wcn->tx_ack_skb = skb;
252 spin_unlock_irqrestore(&wcn->dxe_lock, flags);
253
254 /* Only one TX ack status request at a time is supported by the fw.
255 * Stop the TX queues until the ack status comes back.
256 *
257 * TODO: add a watchdog in case the FW does not answer
258 */
259 ieee80211_stop_queues(wcn->hw);
260 }
261
262 /* Data frames are served first */
263 if (is_low) {
264 wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, hdr, bcast);
265 wcn36xx_set_tx_pdu(bd,
266 ieee80211_is_data_qos(hdr->frame_control) ?
267 sizeof(struct ieee80211_qos_hdr) :
268 sizeof(struct ieee80211_hdr_3addr),
269 skb->len, sta_priv ? sta_priv->tid : 0);
270 } else {
271 /* MGMT and CTRL frames are handled here */
272 wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, hdr, bcast);
273 wcn36xx_set_tx_pdu(bd,
274 ieee80211_is_data_qos(hdr->frame_control) ?
275 sizeof(struct ieee80211_qos_hdr) :
276 sizeof(struct ieee80211_hdr_3addr),
277 skb->len, WCN36XX_TID);
278 }
279
280 buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
281 bd->tx_bd_sign = 0xbdbdbdbd;
282
283 return wcn36xx_dxe_tx_frame(wcn, vif_priv, skb, is_low);
284}
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
new file mode 100644
index 000000000000..bbfbcf808c77
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -0,0 +1,160 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _TXRX_H_
18#define _TXRX_H_
19
20#include <linux/etherdevice.h>
21#include "wcn36xx.h"
22
23/* TODO describe all properties */
24#define WCN36XX_802_11_HEADER_LEN 24
25#define WCN36XX_BMU_WQ_TX 25
26#define WCN36XX_TID 7
27/* broadcast wq ID */
28#define WCN36XX_TX_B_WQ_ID 0xA
29#define WCN36XX_TX_U_WQ_ID 0x9
30/* bd_rate */
31#define WCN36XX_BD_RATE_DATA 0
32#define WCN36XX_BD_RATE_MGMT 2
33#define WCN36XX_BD_RATE_CTRL 3
34
35struct wcn36xx_pdu {
36 u32 dpu_fb:8;
37 u32 adu_fb:8;
38 u32 pdu_id:16;
39
40 /* 0x04*/
41 u32 tail_pdu_idx:16;
42 u32 head_pdu_idx:16;
43
44 /* 0x08*/
45 u32 pdu_count:7;
46 u32 mpdu_data_off:9;
47 u32 mpdu_header_off:8;
48 u32 mpdu_header_len:8;
49
50 /* 0x0c*/
51 u32 reserved4:8;
52 u32 tid:4;
53 u32 reserved3:4;
54 u32 mpdu_len:16;
55};
56
57struct wcn36xx_rx_bd {
58 u32 bdt:2;
59 u32 ft:1;
60 u32 dpu_ne:1;
61 u32 rx_key_id:3;
62 u32 ub:1;
63 u32 rmf:1;
64 u32 uma_bypass:1;
65 u32 csr11:1;
66 u32 reserved0:1;
67 u32 scan_learn:1;
68 u32 rx_ch:4;
69 u32 rtsf:1;
70 u32 bsf:1;
71 u32 a2hf:1;
72 u32 st_auf:1;
73 u32 dpu_sign:3;
74 u32 dpu_rf:8;
75
76 struct wcn36xx_pdu pdu;
77
78 /* 0x14*/
79 u32 addr3:8;
80 u32 addr2:8;
81 u32 addr1:8;
82 u32 dpu_desc_idx:8;
83
84 /* 0x18*/
85 u32 rxp_flags:23;
86 u32 rate_id:9;
87
88 u32 phy_stat0;
89 u32 phy_stat1;
90
91 /* 0x24 */
92 u32 rx_times;
93
94 u32 pmi_cmd[6];
95
96 /* 0x40 */
97 u32 reserved7:4;
98 u32 reorder_slot_id:6;
99 u32 reorder_fwd_id:6;
100 u32 reserved6:12;
101 u32 reorder_code:4;
102
103 /* 0x44 */
104 u32 exp_seq_num:12;
105 u32 cur_seq_num:12;
106 u32 fr_type_subtype:8;
107
108 /* 0x48 */
109 u32 msdu_size:16;
110 u32 sub_fr_id:4;
111 u32 proc_order:4;
112 u32 reserved9:4;
113 u32 aef:1;
114 u32 lsf:1;
115 u32 esf:1;
116 u32 asf:1;
117};
118
119struct wcn36xx_tx_bd {
120 u32 bdt:2;
121 u32 ft:1;
122 u32 dpu_ne:1;
123 u32 fw_tx_comp:1;
124 u32 tx_comp:1;
125 u32 reserved1:1;
126 u32 ub:1;
127 u32 rmf:1;
128 u32 reserved0:12;
129 u32 dpu_sign:3;
130 u32 dpu_rf:8;
131
132 struct wcn36xx_pdu pdu;
133
134 /* 0x14*/
135 u32 reserved5:7;
136 u32 queue_id:5;
137 u32 bd_rate:2;
138 u32 ack_policy:2;
139 u32 sta_index:8;
140 u32 dpu_desc_idx:8;
141
142 u32 tx_bd_sign;
143 u32 reserved6;
144 u32 dxe_start_time;
145 u32 dxe_end_time;
146
147 /*u32 tcp_udp_start_off:10;
148 u32 header_cks:16;
149 u32 reserved7:6;*/
150};
151
152struct wcn36xx_sta;
153struct wcn36xx;
154
155int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
156int wcn36xx_start_tx(struct wcn36xx *wcn,
157 struct wcn36xx_sta *sta_priv,
158 struct sk_buff *skb);
159
160#endif /* _TXRX_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
new file mode 100644
index 000000000000..58b63833e8e7
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -0,0 +1,238 @@
1/*
2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _WCN36XX_H_
18#define _WCN36XX_H_
19
20#include <linux/completion.h>
21#include <linux/printk.h>
22#include <linux/spinlock.h>
23#include <net/mac80211.h>
24
25#include "hal.h"
26#include "smd.h"
27#include "txrx.h"
28#include "dxe.h"
29#include "pmc.h"
30#include "debug.h"
31
32#define WLAN_NV_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
33#define WCN36XX_AGGR_BUFFER_SIZE 64
34
35extern unsigned int wcn36xx_dbg_mask;
36
37enum wcn36xx_debug_mask {
38 WCN36XX_DBG_DXE = 0x00000001,
39 WCN36XX_DBG_DXE_DUMP = 0x00000002,
40 WCN36XX_DBG_SMD = 0x00000004,
41 WCN36XX_DBG_SMD_DUMP = 0x00000008,
42 WCN36XX_DBG_RX = 0x00000010,
43 WCN36XX_DBG_RX_DUMP = 0x00000020,
44 WCN36XX_DBG_TX = 0x00000040,
45 WCN36XX_DBG_TX_DUMP = 0x00000080,
46 WCN36XX_DBG_HAL = 0x00000100,
47 WCN36XX_DBG_HAL_DUMP = 0x00000200,
48 WCN36XX_DBG_MAC = 0x00000400,
49 WCN36XX_DBG_BEACON = 0x00000800,
50 WCN36XX_DBG_BEACON_DUMP = 0x00001000,
51 WCN36XX_DBG_PMC = 0x00002000,
52 WCN36XX_DBG_PMC_DUMP = 0x00004000,
53 WCN36XX_DBG_ANY = 0xffffffff,
54};
55
56#define wcn36xx_err(fmt, arg...) \
 57 printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg)
58
59#define wcn36xx_warn(fmt, arg...) \
60 printk(KERN_WARNING pr_fmt("WARNING " fmt), ##arg)
61
62#define wcn36xx_info(fmt, arg...) \
63 printk(KERN_INFO pr_fmt(fmt), ##arg)
64
65#define wcn36xx_dbg(mask, fmt, arg...) do { \
66 if (wcn36xx_dbg_mask & mask) \
67 printk(KERN_DEBUG pr_fmt(fmt), ##arg); \
68} while (0)
69
70#define wcn36xx_dbg_dump(mask, prefix_str, buf, len) do { \
71 if (wcn36xx_dbg_mask & mask) \
72 print_hex_dump(KERN_DEBUG, pr_fmt(prefix_str), \
73 DUMP_PREFIX_OFFSET, 32, 1, \
74 buf, len, false); \
75} while (0)
76
77#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value)
78#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band)
79#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq)
80#define WCN36XX_LISTEN_INTERVAL(__wcn) (__wcn->hw->conf.listen_interval)
81#define WCN36XX_FLAGS(__wcn) (__wcn->hw->flags)
82#define WCN36XX_MAX_POWER(__wcn) (__wcn->hw->conf.chandef.chan->max_power)
83
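/* Convert a buffer of u32 words to big-endian in place; used for the BD
 * descriptors exchanged with the firmware (see txrx.c).
 */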
84static inline void buff_to_be(u32 *buf, size_t len)
85{
86 int i;
87 for (i = 0; i < len; i++)
88 buf[i] = cpu_to_be32(buf[i]);
89}
90
91struct nv_data {
92 int is_valid;
93 u8 table;
94};
95
 96/* Interface for the platform control path
 97 *
 98 * @open: hook called when wcn36xx wants to open the control channel.
 99 * @tx: sends a buffer to the firmware over the control channel.
100 */
101struct wcn36xx_platform_ctrl_ops {
102 int (*open)(void *drv_priv, void *rsp_cb);
103 void (*close)(void);
104 int (*tx)(char *buf, size_t len);
105 int (*get_hw_mac)(u8 *addr);
106 int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
107};
108
109/**
110 * struct wcn36xx_vif - holds VIF related fields
111 *
112 * @bss_index: bss_index is initially set to 0xFF. bss_index is received from
113 * HW after first config_bss call and must be used in delete_bss and
114 * enter/exit_bmps.
115 */
116struct wcn36xx_vif {
117 struct list_head list;
118 struct wcn36xx_sta *sta;
119 u8 dtim_period;
120 enum ani_ed_type encrypt_type;
121 bool is_joining;
122 struct wcn36xx_hal_mac_ssid ssid;
123
124 /* Power management */
125 enum wcn36xx_power_state pw_state;
126
127 u8 bss_index;
128 u8 ucast_dpu_signature;
129 /* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
130 u8 self_sta_index;
131 u8 self_dpu_desc_index;
132};
133
134/**
135 * struct wcn36xx_sta - holds STA related fields
136 *
137 * @tid: traffic ID that is used during AMPDU and in TX BD.
138 * @sta_index: STA index is returned from HW after config_sta call and is
139 * used in both SMD channel and TX BD.
140 * @dpu_desc_index: DPU descriptor index is returned from HW after config_sta
141 * call and is used in TX BD.
142 * @bss_sta_index: STA index is returned from HW after config_bss call and is
143 * used in both SMD channel and TX BD. See the table below for when it is used.
144 * @bss_dpu_desc_index: DPU descriptor index is returned from HW after
145 * config_bss call and is used in TX BD.
146 * ______________________________________________
147 * | | STA | AP |
148 * |______________|_____________|_______________|
149 * | TX BD |bss_sta_index| sta_index |
150 * |______________|_____________|_______________|
151 * |all SMD calls |bss_sta_index| sta_index |
152 * |______________|_____________|_______________|
153 * |smd_delete_sta| sta_index | sta_index |
154 * |______________|_____________|_______________|
155 */
156struct wcn36xx_sta {
157 struct wcn36xx_vif *vif;
158 u16 aid;
159 u16 tid;
160 u8 sta_index;
161 u8 dpu_desc_index;
162 u8 bss_sta_index;
163 u8 bss_dpu_desc_index;
164 bool is_data_encrypted;
165 /* Rates */
166 struct wcn36xx_hal_supported_rates supported_rates;
167};
168struct wcn36xx_dxe_ch;
169struct wcn36xx {
170 struct ieee80211_hw *hw;
171 struct device *dev;
172 struct list_head vif_list;
173
174 u8 fw_revision;
175 u8 fw_version;
176 u8 fw_minor;
177 u8 fw_major;
178
179 /* extra byte for the NULL termination */
180 u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
181 u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH + 1];
182
183 /* IRQs */
184 int tx_irq;
185 int rx_irq;
186 void __iomem *mmio;
187
188 struct wcn36xx_platform_ctrl_ops *ctrl_ops;
189 /*
190 * hal_buf must be protected with hal_mutex to guarantee
191 * that all messages are sent one after another
192 */
193 u8 *hal_buf;
194 size_t hal_rsp_len;
195 struct mutex hal_mutex;
196 struct completion hal_rsp_compl;
197 struct workqueue_struct *hal_ind_wq;
198 struct work_struct hal_ind_work;
199 struct mutex hal_ind_mutex;
200 struct list_head hal_ind_queue;
201
202 /* DXE channels */
203 struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */
204 struct wcn36xx_dxe_ch dxe_tx_h_ch; /* TX high */
205 struct wcn36xx_dxe_ch dxe_rx_l_ch; /* RX low */
206 struct wcn36xx_dxe_ch dxe_rx_h_ch; /* RX high */
207
208 /* For synchronization of DXE resources from BH, IRQ and WQ contexts */
209 spinlock_t dxe_lock;
210 bool queues_stopped;
211
212 /* Memory pools */
213 struct wcn36xx_dxe_mem_pool mgmt_mem_pool;
214 struct wcn36xx_dxe_mem_pool data_mem_pool;
215
216 struct sk_buff *tx_ack_skb;
217
218#ifdef CONFIG_WCN36XX_DEBUGFS
219 /* Debug file system entry */
220 struct wcn36xx_dfs_entry dfs;
221#endif /* CONFIG_WCN36XX_DEBUGFS */
222
223};
224
225static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
226 u8 major,
227 u8 minor,
228 u8 version,
229 u8 revision)
230{
231 return (wcn->fw_major == major &&
232 wcn->fw_minor == minor &&
233 wcn->fw_version == version &&
234 wcn->fw_revision == revision);
235}
236void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
237
238#endif /* _WCN36XX_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 61c302a6bdea..5b340769d5bb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -316,8 +316,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
316 } 316 }
317 conn.channel = ch - 1; 317 conn.channel = ch - 1;
318 318
319 memcpy(conn.bssid, bss->bssid, 6); 319 memcpy(conn.bssid, bss->bssid, ETH_ALEN);
320 memcpy(conn.dst_mac, bss->bssid, 6); 320 memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
321 /* 321 /*
322 * FW don't support scan after connection attempt 322 * FW don't support scan after connection attempt
323 */ 323 */
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index eb1dc7ad80fb..eeceab39cda2 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -197,7 +197,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
197 pci_iounmap(pdev, wil->csr); 197 pci_iounmap(pdev, wil->csr);
198 pci_release_region(pdev, 0); 198 pci_release_region(pdev, 0);
199 pci_disable_device(pdev); 199 pci_disable_device(pdev);
200 pci_set_drvdata(pdev, NULL);
201} 200}
202 201
203static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = { 202static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index b827d51c30a3..a55ae6494c3b 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -844,18 +844,18 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
844 if (priv->wep_is_on) 844 if (priv->wep_is_on)
845 frame_ctl |= IEEE80211_FCTL_PROTECTED; 845 frame_ctl |= IEEE80211_FCTL_PROTECTED;
846 if (priv->operating_mode == IW_MODE_ADHOC) { 846 if (priv->operating_mode == IW_MODE_ADHOC) {
847 skb_copy_from_linear_data(skb, &header.addr1, 6); 847 skb_copy_from_linear_data(skb, &header.addr1, ETH_ALEN);
848 memcpy(&header.addr2, dev->dev_addr, 6); 848 memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
849 memcpy(&header.addr3, priv->BSSID, 6); 849 memcpy(&header.addr3, priv->BSSID, ETH_ALEN);
850 } else { 850 } else {
851 frame_ctl |= IEEE80211_FCTL_TODS; 851 frame_ctl |= IEEE80211_FCTL_TODS;
852 memcpy(&header.addr1, priv->CurrentBSSID, 6); 852 memcpy(&header.addr1, priv->CurrentBSSID, ETH_ALEN);
853 memcpy(&header.addr2, dev->dev_addr, 6); 853 memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
854 skb_copy_from_linear_data(skb, &header.addr3, 6); 854 skb_copy_from_linear_data(skb, &header.addr3, ETH_ALEN);
855 } 855 }
856 856
857 if (priv->use_wpa) 857 if (priv->use_wpa)
858 memcpy(&header.addr4, SNAP_RFC1024, 6); 858 memcpy(&header.addr4, SNAP_RFC1024, ETH_ALEN);
859 859
860 header.frame_control = cpu_to_le16(frame_ctl); 860 header.frame_control = cpu_to_le16(frame_ctl);
861 /* Copy the wireless header into the card */ 861 /* Copy the wireless header into the card */
@@ -929,11 +929,11 @@ static void fast_rx_path(struct atmel_private *priv,
929 } 929 }
930 } 930 }
931 931
932 memcpy(skbp, header->addr1, 6); /* destination address */ 932 memcpy(skbp, header->addr1, ETH_ALEN); /* destination address */
933 if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS) 933 if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
934 memcpy(&skbp[6], header->addr3, 6); 934 memcpy(&skbp[ETH_ALEN], header->addr3, ETH_ALEN);
935 else 935 else
936 memcpy(&skbp[6], header->addr2, 6); /* source address */ 936 memcpy(&skbp[ETH_ALEN], header->addr2, ETH_ALEN); /* source address */
937 937
938 skb->protocol = eth_type_trans(skb, priv->dev); 938 skb->protocol = eth_type_trans(skb, priv->dev);
939 skb->ip_summed = CHECKSUM_NONE; 939 skb->ip_summed = CHECKSUM_NONE;
@@ -969,14 +969,14 @@ static void frag_rx_path(struct atmel_private *priv,
969 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, 969 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
970 u8 frag_no, int more_frags) 970 u8 frag_no, int more_frags)
971{ 971{
972 u8 mac4[6]; 972 u8 mac4[ETH_ALEN];
973 u8 source[6]; 973 u8 source[ETH_ALEN];
974 struct sk_buff *skb; 974 struct sk_buff *skb;
975 975
976 if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS) 976 if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
977 memcpy(source, header->addr3, 6); 977 memcpy(source, header->addr3, ETH_ALEN);
978 else 978 else
979 memcpy(source, header->addr2, 6); 979 memcpy(source, header->addr2, ETH_ALEN);
980 980
981 rx_packet_loc += 24; /* skip header */ 981 rx_packet_loc += 24; /* skip header */
982 982
@@ -984,9 +984,9 @@ static void frag_rx_path(struct atmel_private *priv,
984 msdu_size -= 4; 984 msdu_size -= 4;
985 985
986 if (frag_no == 0) { /* first fragment */ 986 if (frag_no == 0) { /* first fragment */
987 atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, 6); 987 atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, ETH_ALEN);
988 msdu_size -= 6; 988 msdu_size -= ETH_ALEN;
989 rx_packet_loc += 6; 989 rx_packet_loc += ETH_ALEN;
990 990
991 if (priv->do_rx_crc) 991 if (priv->do_rx_crc)
992 crc = crc32_le(crc, mac4, 6); 992 crc = crc32_le(crc, mac4, 6);
@@ -994,9 +994,9 @@ static void frag_rx_path(struct atmel_private *priv,
994 priv->frag_seq = seq_no; 994 priv->frag_seq = seq_no;
995 priv->frag_no = 1; 995 priv->frag_no = 1;
996 priv->frag_len = msdu_size; 996 priv->frag_len = msdu_size;
997 memcpy(priv->frag_source, source, 6); 997 memcpy(priv->frag_source, source, ETH_ALEN);
998 memcpy(&priv->rx_buf[6], source, 6); 998 memcpy(&priv->rx_buf[ETH_ALEN], source, ETH_ALEN);
999 memcpy(priv->rx_buf, header->addr1, 6); 999 memcpy(priv->rx_buf, header->addr1, ETH_ALEN);
1000 1000
1001 atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size); 1001 atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
1002 1002
@@ -1006,13 +1006,13 @@ static void frag_rx_path(struct atmel_private *priv,
1006 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); 1006 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
1007 if ((crc ^ 0xffffffff) != netcrc) { 1007 if ((crc ^ 0xffffffff) != netcrc) {
1008 priv->dev->stats.rx_crc_errors++; 1008 priv->dev->stats.rx_crc_errors++;
1009 memset(priv->frag_source, 0xff, 6); 1009 memset(priv->frag_source, 0xff, ETH_ALEN);
1010 } 1010 }
1011 } 1011 }
1012 1012
1013 } else if (priv->frag_no == frag_no && 1013 } else if (priv->frag_no == frag_no &&
1014 priv->frag_seq == seq_no && 1014 priv->frag_seq == seq_no &&
1015 memcmp(priv->frag_source, source, 6) == 0) { 1015 memcmp(priv->frag_source, source, ETH_ALEN) == 0) {
1016 1016
1017 atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len], 1017 atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
1018 rx_packet_loc, msdu_size); 1018 rx_packet_loc, msdu_size);
@@ -1024,7 +1024,7 @@ static void frag_rx_path(struct atmel_private *priv,
1024 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); 1024 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
1025 if ((crc ^ 0xffffffff) != netcrc) { 1025 if ((crc ^ 0xffffffff) != netcrc) {
1026 priv->dev->stats.rx_crc_errors++; 1026 priv->dev->stats.rx_crc_errors++;
1027 memset(priv->frag_source, 0xff, 6); 1027 memset(priv->frag_source, 0xff, ETH_ALEN);
1028 more_frags = 1; /* don't send broken assembly */ 1028 more_frags = 1; /* don't send broken assembly */
1029 } 1029 }
1030 } 1030 }
@@ -1033,7 +1033,7 @@ static void frag_rx_path(struct atmel_private *priv,
1033 priv->frag_no++; 1033 priv->frag_no++;
1034 1034
1035 if (!more_frags) { /* last one */ 1035 if (!more_frags) { /* last one */
1036 memset(priv->frag_source, 0xff, 6); 1036 memset(priv->frag_source, 0xff, ETH_ALEN);
1037 if (!(skb = dev_alloc_skb(priv->frag_len + 14))) { 1037 if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
1038 priv->dev->stats.rx_dropped++; 1038 priv->dev->stats.rx_dropped++;
1039 } else { 1039 } else {
@@ -1129,7 +1129,7 @@ static void rx_done_irq(struct atmel_private *priv)
1129 atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size); 1129 atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
1130 1130
1131 /* we use the same buffer for frag reassembly and control packets */ 1131 /* we use the same buffer for frag reassembly and control packets */
1132 memset(priv->frag_source, 0xff, 6); 1132 memset(priv->frag_source, 0xff, ETH_ALEN);
1133 1133
1134 if (priv->do_rx_crc) { 1134 if (priv->do_rx_crc) {
1135 /* last 4 octets is crc */ 1135 /* last 4 octets is crc */
@@ -1557,7 +1557,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
1557 priv->last_qual = jiffies; 1557 priv->last_qual = jiffies;
1558 priv->last_beacon_timestamp = 0; 1558 priv->last_beacon_timestamp = 0;
1559 memset(priv->frag_source, 0xff, sizeof(priv->frag_source)); 1559 memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
1560 memset(priv->BSSID, 0, 6); 1560 memset(priv->BSSID, 0, ETH_ALEN);
1561 priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */ 1561 priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
1562 priv->station_was_associated = 0; 1562 priv->station_was_associated = 0;
1563 1563
@@ -1718,7 +1718,7 @@ static int atmel_get_wap(struct net_device *dev,
1718 char *extra) 1718 char *extra)
1719{ 1719{
1720 struct atmel_private *priv = netdev_priv(dev); 1720 struct atmel_private *priv = netdev_priv(dev);
1721 memcpy(awrq->sa_data, priv->CurrentBSSID, 6); 1721 memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
1722 awrq->sa_family = ARPHRD_ETHER; 1722 awrq->sa_family = ARPHRD_ETHER;
1723 1723
1724 return 0; 1724 return 0;
@@ -2356,7 +2356,7 @@ static int atmel_get_scan(struct net_device *dev,
2356 for (i = 0; i < priv->BSS_list_entries; i++) { 2356 for (i = 0; i < priv->BSS_list_entries; i++) {
2357 iwe.cmd = SIOCGIWAP; 2357 iwe.cmd = SIOCGIWAP;
2358 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 2358 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
2359 memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6); 2359 memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, ETH_ALEN);
2360 current_ev = iwe_stream_add_event(info, current_ev, 2360 current_ev = iwe_stream_add_event(info, current_ev,
2361 extra + IW_SCAN_MAX_DATA, 2361 extra + IW_SCAN_MAX_DATA,
2362 &iwe, IW_EV_ADDR_LEN); 2362 &iwe, IW_EV_ADDR_LEN);
@@ -2760,7 +2760,7 @@ static void atmel_enter_state(struct atmel_private *priv, int new_state)
2760static void atmel_scan(struct atmel_private *priv, int specific_ssid) 2760static void atmel_scan(struct atmel_private *priv, int specific_ssid)
2761{ 2761{
2762 struct { 2762 struct {
2763 u8 BSSID[6]; 2763 u8 BSSID[ETH_ALEN];
2764 u8 SSID[MAX_SSID_LENGTH]; 2764 u8 SSID[MAX_SSID_LENGTH];
2765 u8 scan_type; 2765 u8 scan_type;
2766 u8 channel; 2766 u8 channel;
@@ -2771,7 +2771,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
2771 u8 SSID_size; 2771 u8 SSID_size;
2772 } cmd; 2772 } cmd;
2773 2773
2774 memset(cmd.BSSID, 0xff, 6); 2774 memset(cmd.BSSID, 0xff, ETH_ALEN);
2775 2775
2776 if (priv->fast_scan) { 2776 if (priv->fast_scan) {
2777 cmd.SSID_size = priv->SSID_size; 2777 cmd.SSID_size = priv->SSID_size;
@@ -2816,7 +2816,7 @@ static void join(struct atmel_private *priv, int type)
2816 2816
2817 cmd.SSID_size = priv->SSID_size; 2817 cmd.SSID_size = priv->SSID_size;
2818 memcpy(cmd.SSID, priv->SSID, priv->SSID_size); 2818 memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
2819 memcpy(cmd.BSSID, priv->CurrentBSSID, 6); 2819 memcpy(cmd.BSSID, priv->CurrentBSSID, ETH_ALEN);
2820 cmd.channel = (priv->channel & 0x7f); 2820 cmd.channel = (priv->channel & 0x7f);
2821 cmd.BSS_type = type; 2821 cmd.BSS_type = type;
2822 cmd.timeout = cpu_to_le16(2000); 2822 cmd.timeout = cpu_to_le16(2000);
@@ -2837,7 +2837,7 @@ static void start(struct atmel_private *priv, int type)
2837 2837
2838 cmd.SSID_size = priv->SSID_size; 2838 cmd.SSID_size = priv->SSID_size;
2839 memcpy(cmd.SSID, priv->SSID, priv->SSID_size); 2839 memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
2840 memcpy(cmd.BSSID, priv->BSSID, 6); 2840 memcpy(cmd.BSSID, priv->BSSID, ETH_ALEN);
2841 cmd.BSS_type = type; 2841 cmd.BSS_type = type;
2842 cmd.channel = (priv->channel & 0x7f); 2842 cmd.channel = (priv->channel & 0x7f);
2843 2843
@@ -2883,9 +2883,9 @@ static void send_authentication_request(struct atmel_private *priv, u16 system,
2883 header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); 2883 header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
2884 header.duration_id = cpu_to_le16(0x8000); 2884 header.duration_id = cpu_to_le16(0x8000);
2885 header.seq_ctrl = 0; 2885 header.seq_ctrl = 0;
2886 memcpy(header.addr1, priv->CurrentBSSID, 6); 2886 memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
2887 memcpy(header.addr2, priv->dev->dev_addr, 6); 2887 memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
2888 memcpy(header.addr3, priv->CurrentBSSID, 6); 2888 memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
2889 2889
2890 if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1) 2890 if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
2891 /* no WEP for authentication frames with TrSeqNo 1 */ 2891 /* no WEP for authentication frames with TrSeqNo 1 */
@@ -2916,7 +2916,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2916 struct ass_req_format { 2916 struct ass_req_format {
2917 __le16 capability; 2917 __le16 capability;
2918 __le16 listen_interval; 2918 __le16 listen_interval;
2919 u8 ap[6]; /* nothing after here directly accessible */ 2919 u8 ap[ETH_ALEN]; /* nothing after here directly accessible */
2920 u8 ssid_el_id; 2920 u8 ssid_el_id;
2921 u8 ssid_len; 2921 u8 ssid_len;
2922 u8 ssid[MAX_SSID_LENGTH]; 2922 u8 ssid[MAX_SSID_LENGTH];
@@ -2930,9 +2930,9 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2930 header.duration_id = cpu_to_le16(0x8000); 2930 header.duration_id = cpu_to_le16(0x8000);
2931 header.seq_ctrl = 0; 2931 header.seq_ctrl = 0;
2932 2932
2933 memcpy(header.addr1, priv->CurrentBSSID, 6); 2933 memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
2934 memcpy(header.addr2, priv->dev->dev_addr, 6); 2934 memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
2935 memcpy(header.addr3, priv->CurrentBSSID, 6); 2935 memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
2936 2936
2937 body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS); 2937 body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
2938 if (priv->wep_is_on) 2938 if (priv->wep_is_on)
@@ -2944,7 +2944,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2944 2944
2945 /* current AP address - only in reassoc frame */ 2945 /* current AP address - only in reassoc frame */
2946 if (is_reassoc) { 2946 if (is_reassoc) {
2947 memcpy(body.ap, priv->CurrentBSSID, 6); 2947 memcpy(body.ap, priv->CurrentBSSID, ETH_ALEN);
2948 ssid_el_p = &body.ssid_el_id; 2948 ssid_el_p = &body.ssid_el_id;
2949 bodysize = 18 + priv->SSID_size; 2949 bodysize = 18 + priv->SSID_size;
2950 } else { 2950 } else {
@@ -3021,7 +3021,7 @@ static void store_bss_info(struct atmel_private *priv,
3021 int i, index; 3021 int i, index;
3022 3022
3023 for (index = -1, i = 0; i < priv->BSS_list_entries; i++) 3023 for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
3024 if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0) 3024 if (memcmp(bss, priv->BSSinfo[i].BSSID, ETH_ALEN) == 0)
3025 index = i; 3025 index = i;
3026 3026
3027 /* If we process a probe and an entry from this BSS exists 3027 /* If we process a probe and an entry from this BSS exists
@@ -3032,7 +3032,7 @@ static void store_bss_info(struct atmel_private *priv,
3032 if (priv->BSS_list_entries == MAX_BSS_ENTRIES) 3032 if (priv->BSS_list_entries == MAX_BSS_ENTRIES)
3033 return; 3033 return;
3034 index = priv->BSS_list_entries++; 3034 index = priv->BSS_list_entries++;
3035 memcpy(priv->BSSinfo[index].BSSID, bss, 6); 3035 memcpy(priv->BSSinfo[index].BSSID, bss, ETH_ALEN);
3036 priv->BSSinfo[index].RSSI = rssi; 3036 priv->BSSinfo[index].RSSI = rssi;
3037 } else { 3037 } else {
3038 if (rssi > priv->BSSinfo[index].RSSI) 3038 if (rssi > priv->BSSinfo[index].RSSI)
@@ -3235,7 +3235,7 @@ static void atmel_join_bss(struct atmel_private *priv, int bss_index)
3235{ 3235{
3236 struct bss_info *bss = &priv->BSSinfo[bss_index]; 3236 struct bss_info *bss = &priv->BSSinfo[bss_index];
3237 3237
3238 memcpy(priv->CurrentBSSID, bss->BSSID, 6); 3238 memcpy(priv->CurrentBSSID, bss->BSSID, ETH_ALEN);
3239 memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize); 3239 memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize);
3240 3240
3241 /* The WPA stuff cares about the current AP address */ 3241 /* The WPA stuff cares about the current AP address */
@@ -3767,7 +3767,7 @@ static int probe_atmel_card(struct net_device *dev)
3767 0x00, 0x04, 0x25, 0x00, 0x00, 0x00 3767 0x00, 0x04, 0x25, 0x00, 0x00, 0x00
3768 }; 3768 };
3769 printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name); 3769 printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
3770 memcpy(dev->dev_addr, default_mac, 6); 3770 memcpy(dev->dev_addr, default_mac, ETH_ALEN);
3771 } 3771 }
3772 } 3772 }
3773 3773
@@ -3819,7 +3819,7 @@ static void build_wpa_mib(struct atmel_private *priv)
3819 3819
3820 struct { /* NB this is matched to the hardware, don't change. */ 3820 struct { /* NB this is matched to the hardware, don't change. */
3821 u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE]; 3821 u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
3822 u8 receiver_address[6]; 3822 u8 receiver_address[ETH_ALEN];
3823 u8 wep_is_on; 3823 u8 wep_is_on;
3824 u8 default_key; /* 0..3 */ 3824 u8 default_key; /* 0..3 */
3825 u8 group_key; 3825 u8 group_key;
@@ -3837,7 +3837,7 @@ static void build_wpa_mib(struct atmel_private *priv)
3837 3837
3838 mib.wep_is_on = priv->wep_is_on; 3838 mib.wep_is_on = priv->wep_is_on;
3839 mib.exclude_unencrypted = priv->exclude_unencrypted; 3839 mib.exclude_unencrypted = priv->exclude_unencrypted;
3840 memcpy(mib.receiver_address, priv->CurrentBSSID, 6); 3840 memcpy(mib.receiver_address, priv->CurrentBSSID, ETH_ALEN);
3841 3841
3842 /* zero all the keys before adding in valid ones. */ 3842 /* zero all the keys before adding in valid ones. */
3843 memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value)); 3843 memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
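
The atmel hunks above replace the hard-coded length 6 with ETH_ALEN wherever a MAC address is copied or compared. A minimal sketch of the pattern outside any driver (the function name is made up; ETH_ALEN is the real constant from <linux/if_ether.h>):

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/types.h>

/* illustrative only: copy one MAC address using the named length */
static void example_copy_mac(u8 dst[ETH_ALEN], const u8 src[ETH_ALEN])
{
        memcpy(dst, src, ETH_ALEN);     /* previously: memcpy(dst, src, 6) */
}
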
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 8cb206a89083..4ae63f4ddfb2 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -278,7 +278,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
278 else 278 else
279 txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate); 279 txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate);
280 txhdr->mac_frame_ctl = wlhdr->frame_control; 280 txhdr->mac_frame_ctl = wlhdr->frame_control;
281 memcpy(txhdr->tx_receiver, wlhdr->addr1, 6); 281 memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
282 282
283 /* Calculate duration for fallback rate */ 283 /* Calculate duration for fallback rate */
284 if ((rate_fb == rate) || 284 if ((rate_fb == rate) ||
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 849a28c80302..86588c9ff0f2 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -215,7 +215,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
215 rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value); 215 rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
216 216
217 txhdr->mac_frame_ctl = wlhdr->frame_control; 217 txhdr->mac_frame_ctl = wlhdr->frame_control;
218 memcpy(txhdr->tx_receiver, wlhdr->addr1, 6); 218 memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
219 219
220 /* Calculate duration for fallback rate */ 220 /* Calculate duration for fallback rate */
221 if ((rate_fb->hw_value == rate) || 221 if ((rate_fb->hw_value == rate) ||
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index c3462b75bd08..2a23bf2b904d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -34,6 +34,7 @@
34#include <brcmu_utils.h> 34#include <brcmu_utils.h>
35#include <brcmu_wifi.h> 35#include <brcmu_wifi.h>
36#include "sdio_host.h" 36#include "sdio_host.h"
37#include "sdio_chip.h"
37#include "dhd_dbg.h" 38#include "dhd_dbg.h"
38#include "dhd_bus.h" 39#include "dhd_bus.h"
39 40
@@ -41,13 +42,6 @@
41 42
42#define DMA_ALIGN_MASK 0x03 43#define DMA_ALIGN_MASK 0x03
43 44
44#define SDIO_DEVICE_ID_BROADCOM_43143 43143
45#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
46#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
47#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
48#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
49#define SDIO_DEVICE_ID_BROADCOM_4335 0x4335
50
51#define SDIO_FUNC1_BLOCKSIZE 64 45#define SDIO_FUNC1_BLOCKSIZE 64
52#define SDIO_FUNC2_BLOCKSIZE 512 46#define SDIO_FUNC2_BLOCKSIZE 512
53 47
@@ -58,7 +52,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
58 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, 52 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
59 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)}, 53 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
60 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)}, 54 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
61 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4335)}, 55 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
56 SDIO_DEVICE_ID_BROADCOM_4335_4339)},
62 { /* end: all zeroes */ }, 57 { /* end: all zeroes */ },
63}; 58};
64MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); 59MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -466,7 +461,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
466{ 461{
467 brcmf_dbg(SDIO, "Enter\n"); 462 brcmf_dbg(SDIO, "Enter\n");
468 463
469 brcmfmac_sdio_pdata = pdev->dev.platform_data; 464 brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
470 465
471 if (brcmfmac_sdio_pdata->power_on) 466 if (brcmfmac_sdio_pdata->power_on)
472 brcmfmac_sdio_pdata->power_on(); 467 brcmfmac_sdio_pdata->power_on();
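
With the duplicate SDIO id defines removed here (they now live in sdio_chip.h further down), the probe table keeps the same shape and the single 0x4335 entry covers both the 4335 and the 4339. A sketch of how such a table is declared and exported (the table name is illustrative; SDIO_DEVICE(), SDIO_VENDOR_ID_BROADCOM and MODULE_DEVICE_TABLE() are the real macros):

#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

static const struct sdio_device_id example_sdio_ids[] = {
        /* one id, two chips: the 4339 is told apart later by chip revision */
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, 0x4335)},
        { /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, example_sdio_ids);
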
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 2eb9e642c9bf..899a2ada5b82 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -97,8 +97,6 @@
97#define WLC_PHY_TYPE_LCN 8 97#define WLC_PHY_TYPE_LCN 8
98#define WLC_PHY_TYPE_NULL 0xf 98#define WLC_PHY_TYPE_NULL 0xf
99 99
100#define BRCMF_EVENTING_MASK_LEN 16
101
102#define TOE_TX_CSUM_OL 0x00000001 100#define TOE_TX_CSUM_OL 0x00000001
103#define TOE_RX_CSUM_OL 0x00000002 101#define TOE_RX_CSUM_OL 0x00000002
104 102
@@ -632,29 +630,29 @@ struct brcmf_skb_reorder_data {
632 u8 *reorder; 630 u8 *reorder;
633}; 631};
634 632
635extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); 633int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
636 634
637/* Return pointer to interface name */ 635/* Return pointer to interface name */
638extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx); 636char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
639 637
640/* Query dongle */ 638/* Query dongle */
641extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, 639int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
642 uint cmd, void *buf, uint len); 640 void *buf, uint len);
643extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, 641int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
644 void *buf, uint len); 642 void *buf, uint len);
645 643
646/* Remove any protocol-specific data header. */ 644/* Remove any protocol-specific data header. */
647extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx, 645int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
648 struct sk_buff *rxp); 646 struct sk_buff *rxp);
649 647
650extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked); 648int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
651extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, 649struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
652 s32 ifidx, char *name, u8 *mac_addr); 650 char *name, u8 *mac_addr);
653extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx); 651void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
654void brcmf_txflowblock_if(struct brcmf_if *ifp, 652void brcmf_txflowblock_if(struct brcmf_if *ifp,
655 enum brcmf_netif_stop_reason reason, bool state); 653 enum brcmf_netif_stop_reason reason, bool state);
656extern u32 brcmf_get_chip_info(struct brcmf_if *ifp); 654u32 brcmf_get_chip_info(struct brcmf_if *ifp);
657extern void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, 655void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
658 bool success); 656 bool success);
659 657
660#endif /* _BRCMF_H_ */ 658#endif /* _BRCMF_H_ */
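
The header-only hunks in this series (dhd.h, dhd_bus.h, dhd_proto.h, sdio_chip.h, sdio_host.h and the brcmsmac headers further down) are one mechanical change: the redundant extern keyword is dropped from function prototypes and the arguments are re-wrapped. Both forms declare exactly the same thing; a sketch with a made-up prototype:

#include <linux/types.h>

struct example_dev;

extern int example_attach(struct example_dev *dev, bool locked);  /* before */
int example_attach(struct example_dev *dev, bool locked);         /* after: identical linkage */
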
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 74156f84180c..a6eb09e5d46f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -132,35 +132,34 @@ struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
132 * interface functions from common layer 132 * interface functions from common layer
133 */ 133 */
134 134
135extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, 135bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
136 struct sk_buff *pkt, int prec); 136 int prec);
137 137
138/* Receive frame for delivery to OS. Callee disposes of rxp. */ 138/* Receive frame for delivery to OS. Callee disposes of rxp. */
139extern void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist); 139void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
140 140
141/* Indication from bus module regarding presence/insertion of dongle. */ 141/* Indication from bus module regarding presence/insertion of dongle. */
142extern int brcmf_attach(uint bus_hdrlen, struct device *dev); 142int brcmf_attach(uint bus_hdrlen, struct device *dev);
143/* Indication from bus module regarding removal/absence of dongle */ 143/* Indication from bus module regarding removal/absence of dongle */
144extern void brcmf_detach(struct device *dev); 144void brcmf_detach(struct device *dev);
145/* Indication from bus module that dongle should be reset */ 145/* Indication from bus module that dongle should be reset */
146extern void brcmf_dev_reset(struct device *dev); 146void brcmf_dev_reset(struct device *dev);
147/* Indication from bus module to change flow-control state */ 147/* Indication from bus module to change flow-control state */
148extern void brcmf_txflowblock(struct device *dev, bool state); 148void brcmf_txflowblock(struct device *dev, bool state);
149 149
150/* Notify the bus has transferred the tx packet to firmware */ 150/* Notify the bus has transferred the tx packet to firmware */
151extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, 151void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
152 bool success);
153 152
154extern int brcmf_bus_start(struct device *dev); 153int brcmf_bus_start(struct device *dev);
155 154
156#ifdef CONFIG_BRCMFMAC_SDIO 155#ifdef CONFIG_BRCMFMAC_SDIO
157extern void brcmf_sdio_exit(void); 156void brcmf_sdio_exit(void);
158extern void brcmf_sdio_init(void); 157void brcmf_sdio_init(void);
159extern void brcmf_sdio_register(void); 158void brcmf_sdio_register(void);
160#endif 159#endif
161#ifdef CONFIG_BRCMFMAC_USB 160#ifdef CONFIG_BRCMFMAC_USB
162extern void brcmf_usb_exit(void); 161void brcmf_usb_exit(void);
163extern void brcmf_usb_register(void); 162void brcmf_usb_register(void);
164#endif 163#endif
165 164
166#endif /* _BRCMF_BUS_H_ */ 165#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 40e7f854e10f..64e9cff241b9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -509,9 +509,8 @@ netif_rx:
509 } 509 }
510} 510}
511 511
512void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) 512void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
513{ 513{
514 struct sk_buff *skb, *pnext;
515 struct brcmf_if *ifp; 514 struct brcmf_if *ifp;
516 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 515 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
517 struct brcmf_pub *drvr = bus_if->drvr; 516 struct brcmf_pub *drvr = bus_if->drvr;
@@ -519,29 +518,24 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
519 u8 ifidx; 518 u8 ifidx;
520 int ret; 519 int ret;
521 520
522 brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev), 521 brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
523 skb_queue_len(skb_list));
524 522
525 skb_queue_walk_safe(skb_list, skb, pnext) { 523 /* process and remove protocol-specific header */
526 skb_unlink(skb, skb_list); 524 ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
527 525 ifp = drvr->iflist[ifidx];
528 /* process and remove protocol-specific header */
529 ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
530 ifp = drvr->iflist[ifidx];
531
532 if (ret || !ifp || !ifp->ndev) {
533 if ((ret != -ENODATA) && ifp)
534 ifp->stats.rx_errors++;
535 brcmu_pkt_buf_free_skb(skb);
536 continue;
537 }
538 526
539 rd = (struct brcmf_skb_reorder_data *)skb->cb; 527 if (ret || !ifp || !ifp->ndev) {
540 if (rd->reorder) 528 if ((ret != -ENODATA) && ifp)
541 brcmf_rxreorder_process_info(ifp, rd->reorder, skb); 529 ifp->stats.rx_errors++;
542 else 530 brcmu_pkt_buf_free_skb(skb);
543 brcmf_netif_rx(ifp, skb); 531 return;
544 } 532 }
533
534 rd = (struct brcmf_skb_reorder_data *)skb->cb;
535 if (rd->reorder)
536 brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
537 else
538 brcmf_netif_rx(ifp, skb);
545} 539}
546 540
547void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, 541void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
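
brcmf_rx_frames() used to take an sk_buff_head and walk it; the new brcmf_rx_frame() takes one packet, so bus code no longer has to build throw-away lists. A sketch of the caller-side difference (the helper name is invented; brcmf_rx_frame() is the prototype declared in dhd_bus.h above):

#include <linux/device.h>
#include <linux/skbuff.h>

void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);   /* from dhd_bus.h */

static void example_deliver(struct device *dev, struct sk_buff *pkt)
{
        /* old: skb_queue_head_init(&q); skb_queue_tail(&q, pkt);
         *      brcmf_rx_frames(dev, &q);
         * new: hand the single packet straight to the common layer */
        brcmf_rx_frame(dev, pkt);
}
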
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index ef9179883748..53c6e710f2cb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -22,21 +22,21 @@
22 */ 22 */
23 23
24/* Linkage, sets prot link and updates hdrlen in pub */ 24/* Linkage, sets prot link and updates hdrlen in pub */
25extern int brcmf_proto_attach(struct brcmf_pub *drvr); 25int brcmf_proto_attach(struct brcmf_pub *drvr);
26 26
27/* Unlink, frees allocated protocol memory (including brcmf_proto) */ 27/* Unlink, frees allocated protocol memory (including brcmf_proto) */
28extern void brcmf_proto_detach(struct brcmf_pub *drvr); 28void brcmf_proto_detach(struct brcmf_pub *drvr);
29 29
30/* Stop protocol: sync w/dongle state. */ 30/* Stop protocol: sync w/dongle state. */
31extern void brcmf_proto_stop(struct brcmf_pub *drvr); 31void brcmf_proto_stop(struct brcmf_pub *drvr);
32 32
33/* Add any protocol-specific data header. 33/* Add any protocol-specific data header.
34 * Caller must reserve prot_hdrlen prepend space. 34 * Caller must reserve prot_hdrlen prepend space.
35 */ 35 */
36extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset, 36void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
37 struct sk_buff *txp); 37 struct sk_buff *txp);
38 38
39/* Sets dongle media info (drv_version, mac address). */ 39/* Sets dongle media info (drv_version, mac address). */
40extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp); 40int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
41 41
42#endif /* _BRCMF_PROTO_H_ */ 42#endif /* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 1aa75d5951b8..67f05db4b9b8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -275,11 +275,6 @@ struct rte_console {
275/* Flags for SDH calls */ 275/* Flags for SDH calls */
276#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED) 276#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
277 277
278#define BRCMF_SDIO_FW_NAME "brcm/brcmfmac-sdio.bin"
279#define BRCMF_SDIO_NV_NAME "brcm/brcmfmac-sdio.txt"
280MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
281MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
282
283#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */ 278#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
284#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change 279#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
285 * when idle 280 * when idle
@@ -454,9 +449,6 @@ struct brcmf_sdio {
454 struct work_struct datawork; 449 struct work_struct datawork;
455 atomic_t dpc_tskcnt; 450 atomic_t dpc_tskcnt;
456 451
457 const struct firmware *firmware;
458 u32 fw_ptr;
459
460 bool txoff; /* Transmit flow-controlled */ 452 bool txoff; /* Transmit flow-controlled */
461 struct brcmf_sdio_count sdcnt; 453 struct brcmf_sdio_count sdcnt;
462 bool sr_enabled; /* SaveRestore enabled */ 454 bool sr_enabled; /* SaveRestore enabled */
@@ -493,6 +485,100 @@ enum brcmf_sdio_frmtype {
493 BRCMF_SDIO_FT_SUB, 485 BRCMF_SDIO_FT_SUB,
494}; 486};
495 487
488#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
489#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
490#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
491#define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
492#define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
493#define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
494#define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
495#define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
496#define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
497#define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt"
498#define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin"
499#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
500#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
501#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
502
503MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
504MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
505MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
506MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
507MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
508MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
509MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
510MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
511MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
512MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
513MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
514MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
515MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
516MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
517
518struct brcmf_firmware_names {
519 u32 chipid;
520 u32 revmsk;
521 const char *bin;
522 const char *nv;
523};
524
525enum brcmf_firmware_type {
526 BRCMF_FIRMWARE_BIN,
527 BRCMF_FIRMWARE_NVRAM
528};
529
530#define BRCMF_FIRMWARE_NVRAM(name) \
531 name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
532
533static const struct brcmf_firmware_names brcmf_fwname_data[] = {
534 { BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
535 { BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
536 { BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
537 { BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
538 { BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
539 { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
540 { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
541};
542
543
544static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
545 enum brcmf_firmware_type type)
546{
547 const struct firmware *fw;
548 const char *name;
549 int err, i;
550
551 for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
552 if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
553 brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
554 switch (type) {
555 case BRCMF_FIRMWARE_BIN:
556 name = brcmf_fwname_data[i].bin;
557 break;
558 case BRCMF_FIRMWARE_NVRAM:
559 name = brcmf_fwname_data[i].nv;
560 break;
561 default:
562 brcmf_err("invalid firmware type (%d)\n", type);
563 return NULL;
564 }
565 goto found;
566 }
567 }
568 brcmf_err("Unknown chipid %d [%d]\n",
569 bus->ci->chip, bus->ci->chiprev);
570 return NULL;
571
572found:
573 err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
574 if ((err) || (!fw)) {
575 brcmf_err("fail to request firmware %s (%d)\n", name, err);
576 return NULL;
577 }
578
579 return fw;
580}
581
496static void pkt_align(struct sk_buff *p, int len, int align) 582static void pkt_align(struct sk_buff *p, int len, int align)
497{ 583{
498 uint datalign; 584 uint datalign;
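
brcmf_sdbrcm_get_fw() above picks a firmware/NVRAM pair by chip id plus a revision bit-mask. A worked example of the mask test for the two 43241 entries, assuming chip revisions 2 and 5 (the helper below just restates the predicate from the lookup loop):

#include <linux/bitops.h>
#include <linux/types.h>

/* rev 2: BIT(2) = 0x04, 0x04 & 0x0000001F != 0                  -> 43241b0 image
 * rev 5: BIT(5) = 0x20, 0x20 & 0x0000001F == 0,
 *                       0x20 & 0xFFFFFFE0 != 0                  -> 43241b4 image */
static bool example_fw_match(u32 entry_chipid, u32 entry_revmsk,
                             u32 chip, u32 chiprev)
{
        return entry_chipid == chip && (entry_revmsk & BIT(chiprev));
}
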
@@ -1406,13 +1492,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1406 bus->glom.qlen, pfirst, pfirst->data, 1492 bus->glom.qlen, pfirst, pfirst->data,
1407 pfirst->len, pfirst->next, 1493 pfirst->len, pfirst->next,
1408 pfirst->prev); 1494 pfirst->prev);
1495 skb_unlink(pfirst, &bus->glom);
1496 brcmf_rx_frame(bus->sdiodev->dev, pfirst);
1497 bus->sdcnt.rxglompkts++;
1409 } 1498 }
1410 /* sent any remaining packets up */
1411 if (bus->glom.qlen)
1412 brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
1413 1499
1414 bus->sdcnt.rxglomframes++; 1500 bus->sdcnt.rxglomframes++;
1415 bus->sdcnt.rxglompkts += bus->glom.qlen;
1416 } 1501 }
1417 return num; 1502 return num;
1418} 1503}
@@ -1557,7 +1642,6 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1557static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) 1642static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1558{ 1643{
1559 struct sk_buff *pkt; /* Packet for event or data frames */ 1644 struct sk_buff *pkt; /* Packet for event or data frames */
1560 struct sk_buff_head pktlist; /* needed for bus interface */
1561 u16 pad; /* Number of pad bytes to read */ 1645 u16 pad; /* Number of pad bytes to read */
1562 uint rxleft = 0; /* Remaining number of frames allowed */ 1646 uint rxleft = 0; /* Remaining number of frames allowed */
1563 int ret; /* Return code from calls */ 1647 int ret; /* Return code from calls */
@@ -1759,9 +1843,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1759 continue; 1843 continue;
1760 } 1844 }
1761 1845
1762 skb_queue_head_init(&pktlist); 1846 brcmf_rx_frame(bus->sdiodev->dev, pkt);
1763 skb_queue_tail(&pktlist, pkt);
1764 brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
1765 } 1847 }
1766 1848
1767 rxcount = maxframes - rxleft; 1849 rxcount = maxframes - rxleft;
@@ -1786,10 +1868,15 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
1786 return; 1868 return;
1787} 1869}
1788 1870
1871/**
1872 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
1873 * bus layer usage.
1874 */
1789/* flag marking a dummy skb added for DMA alignment requirement */ 1875/* flag marking a dummy skb added for DMA alignment requirement */
1790#define DUMMY_SKB_FLAG 0x10000 1876#define ALIGN_SKB_FLAG 0x8000
1791/* bit mask of data length chopped from the previous packet */ 1877/* bit mask of data length chopped from the previous packet */
1792#define DUMMY_SKB_CHOP_LEN_MASK 0xffff 1878#define ALIGN_SKB_CHOP_LEN_MASK 0x7fff
1879
1793/** 1880/**
1794 * brcmf_sdio_txpkt_prep - packet preparation for transmit 1881 * brcmf_sdio_txpkt_prep - packet preparation for transmit
1795 * @bus: brcmf_sdio structure pointer 1882 * @bus: brcmf_sdio structure pointer
@@ -1854,7 +1941,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
1854 memcpy(pkt_new->data, 1941 memcpy(pkt_new->data,
1855 pkt_next->data + pkt_next->len - tail_chop, 1942 pkt_next->data + pkt_next->len - tail_chop,
1856 tail_chop); 1943 tail_chop);
1857 *(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop; 1944 *(u32 *)(pkt_new->cb) = ALIGN_SKB_FLAG + tail_chop;
1858 skb_trim(pkt_next, pkt_next->len - tail_chop); 1945 skb_trim(pkt_next, pkt_next->len - tail_chop);
1859 __skb_queue_after(pktq, pkt_next, pkt_new); 1946 __skb_queue_after(pktq, pkt_next, pkt_new);
1860 } else { 1947 } else {
@@ -1908,8 +1995,8 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
1908 1995
1909 skb_queue_walk_safe(pktq, pkt_next, tmp) { 1996 skb_queue_walk_safe(pktq, pkt_next, tmp) {
1910 dummy_flags = *(u32 *)(pkt_next->cb); 1997 dummy_flags = *(u32 *)(pkt_next->cb);
1911 if (dummy_flags & DUMMY_SKB_FLAG) { 1998 if (dummy_flags & ALIGN_SKB_FLAG) {
1912 chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK; 1999 chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
1913 if (chop_len) { 2000 if (chop_len) {
1914 pkt_prev = pkt_next->prev; 2001 pkt_prev = pkt_next->prev;
1915 memcpy(pkt_prev->data + pkt_prev->len, 2002 memcpy(pkt_prev->data + pkt_prev->len,
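
The DUMMY_SKB_* pair becomes ALIGN_SKB_*, and the encoding shrinks from 17 bits (0x10000 flag plus 16-bit length) to one that fits in 16 bits (0x8000 flag plus 15-bit length), which lines up with the two cb bytes fwsignal.c reserves as bus_flags below. A sketch of the packing with placeholder names (the driver still stores the value as a u32, as in the hunk above):

#include <linux/skbuff.h>

#define EXAMPLE_ALIGN_FLAG      0x8000  /* marks a dummy alignment skb */
#define EXAMPLE_CHOP_MASK       0x7fff  /* length chopped off the previous skb */

static inline void example_mark_align_skb(struct sk_buff *skb, u16 tail_chop)
{
        *(u32 *)(skb->cb) = EXAMPLE_ALIGN_FLAG + tail_chop;
}

static inline u16 example_align_chop_len(struct sk_buff *skb)
{
        return *(u32 *)(skb->cb) & EXAMPLE_CHOP_MASK;
}
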
@@ -3037,69 +3124,43 @@ static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
3037 return true; 3124 return true;
3038} 3125}
3039 3126
3040static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
3041{
3042 if (bus->firmware->size < bus->fw_ptr + len)
3043 len = bus->firmware->size - bus->fw_ptr;
3044
3045 memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
3046 bus->fw_ptr += len;
3047 return len;
3048}
3049
3050static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus) 3127static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
3051{ 3128{
3129 const struct firmware *fw;
3130 int err;
3052 int offset; 3131 int offset;
3053 uint len; 3132 int address;
3054 u8 *memblock = NULL, *memptr; 3133 int len;
3055 int ret; 3134
3056 u8 idx; 3135 fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
3057 3136 if (fw == NULL)
3058 brcmf_dbg(INFO, "Enter\n"); 3137 return -ENOENT;
3059 3138
3060 ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME, 3139 if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
3061 &bus->sdiodev->func[2]->dev); 3140 BRCMF_MAX_CORENUM)
3062 if (ret) { 3141 memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
3063 brcmf_err("Fail to request firmware %d\n", ret); 3142
3064 return ret; 3143 err = 0;
3065 } 3144 offset = 0;
3066 bus->fw_ptr = 0; 3145 address = bus->ci->rambase;
3067 3146 while (offset < fw->size) {
3068 memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC); 3147 len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
3069 if (memblock == NULL) { 3148 fw->size - offset;
3070 ret = -ENOMEM; 3149 err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
3071 goto err; 3150 (u8 *)&fw->data[offset], len);
3072 } 3151 if (err) {
3073 if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
3074 memptr += (BRCMF_SDALIGN -
3075 ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
3076
3077 offset = bus->ci->rambase;
3078
3079 /* Download image */
3080 len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
3081 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
3082 if (BRCMF_MAX_CORENUM != idx)
3083 memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
3084 while (len) {
3085 ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
3086 if (ret) {
3087 brcmf_err("error %d on writing %d membytes at 0x%08x\n", 3152 brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3088 ret, MEMBLOCK, offset); 3153 err, len, address);
3089 goto err; 3154 goto failure;
3090 } 3155 }
3091 3156 offset += len;
3092 offset += MEMBLOCK; 3157 address += len;
3093 len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
3094 } 3158 }
3095 3159
3096err: 3160failure:
3097 kfree(memblock); 3161 release_firmware(fw);
3098 3162
3099 release_firmware(bus->firmware); 3163 return err;
3100 bus->fw_ptr = 0;
3101
3102 return ret;
3103} 3164}
3104 3165
3105/* 3166/*
@@ -3111,7 +3172,8 @@ err:
3111 * by two NULs. 3172 * by two NULs.
3112*/ 3173*/
3113 3174
3114static int brcmf_process_nvram_vars(struct brcmf_sdio *bus) 3175static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
3176 const struct firmware *nv)
3115{ 3177{
3116 char *varbuf; 3178 char *varbuf;
3117 char *dp; 3179 char *dp;
@@ -3120,12 +3182,12 @@ static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
3120 int ret = 0; 3182 int ret = 0;
3121 uint buf_len, n, len; 3183 uint buf_len, n, len;
3122 3184
3123 len = bus->firmware->size; 3185 len = nv->size;
3124 varbuf = vmalloc(len); 3186 varbuf = vmalloc(len);
3125 if (!varbuf) 3187 if (!varbuf)
3126 return -ENOMEM; 3188 return -ENOMEM;
3127 3189
3128 memcpy(varbuf, bus->firmware->data, len); 3190 memcpy(varbuf, nv->data, len);
3129 dp = varbuf; 3191 dp = varbuf;
3130 3192
3131 findNewline = false; 3193 findNewline = false;
@@ -3177,18 +3239,16 @@ err:
3177 3239
3178static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus) 3240static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
3179{ 3241{
3242 const struct firmware *nv;
3180 int ret; 3243 int ret;
3181 3244
3182 ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME, 3245 nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
3183 &bus->sdiodev->func[2]->dev); 3246 if (nv == NULL)
3184 if (ret) { 3247 return -ENOENT;
3185 brcmf_err("Fail to request nvram %d\n", ret);
3186 return ret;
3187 }
3188 3248
3189 ret = brcmf_process_nvram_vars(bus); 3249 ret = brcmf_process_nvram_vars(bus, nv);
3190 3250
3191 release_firmware(bus->firmware); 3251 release_firmware(nv);
3192 3252
3193 return ret; 3253 return ret;
3194} 3254}
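
Both download paths now obtain their image through brcmf_sdbrcm_get_fw() and release it themselves, so the bus structure no longer carries the firmware/fw_ptr state. The generic shape of that request/consume/release cycle, as a sketch rather than driver code (the chunk writer stands in for brcmf_sdio_ramrw(), and the chunk size for MEMBLOCK):

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>

#define EXAMPLE_CHUNK   2048    /* stand-in for MEMBLOCK */

static int example_download(struct device *dev, const char *name, u32 base,
                            int (*write_chunk)(u32 addr, const u8 *buf, uint len))
{
        const struct firmware *fw;
        size_t offset = 0, len;
        int err;

        err = request_firmware(&fw, name, dev);
        if (err)
                return err;

        while (offset < fw->size) {
                len = min_t(size_t, EXAMPLE_CHUNK, fw->size - offset);
                err = write_chunk(base + offset, fw->data + offset, len);
                if (err)
                        break;
                offset += len;
        }

        release_firmware(fw);   /* always drop the image, success or not */
        return err;
}
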
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index e679214b3c98..14bc24dc5bae 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -102,7 +102,8 @@ struct brcmf_event;
102 BRCMF_ENUM_DEF(DCS_REQUEST, 73) \ 102 BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
103 BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \ 103 BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
104 BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \ 104 BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
105 BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) 105 BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
106 BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
106 107
107#define BRCMF_ENUM_DEF(id, val) \ 108#define BRCMF_ENUM_DEF(id, val) \
108 BRCMF_E_##id = (val), 109 BRCMF_E_##id = (val),
@@ -114,6 +115,8 @@ enum brcmf_fweh_event_code {
114}; 115};
115#undef BRCMF_ENUM_DEF 116#undef BRCMF_ENUM_DEF
116 117
118#define BRCMF_EVENTING_MASK_LEN DIV_ROUND_UP(BRCMF_E_LAST, 8)
119
117/* flags field values in struct brcmf_event_msg */ 120/* flags field values in struct brcmf_event_msg */
118#define BRCMF_EVENT_MSG_LINK 0x01 121#define BRCMF_EVENT_MSG_LINK 0x01
119#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02 122#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
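
BRCMF_EVENTING_MASK_LEN moves next to the event enum and is derived from it: with the new PSTA_PRIMARY_INTF_IND = 128 entry, a fixed 16-byte mask would have no bit for event 128. Assuming BRCMF_E_LAST ends up one past the highest code, the arithmetic is:

#include <linux/kernel.h>

/* DIV_ROUND_UP(n, d) == ((n + d - 1) / d), so for 129 event codes:
 *   DIV_ROUND_UP(129, 8) = 136 / 8 = 17 bytes (was a hard-coded 16) */
#define EXAMPLE_EVENT_COUNT     129
#define EXAMPLE_MASK_LEN        DIV_ROUND_UP(EXAMPLE_EVENT_COUNT, 8)
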
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 82f9140f3d35..d0cd0bf95c5a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -168,6 +168,7 @@ enum brcmf_fws_skb_state {
168/** 168/**
169 * struct brcmf_skbuff_cb - control buffer associated with skbuff. 169 * struct brcmf_skbuff_cb - control buffer associated with skbuff.
170 * 170 *
171 * @bus_flags: 2 bytes reserved for bus specific parameters
171 * @if_flags: holds interface index and packet related flags. 172 * @if_flags: holds interface index and packet related flags.
172 * @htod: host to device packet identifier (used in PKTTAG tlv). 173 * @htod: host to device packet identifier (used in PKTTAG tlv).
173 * @state: transmit state of the packet. 174 * @state: transmit state of the packet.
@@ -177,6 +178,7 @@ enum brcmf_fws_skb_state {
177 * provides 48 bytes of storage so this structure should not exceed that. 178 * provides 48 bytes of storage so this structure should not exceed that.
178 */ 179 */
179struct brcmf_skbuff_cb { 180struct brcmf_skbuff_cb {
181 u16 bus_flags;
180 u16 if_flags; 182 u16 if_flags;
181 u32 htod; 183 u32 htod;
182 enum brcmf_fws_skb_state state; 184 enum brcmf_fws_skb_state state;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index ca72177388b9..2096a14ef1fb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -18,6 +18,7 @@
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/mmc/card.h> 20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
21#include <linux/ssb/ssb_regs.h> 22#include <linux/ssb/ssb_regs.h>
22#include <linux/bcma/bcma.h> 23#include <linux/bcma/bcma.h>
23 24
@@ -136,6 +137,8 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
136 u8 idx; 137 u8 idx;
137 138
138 idx = brcmf_sdio_chip_getinfidx(ci, coreid); 139 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
140 if (idx == BRCMF_MAX_CORENUM)
141 return false;
139 142
140 regdata = brcmf_sdio_regrl(sdiodev, 143 regdata = brcmf_sdio_regrl(sdiodev,
141 CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 144 CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
@@ -154,6 +157,8 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
154 bool ret; 157 bool ret;
155 158
156 idx = brcmf_sdio_chip_getinfidx(ci, coreid); 159 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
160 if (idx == BRCMF_MAX_CORENUM)
161 return false;
157 162
158 regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, 163 regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
159 NULL); 164 NULL);
@@ -261,6 +266,8 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
261 u32 regdata; 266 u32 regdata;
262 267
263 idx = brcmf_sdio_chip_getinfidx(ci, coreid); 268 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
269 if (idx == BRCMF_MAX_CORENUM)
270 return;
264 271
265 /* if core is already in reset, just return */ 272 /* if core is already in reset, just return */
266 regdata = brcmf_sdio_regrl(sdiodev, 273 regdata = brcmf_sdio_regrl(sdiodev,
@@ -304,6 +311,8 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
304 u8 idx; 311 u8 idx;
305 312
306 idx = brcmf_sdio_chip_getinfidx(ci, coreid); 313 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
314 if (idx == BRCMF_MAX_CORENUM)
315 return;
307 316
308 /* 317 /*
309 * Must do the disable sequence first to work for 318 * Must do the disable sequence first to work for
@@ -368,6 +377,8 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
368 u32 regdata; 377 u32 regdata;
369 378
370 idx = brcmf_sdio_chip_getinfidx(ci, coreid); 379 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
380 if (idx == BRCMF_MAX_CORENUM)
381 return;
371 382
372 /* must disable first to work for arbitrary current core state */ 383 /* must disable first to work for arbitrary current core state */
373 brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits); 384 brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
@@ -444,6 +455,9 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
444 NULL); 455 NULL);
445 ci->chip = regdata & CID_ID_MASK; 456 ci->chip = regdata & CID_ID_MASK;
446 ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT; 457 ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
458 if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
459 ci->chiprev >= 2)
460 ci->chip = BCM4339_CHIP_ID;
447 ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT; 461 ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
448 462
449 brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev); 463 brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
@@ -541,6 +555,20 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
541 ci->ramsize = 0xc0000; 555 ci->ramsize = 0xc0000;
542 ci->rambase = 0x180000; 556 ci->rambase = 0x180000;
543 break; 557 break;
558 case BCM4339_CHIP_ID:
559 ci->c_inf[0].wrapbase = 0x18100000;
560 ci->c_inf[0].cib = 0x2e084411;
561 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
562 ci->c_inf[1].base = 0x18005000;
563 ci->c_inf[1].wrapbase = 0x18105000;
564 ci->c_inf[1].cib = 0x15004211;
565 ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
566 ci->c_inf[2].base = 0x18002000;
567 ci->c_inf[2].wrapbase = 0x18102000;
568 ci->c_inf[2].cib = 0x04084411;
569 ci->ramsize = 0xc0000;
570 ci->rambase = 0x180000;
571 break;
544 default: 572 default:
545 brcmf_err("chipid 0x%x is not supported\n", ci->chip); 573 brcmf_err("chipid 0x%x is not supported\n", ci->chip);
546 return -ENODEV; 574 return -ENODEV;
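
brcmf_sdio_chip_getinfidx() returns BRCMF_MAX_CORENUM when a core id is not present, and the hunks above add that check before every c_inf[] access so an unknown core can no longer index past the array. A self-contained sketch of the lookup-plus-guard pattern (every example_ name is invented):

#include <linux/types.h>

#define EXAMPLE_MAX_CORENUM     6       /* mirrors BRCMF_MAX_CORENUM */

struct example_core {
        u16 id;
        u32 base;
};

/* returns EXAMPLE_MAX_CORENUM when the id is not in the table */
static u8 example_getinfidx(const struct example_core *cores, u16 coreid)
{
        u8 i;

        for (i = 0; i < EXAMPLE_MAX_CORENUM; i++)
                if (cores[i].id == coreid)
                        return i;
        return EXAMPLE_MAX_CORENUM;
}

static u32 example_core_base(const struct example_core *cores, u16 coreid)
{
        u8 idx = example_getinfidx(cores, coreid);

        if (idx == EXAMPLE_MAX_CORENUM) /* the guard added by this patch */
                return 0;
        return cores[idx].base;
}
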
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
index 83c041f1bf4a..507c61c991fa 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -54,6 +54,14 @@
54 54
55#define BRCMF_MAX_CORENUM 6 55#define BRCMF_MAX_CORENUM 6
56 56
57/* SDIO device ID */
58#define SDIO_DEVICE_ID_BROADCOM_43143 43143
59#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
60#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
61#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
62#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
63#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
64
57struct chip_core_info { 65struct chip_core_info {
58 u16 id; 66 u16 id;
59 u16 rev; 67 u16 rev;
@@ -215,17 +223,16 @@ struct sdpcmd_regs {
215 u16 PAD[0x80]; 223 u16 PAD[0x80];
216}; 224};
217 225
218extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev, 226int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
219 struct chip_info **ci_ptr, u32 regs); 227 struct chip_info **ci_ptr, u32 regs);
220extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr); 228void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
221extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, 229void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
222 struct chip_info *ci, 230 struct chip_info *ci, u32 drivestrength);
223 u32 drivestrength); 231u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
224extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid); 232void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
225extern void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev, 233 struct chip_info *ci);
226 struct chip_info *ci); 234bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
227extern bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev, 235 struct chip_info *ci, char *nvram_dat,
228 struct chip_info *ci, char *nvram_dat, 236 uint nvram_sz);
229 uint nvram_sz);
230 237
231#endif /* _BRCMFMAC_SDIO_CHIP_H_ */ 238#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 2b5407f002e5..c9b06b4e71f7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -181,18 +181,18 @@ struct brcmf_sdio_dev {
181}; 181};
182 182
183/* Register/deregister interrupt handler. */ 183/* Register/deregister interrupt handler. */
184extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev); 184int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
185extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev); 185int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
186 186
187/* sdio device register access interface */ 187/* sdio device register access interface */
188extern u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret); 188u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
189extern u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret); 189u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
190extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, 190void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
191 u8 data, int *ret); 191 int *ret);
192extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, 192void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
193 u32 data, int *ret); 193 int *ret);
194extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr, 194int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
195 void *data, bool write); 195 void *data, bool write);
196 196
197/* Buffer transfer to/from device (client) core via cmd53. 197/* Buffer transfer to/from device (client) core via cmd53.
198 * fn: function number 198 * fn: function number
@@ -206,22 +206,17 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
206 * Returns 0 or error code. 206 * Returns 0 or error code.
207 * NOTE: Async operation is not currently supported. 207 * NOTE: Async operation is not currently supported.
208 */ 208 */
209extern int 209int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
210brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 210 uint flags, struct sk_buff_head *pktq);
211 uint flags, struct sk_buff_head *pktq); 211int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
212extern int 212 uint flags, u8 *buf, uint nbytes);
213brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 213
214 uint flags, u8 *buf, uint nbytes); 214int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
215 215 uint flags, struct sk_buff *pkt);
216extern int 216int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
217brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 217 uint flags, u8 *buf, uint nbytes);
218 uint flags, struct sk_buff *pkt); 218int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
219extern int 219 uint flags, struct sk_buff_head *pktq);
220brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
221 uint flags, u8 *buf, uint nbytes);
222extern int
223brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
224 uint flags, struct sk_buff_head *pktq);
225 220
226/* Flags bits */ 221/* Flags bits */
227 222
@@ -237,46 +232,43 @@ brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
237 * nbytes: number of bytes to transfer to/from buf 232 * nbytes: number of bytes to transfer to/from buf
238 * Returns 0 or error code. 233 * Returns 0 or error code.
239 */ 234 */
240extern int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, 235int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
241 u32 addr, u8 *buf, uint nbytes); 236 u8 *buf, uint nbytes);
242extern int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, 237int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
243 u32 address, u8 *data, uint size); 238 u8 *data, uint size);
244 239
245/* Issue an abort to the specified function */ 240/* Issue an abort to the specified function */
246extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn); 241int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
247 242
248/* platform specific/high level functions */ 243/* platform specific/high level functions */
249extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev); 244int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
250extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev); 245int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
251 246
252/* attach, return handler on success, NULL if failed. 247/* attach, return handler on success, NULL if failed.
253 * The handler shall be provided by all subsequent calls. No local cache 248 * The handler shall be provided by all subsequent calls. No local cache
254 * cfghdl points to the starting address of pci device mapped memory 249 * cfghdl points to the starting address of pci device mapped memory
255 */ 250 */
256extern int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev); 251int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
257extern void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev); 252void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
258 253
259/* read or write one byte using cmd52 */ 254/* read or write one byte using cmd52 */
260extern int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, 255int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
261 uint fnc, uint addr, u8 *byte); 256 uint addr, u8 *byte);
262 257
263/* read or write 2/4 bytes using cmd53 */ 258/* read or write 2/4 bytes using cmd53 */
264extern int 259int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
265brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, 260 uint addr, u32 *word, uint nbyte);
266 uint rw, uint fnc, uint addr,
267 u32 *word, uint nbyte);
268 261
269/* Watchdog timer interface for pm ops */ 262/* Watchdog timer interface for pm ops */
270extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, 263void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
271 bool enable);
272 264
273extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev); 265void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
274extern void brcmf_sdbrcm_disconnect(void *ptr); 266void brcmf_sdbrcm_disconnect(void *ptr);
275extern void brcmf_sdbrcm_isr(void *arg); 267void brcmf_sdbrcm_isr(void *arg);
276 268
277extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick); 269void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
278 270
279extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, 271void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
280 wait_queue_head_t *wq); 272 wait_queue_head_t *wq);
281extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev); 273bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
282#endif /* _BRCM_SDH_H_ */ 274#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index f4aea47e0730..422f44c63175 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -435,7 +435,6 @@ static void brcmf_usb_rx_complete(struct urb *urb)
435 struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context; 435 struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
436 struct brcmf_usbdev_info *devinfo = req->devinfo; 436 struct brcmf_usbdev_info *devinfo = req->devinfo;
437 struct sk_buff *skb; 437 struct sk_buff *skb;
438 struct sk_buff_head skbq;
439 438
440 brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status); 439 brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
441 brcmf_usb_del_fromq(devinfo, req); 440 brcmf_usb_del_fromq(devinfo, req);
@@ -450,10 +449,8 @@ static void brcmf_usb_rx_complete(struct urb *urb)
450 } 449 }
451 450
452 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) { 451 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
453 skb_queue_head_init(&skbq);
454 skb_queue_tail(&skbq, skb);
455 skb_put(skb, urb->actual_length); 452 skb_put(skb, urb->actual_length);
456 brcmf_rx_frames(devinfo->dev, &skbq); 453 brcmf_rx_frame(devinfo->dev, skb);
457 brcmf_usb_rx_refill(devinfo, req); 454 brcmf_usb_rx_refill(devinfo, req);
458 } else { 455 } else {
459 brcmu_pkt_buf_free_skb(skb); 456 brcmu_pkt_buf_free_skb(skb);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index a8a267b5b87a..2d08c155c23b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -172,19 +172,19 @@ struct si_info {
172 172
173 173
174/* AMBA Interconnect exported externs */ 174/* AMBA Interconnect exported externs */
175extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val); 175u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
176 176
177/* === exported functions === */ 177/* === exported functions === */
178extern struct si_pub *ai_attach(struct bcma_bus *pbus); 178struct si_pub *ai_attach(struct bcma_bus *pbus);
179extern void ai_detach(struct si_pub *sih); 179void ai_detach(struct si_pub *sih);
180extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val); 180uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
181extern void ai_clkctl_init(struct si_pub *sih); 181void ai_clkctl_init(struct si_pub *sih);
182extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih); 182u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
183extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode); 183bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
184extern bool ai_deviceremoved(struct si_pub *sih); 184bool ai_deviceremoved(struct si_pub *sih);
185 185
186/* Enable Ex-PA for 4313 */ 186/* Enable Ex-PA for 4313 */
187extern void ai_epa_4313war(struct si_pub *sih); 187void ai_epa_4313war(struct si_pub *sih);
188 188
189static inline u32 ai_get_cccaps(struct si_pub *sih) 189static inline u32 ai_get_cccaps(struct si_pub *sih)
190{ 190{
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
index 73d01e586109..03bdcf29bd50 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
@@ -37,17 +37,17 @@ struct brcms_ampdu_session {
37 u16 dma_len; 37 u16 dma_len;
38}; 38};
39 39
40extern void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session, 40void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
41 struct brcms_c_info *wlc); 41 struct brcms_c_info *wlc);
42extern int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session, 42int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
43 struct sk_buff *p); 43 struct sk_buff *p);
44extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session); 44void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
45 45
46extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc); 46struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
47extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu); 47void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
48extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb, 48void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
49 struct sk_buff *p, struct tx_status *txs); 49 struct sk_buff *p, struct tx_status *txs);
50extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc); 50void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
51extern void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu); 51void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
52 52
53#endif /* _BRCM_AMPDU_H_ */ 53#endif /* _BRCM_AMPDU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
index 97ea3881a8ec..a3d487ab1964 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
@@ -17,13 +17,11 @@
17#ifndef _BRCM_ANTSEL_H_ 17#ifndef _BRCM_ANTSEL_H_
18#define _BRCM_ANTSEL_H_ 18#define _BRCM_ANTSEL_H_
19 19
20extern struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc); 20struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
21extern void brcms_c_antsel_detach(struct antsel_info *asi); 21void brcms_c_antsel_detach(struct antsel_info *asi);
22extern void brcms_c_antsel_init(struct antsel_info *asi); 22void brcms_c_antsel_init(struct antsel_info *asi);
23extern void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef, 23void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
24 bool sel, 24 u8 id, u8 fbid, u8 *antcfg, u8 *fbantcfg);
25 u8 id, u8 fbid, u8 *antcfg, 25u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
26 u8 *fbantcfg);
27extern u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
28 26
29#endif /* _BRCM_ANTSEL_H_ */ 27#endif /* _BRCM_ANTSEL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.h b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
index 006483a0abe6..39dd3a5b2979 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
@@ -32,20 +32,16 @@
32 32
33#define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */ 33#define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */
34 34
35extern struct brcms_cm_info * 35struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
36brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
37 36
38extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm); 37void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
39 38
40extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, 39bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec);
41 u16 chspec);
42 40
43extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, 41void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
44 u16 chanspec, 42 struct txpwr_limits *txpwr);
45 struct txpwr_limits *txpwr); 43void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
46extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, 44 u8 local_constraint_qdbm);
47 u16 chanspec, 45void brcms_c_regd_init(struct brcms_c_info *wlc);
48 u8 local_constraint_qdbm);
49extern void brcms_c_regd_init(struct brcms_c_info *wlc);
50 46
51#endif /* _WLC_CHANNEL_H */ 47#endif /* _WLC_CHANNEL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 4090032e81a2..198053dfc310 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -88,26 +88,26 @@ struct brcms_info {
88}; 88};
89 89
90/* misc callbacks */ 90/* misc callbacks */
91extern void brcms_init(struct brcms_info *wl); 91void brcms_init(struct brcms_info *wl);
92extern uint brcms_reset(struct brcms_info *wl); 92uint brcms_reset(struct brcms_info *wl);
93extern void brcms_intrson(struct brcms_info *wl); 93void brcms_intrson(struct brcms_info *wl);
94extern u32 brcms_intrsoff(struct brcms_info *wl); 94u32 brcms_intrsoff(struct brcms_info *wl);
95extern void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask); 95void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
96extern int brcms_up(struct brcms_info *wl); 96int brcms_up(struct brcms_info *wl);
97extern void brcms_down(struct brcms_info *wl); 97void brcms_down(struct brcms_info *wl);
98extern void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif, 98void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
99 bool state, int prio); 99 bool state, int prio);
100extern bool brcms_rfkill_set_hw_state(struct brcms_info *wl); 100bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
101 101
102/* timer functions */ 102/* timer functions */
103extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl, 103struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
104 void (*fn) (void *arg), void *arg, 104 void (*fn) (void *arg), void *arg,
105 const char *name); 105 const char *name);
106extern void brcms_free_timer(struct brcms_timer *timer); 106void brcms_free_timer(struct brcms_timer *timer);
107extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic); 107void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
108extern bool brcms_del_timer(struct brcms_timer *timer); 108bool brcms_del_timer(struct brcms_timer *timer);
109extern void brcms_dpc(unsigned long data); 109void brcms_dpc(unsigned long data);
110extern void brcms_timer(struct brcms_timer *t); 110void brcms_timer(struct brcms_timer *t);
111extern void brcms_fatal_error(struct brcms_info *wl); 111void brcms_fatal_error(struct brcms_info *wl);
112 112
113#endif /* _BRCM_MAC80211_IF_H_ */ 113#endif /* _BRCM_MAC80211_IF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 4608e0eb1493..8138f1cff4e5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1906,14 +1906,14 @@ static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_
1906 1906
1907 /* If macaddr exists, use it (Sromrev4, CIS, ...). */ 1907 /* If macaddr exists, use it (Sromrev4, CIS, ...). */
1908 if (!is_zero_ether_addr(sprom->il0mac)) { 1908 if (!is_zero_ether_addr(sprom->il0mac)) {
1909 memcpy(etheraddr, sprom->il0mac, 6); 1909 memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
1910 return; 1910 return;
1911 } 1911 }
1912 1912
1913 if (wlc_hw->_nbands > 1) 1913 if (wlc_hw->_nbands > 1)
1914 memcpy(etheraddr, sprom->et1mac, 6); 1914 memcpy(etheraddr, sprom->et1mac, ETH_ALEN);
1915 else 1915 else
1916 memcpy(etheraddr, sprom->il0mac, 6); 1916 memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
1917} 1917}
1918 1918
1919/* power both the pll and external oscillator on/off */ 1919/* power both the pll and external oscillator on/off */
@@ -5695,7 +5695,7 @@ static bool brcms_c_chipmatch_pci(struct bcma_device *core)
5695 return true; 5695 return true;
5696 if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID)) 5696 if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
5697 return true; 5697 return true;
5698 if (device == BCM4313_D11N2G_ID) 5698 if (device == BCM4313_D11N2G_ID || device == BCM4313_CHIP_ID)
5699 return true; 5699 return true;
5700 if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID)) 5700 if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
5701 return true; 5701 return true;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index b5d7a38b53fe..c4d135cff04a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -616,66 +616,54 @@ struct brcms_bss_cfg {
616 struct brcms_bss_info *current_bss; 616 struct brcms_bss_info *current_bss;
617}; 617};
618 618
619extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, 619int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p);
620 struct sk_buff *p); 620int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
621extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo, 621 uint *blocks);
622 uint *blocks); 622
623 623int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
624extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config); 624void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
625extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags); 625u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec, uint mac_len);
626extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec, 626u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
627 uint mac_len); 627 bool use_rspec, u16 mimo_ctlchbw);
628extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, 628u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
629 u32 rspec, 629 u32 rts_rate, u32 frame_rate,
630 bool use_rspec, u16 mimo_ctlchbw); 630 u8 rts_preamble_type, u8 frame_preamble_type,
631extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only, 631 uint frame_len, bool ba);
632 u32 rts_rate, 632void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
633 u32 frame_rate, 633 struct ieee80211_sta *sta, void (*dma_callback_fn));
634 u8 rts_preamble_type, 634void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
635 u8 frame_preamble_type, uint frame_len, 635int brcms_c_set_nmode(struct brcms_c_info *wlc);
636 bool ba); 636void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc, u32 bcn_rate);
637extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw, 637void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type);
638 struct ieee80211_sta *sta, 638void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
639 void (*dma_callback_fn)); 639 bool mute, struct txpwr_limits *txpwr);
640extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend); 640void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v);
641extern int brcms_c_set_nmode(struct brcms_c_info *wlc); 641u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
642extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc, 642void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
643 u32 bcn_rate); 643 int bands);
644extern void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, 644void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
645 u8 antsel_type); 645void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
646extern void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, 646void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
647 u16 chanspec, 647void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
648 bool mute, struct txpwr_limits *txpwr); 648void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
649extern void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, 649void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
650 u16 v); 650 u32 override_bit);
651extern u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset); 651void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
652extern void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, 652 u32 override_bit);
653 u16 val, int bands); 653void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset,
654extern void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags); 654 int len, void *buf);
655extern void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val); 655u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
656extern void brcms_b_phy_reset(struct brcms_hardware *wlc_hw); 656void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
657extern void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw); 657 const void *buf, int len, u32 sel);
658extern void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw); 658void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
659extern void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw, 659 void *buf, int len, u32 sel);
660 u32 override_bit); 660void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
661extern void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw, 661u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
662 u32 override_bit); 662void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
663extern void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, 663void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
664 int offset, int len, void *buf); 664void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
665extern u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate); 665void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
666extern void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, 666void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode);
667 uint offset, const void *buf, int len, 667void brcms_c_init_scb(struct scb *scb);
668 u32 sel);
669extern void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
670 void *buf, int len, u32 sel);
671extern void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
672extern u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
673extern void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
674extern void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
675extern void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
676extern void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
677extern void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw,
678 u8 stf_mode);
679extern void brcms_c_init_scb(struct scb *scb);
680 668
681#endif /* _BRCM_MAIN_H_ */ 669#endif /* _BRCM_MAIN_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index e34a71e7d242..4d3734f48d9c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -179,121 +179,106 @@ struct shared_phy_params {
179}; 179};
180 180
181 181
182extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp); 182struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
183extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh, 183struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
184 struct bcma_device *d11core, 184 struct bcma_device *d11core, int bandtype,
185 int bandtype, struct wiphy *wiphy); 185 struct wiphy *wiphy);
186extern void wlc_phy_detach(struct brcms_phy_pub *ppi); 186void wlc_phy_detach(struct brcms_phy_pub *ppi);
187 187
188extern bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype, 188bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
189 u16 *phyrev, u16 *radioid, 189 u16 *phyrev, u16 *radioid, u16 *radiover);
190 u16 *radiover); 190bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
191extern bool wlc_phy_get_encore(struct brcms_phy_pub *pih); 191u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
192extern u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih); 192
193 193void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
194extern void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate); 194void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
195extern void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate); 195void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
196extern void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec); 196void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
197extern void wlc_phy_watchdog(struct brcms_phy_pub *ppi); 197int wlc_phy_down(struct brcms_phy_pub *ppi);
198extern int wlc_phy_down(struct brcms_phy_pub *ppi); 198u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
199extern u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih); 199void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
200extern void wlc_phy_cal_init(struct brcms_phy_pub *ppi); 200void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
201extern void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init); 201
202 202void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec);
203extern void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, 203u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
204 u16 chanspec); 204void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, u16 newch);
205extern u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi); 205u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
206extern void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, 206void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
207 u16 newch); 207
208extern u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi); 208int wlc_phy_rssi_compute(struct brcms_phy_pub *pih, struct d11rxhdr *rxh);
209extern void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw); 209void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
210 210void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
211extern int wlc_phy_rssi_compute(struct brcms_phy_pub *pih, 211bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
212 struct d11rxhdr *rxh); 212
213extern void wlc_phy_por_inform(struct brcms_phy_pub *ppi); 213void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
214extern void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi); 214
215extern bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi); 215void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
216 216void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
217extern void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag); 217
218 218
219extern void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on); 219void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
220extern void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on); 220
221 221void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
222 222 bool wide_filter);
223extern void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi); 223void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
224 224 struct brcms_chanvec *channels);
225extern void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi, 225u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band);
226 bool wide_filter); 226
227extern void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band, 227void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan, u8 *_min_,
228 struct brcms_chanvec *channels); 228 u8 *_max_, int rate);
229extern u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, 229void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
230 uint band); 230 u8 *_max_, u8 *_min_);
231 231void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint band,
232extern void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan, 232 s32 *, s32 *, u32 *);
233 u8 *_min_, u8 *_max_, int rate); 233void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *,
234extern void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, 234 u16 chanspec);
235 uint chan, u8 *_max_, u8 *_min_); 235int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override);
236extern void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, 236int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override);
237 uint band, s32 *, s32 *, u32 *); 237void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
238extern void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, 238 struct txpwr_limits *);
239 struct txpwr_limits *, 239bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
240 u16 chanspec); 240void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl);
241extern int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, 241u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
242 bool *override); 242u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
243extern int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, 243bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
244 bool override); 244
245extern void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi, 245void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
246 struct txpwr_limits *); 246void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
247extern bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi); 247void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain);
248extern void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, 248u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
249 bool hwpwrctrl); 249s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec);
250extern u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi); 250void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
251extern u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi); 251
252extern bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih); 252void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
253 253void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
254extern void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, 254void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
255 u8 rxchain); 255void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
256extern void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, 256
257 u8 rxchain); 257void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
258extern void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, 258void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
259 u8 *rxchain); 259void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
260extern u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih); 260void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
261extern s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, 261
262 u16 chanspec); 262void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
263extern void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val); 263
264 264void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
265extern void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason); 265 struct tx_power *power, uint channel);
266extern void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi); 266
267extern void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock); 267void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
268extern void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi); 268bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
269 269void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, u8 txpwr_percent);
270extern void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val); 270void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
271extern void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi); 271void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt);
272extern void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val); 272void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
273extern void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags); 273
274 274void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
275extern void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type); 275
276 276void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
277extern void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi, 277void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
278 struct tx_power *power, uint channel); 278
279 279const u8 *wlc_phy_get_ofdm_rate_lookup(void);
280extern void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal); 280
281extern bool wlc_phy_test_ison(struct brcms_phy_pub *ppi); 281s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
282extern void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, 282 u8 mcs_offset);
283 u8 txpwr_percent); 283s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
284extern void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
285extern void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih,
286 bool bf_preempt);
287extern void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
288
289extern void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
290
291extern void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
292extern void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
293
294extern const u8 *wlc_phy_get_ofdm_rate_lookup(void);
295
296extern s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
297 u8 mcs_offset);
298extern s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
299#endif /* _BRCM_PHY_HAL_H_ */ 284#endif /* _BRCM_PHY_HAL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index 1dc767c31653..4960f7d26804 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -910,113 +910,103 @@ struct lcnphy_radio_regs {
910 u8 do_init_g; 910 u8 do_init_g;
911}; 911};
912 912
913extern u16 read_phy_reg(struct brcms_phy *pi, u16 addr); 913u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
914extern void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val); 914void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
915extern void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val); 915void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
916extern void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val); 916void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
917extern void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val); 917void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
918 918
919extern u16 read_radio_reg(struct brcms_phy *pi, u16 addr); 919u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
920extern void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val); 920void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
921extern void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val); 921void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
922extern void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, 922void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
923 u16 val); 923void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
924extern void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask); 924
925 925void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
926extern void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val); 926
927 927void wlc_phyreg_enter(struct brcms_phy_pub *pih);
928extern void wlc_phyreg_enter(struct brcms_phy_pub *pih); 928void wlc_phyreg_exit(struct brcms_phy_pub *pih);
929extern void wlc_phyreg_exit(struct brcms_phy_pub *pih); 929void wlc_radioreg_enter(struct brcms_phy_pub *pih);
930extern void wlc_radioreg_enter(struct brcms_phy_pub *pih); 930void wlc_radioreg_exit(struct brcms_phy_pub *pih);
931extern void wlc_radioreg_exit(struct brcms_phy_pub *pih); 931
932 932void wlc_phy_read_table(struct brcms_phy *pi,
933extern void wlc_phy_read_table(struct brcms_phy *pi, 933 const struct phytbl_info *ptbl_info,
934 const struct phytbl_info *ptbl_info, 934 u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
935 u16 tblAddr, u16 tblDataHi, 935void wlc_phy_write_table(struct brcms_phy *pi,
936 u16 tblDatalo); 936 const struct phytbl_info *ptbl_info,
937extern void wlc_phy_write_table(struct brcms_phy *pi, 937 u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
938 const struct phytbl_info *ptbl_info, 938void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
939 u16 tblAddr, u16 tblDataHi, u16 tblDatalo); 939 u16 tblAddr, u16 tblDataHi, u16 tblDataLo);
940extern void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, 940void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
941 uint tbl_offset, u16 tblAddr, u16 tblDataHi, 941
942 u16 tblDataLo); 942void write_phy_channel_reg(struct brcms_phy *pi, uint val);
943extern void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val); 943void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
944 944
945extern void write_phy_channel_reg(struct brcms_phy *pi, uint val); 945u8 wlc_phy_nbits(s32 value);
946extern void wlc_phy_txpower_update_shm(struct brcms_phy *pi); 946void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
947 947
948extern u8 wlc_phy_nbits(s32 value); 948uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
949extern void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core); 949 struct radio_20xx_regs *radioregs);
950 950uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
951extern uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi, 951 const struct radio_regs *radioregs,
952 struct radio_20xx_regs *radioregs); 952 u16 core_offset);
953extern uint wlc_phy_init_radio_regs(struct brcms_phy *pi, 953
954 const struct radio_regs *radioregs, 954void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
955 u16 core_offset); 955
956 956void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
957extern void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi); 957void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
958 958
959extern void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on); 959void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
960extern void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, 960void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
961 s32 *eps_imag); 961
962 962bool wlc_phy_attach_nphy(struct brcms_phy *pi);
963extern void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi); 963bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
964extern void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi); 964
965 965void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
966extern bool wlc_phy_attach_nphy(struct brcms_phy *pi); 966
967extern bool wlc_phy_attach_lcnphy(struct brcms_phy *pi); 967void wlc_phy_init_nphy(struct brcms_phy *pi);
968 968void wlc_phy_init_lcnphy(struct brcms_phy *pi);
969extern void wlc_phy_detach_lcnphy(struct brcms_phy *pi); 969
970 970void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
971extern void wlc_phy_init_nphy(struct brcms_phy *pi); 971void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
972extern void wlc_phy_init_lcnphy(struct brcms_phy *pi); 972
973 973void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, u16 chanspec);
974extern void wlc_phy_cal_init_nphy(struct brcms_phy *pi); 974void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec);
975extern void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi); 975void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi, u16 chanspec);
976 976int wlc_phy_channel2freq(uint channel);
977extern void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, 977int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
978 u16 chanspec); 978int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
979extern void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, 979
980 u16 chanspec); 980void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
981extern void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi, 981s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
982 u16 chanspec); 982
983extern int wlc_phy_channel2freq(uint channel); 983void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
984extern int wlc_phy_chanspec_freq2bandrange_lpssn(uint); 984void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
985extern int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec); 985void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
986 986
987extern void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode); 987void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
988extern s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi); 988void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
989 989void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
990extern void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi); 990void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
991extern void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi); 991 bool iqcalmode);
992extern void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi); 992
993 993void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
994extern void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index); 994 u8 *max_pwr, u8 rate_id);
995extern void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable); 995void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
996extern void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi); 996 u8 rate_mcs_end, u8 rate_ofdm_start);
997extern void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, 997void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power, u8 rate_ofdm_start,
998 u16 max_val, bool iqcalmode); 998 u8 rate_ofdm_end, u8 rate_mcs_start);
999 999
1000extern void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan, 1000u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
1001 u8 *max_pwr, u8 rate_id); 1001s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
1002extern void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start, 1002s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
1003 u8 rate_mcs_end, 1003s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
1004 u8 rate_ofdm_start); 1004void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
1005extern void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power, 1005void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
1006 u8 rate_ofdm_start, 1006void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
1007 u8 rate_ofdm_end, 1007void wlc_2064_vco_cal(struct brcms_phy *pi);
1008 u8 rate_mcs_start); 1008
1009 1009void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
1010extern u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
1011extern s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
1012extern s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
1013extern s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
1014extern void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
1015extern void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
1016extern void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
1017extern void wlc_2064_vco_cal(struct brcms_phy *pi);
1018
1019extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
1020 1010
1021#define LCNPHY_TBL_ID_PAPDCOMPDELTATBL 0x18 1011#define LCNPHY_TBL_ID_PAPDCOMPDELTATBL 0x18
1022#define LCNPHY_TX_POWER_TABLE_SIZE 128 1012#define LCNPHY_TX_POWER_TABLE_SIZE 128
@@ -1030,26 +1020,24 @@ extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
1030 1020
1031#define LCNPHY_TX_PWR_CTRL_TEMPBASED 0xE001 1021#define LCNPHY_TX_PWR_CTRL_TEMPBASED 0xE001
1032 1022
1033extern void wlc_lcnphy_write_table(struct brcms_phy *pi, 1023void wlc_lcnphy_write_table(struct brcms_phy *pi,
1034 const struct phytbl_info *pti); 1024 const struct phytbl_info *pti);
1035extern void wlc_lcnphy_read_table(struct brcms_phy *pi, 1025void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti);
1036 struct phytbl_info *pti); 1026void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
1037extern void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b); 1027void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
1038extern void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq); 1028void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
1039extern void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b); 1029u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
1040extern u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi); 1030void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0, u8 *eq0, u8 *fi0,
1041extern void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0, 1031 u8 *fq0);
1042 u8 *eq0, u8 *fi0, u8 *fq0); 1032void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
1043extern void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode); 1033void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
1044extern void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode); 1034bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
1045extern bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi); 1035void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
1046extern void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi); 1036s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
1047extern s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1); 1037void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, s8 *cck_pwr);
1048extern void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, 1038void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
1049 s8 *cck_pwr); 1039
1050extern void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi); 1040s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
1051
1052extern s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
1053 1041
1054#define NPHY_MAX_HPVGA1_INDEX 10 1042#define NPHY_MAX_HPVGA1_INDEX 10
1055#define NPHY_DEF_HPVGA1_INDEXLIMIT 7 1043#define NPHY_DEF_HPVGA1_INDEXLIMIT 7
@@ -1060,9 +1048,8 @@ struct phy_iq_est {
1060 u32 q_pwr; 1048 u32 q_pwr;
1061}; 1049};
1062 1050
1063extern void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, 1051void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable);
1064 bool enable); 1052void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
1065extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
1066 1053
1067#define wlc_phy_write_table_nphy(pi, pti) \ 1054#define wlc_phy_write_table_nphy(pi, pti) \
1068 wlc_phy_write_table(pi, pti, 0x72, 0x74, 0x73) 1055 wlc_phy_write_table(pi, pti, 0x72, 0x74, 0x73)
@@ -1076,10 +1063,10 @@ extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
1076#define wlc_nphy_table_data_write(pi, w, v) \ 1063#define wlc_nphy_table_data_write(pi, w, v) \
1077 wlc_phy_table_data_write((pi), (w), (v)) 1064 wlc_phy_table_data_write((pi), (w), (v))
1078 1065
1079extern void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o, 1066void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o, u32 w,
1080 u32 w, void *d); 1067 void *d);
1081extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32, 1068void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32, u32,
1082 u32, const void *); 1069 const void *);
1083 1070
1084#define PHY_IPA(pi) \ 1071#define PHY_IPA(pi) \
1085 ((pi->ipa2g_on && CHSPEC_IS2G(pi->radio_chanspec)) || \ 1072 ((pi->ipa2g_on && CHSPEC_IS2G(pi->radio_chanspec)) || \
@@ -1089,73 +1076,67 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
1089 if (NREV_LT((pi)->pubpi.phy_rev, 3)) \ 1076 if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
1090 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) 1077 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
1091 1078
1092extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype); 1079void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
1093extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi); 1080void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
1094extern void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en); 1081void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
1095 1082
1096extern u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan); 1083u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
1097extern void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on); 1084void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
1098 1085
1099extern void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi); 1086void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
1100 1087
1101extern void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd); 1088void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
1102extern s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi); 1089s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
1103 1090
1104extern u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val); 1091u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
1105 1092
1106extern void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est, 1093void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
1107 u16 num_samps, u8 wait_time, 1094 u16 num_samps, u8 wait_time, u8 wait_for_crs);
1108 u8 wait_for_crs); 1095
1109 1096void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
1110extern void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write, 1097 struct nphy_iq_comp *comp);
1111 struct nphy_iq_comp *comp); 1098void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
1112extern void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi); 1099
1113 1100void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask);
1114extern void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, 1101u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
1115 u8 rxcore_bitmask); 1102
1116extern u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih); 1103void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
1117 1104void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
1118extern void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type); 1105void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
1119extern void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi); 1106void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
1120extern void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi); 1107u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
1121extern void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi); 1108
1122extern u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi); 1109struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
1123 1110int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
1124extern struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi); 1111 struct nphy_txgains target_gain, bool full, bool m);
1125extern int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, 1112int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
1126 struct nphy_txgains target_gain, 1113 u8 type, bool d);
1127 bool full, bool m); 1114void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
1128extern int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi, 1115 s8 txpwrindex, bool res);
1129 struct nphy_txgains target_gain, 1116void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
1130 u8 type, bool d); 1117int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
1131extern void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask, 1118 s32 *rssi_buf, u8 nsamps);
1132 s8 txpwrindex, bool res); 1119void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
1133extern void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type); 1120int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
1134extern int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type, 1121void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, s32 dBm_targetpower,
1135 s32 *rssi_buf, u8 nsamps); 1122 bool debug);
1136extern void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi); 1123int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val, u8 mode,
1137extern int wlc_phy_aci_scan_nphy(struct brcms_phy *pi); 1124 u8, bool);
1138extern void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, 1125void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
1139 s32 dBm_targetpower, bool debug); 1126void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
1140extern int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val, 1127 u8 num_samps);
1141 u8 mode, u8, bool); 1128void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
1142extern void wlc_phy_stopplayback_nphy(struct brcms_phy *pi); 1129
1143extern void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf, 1130int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh);
1144 u8 num_samps);
1145extern void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
1146
1147extern int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi,
1148 struct d11rxhdr *rxh);
1149 1131
1150#define NPHY_TESTPATTERN_BPHY_EVM 0 1132#define NPHY_TESTPATTERN_BPHY_EVM 0
1151#define NPHY_TESTPATTERN_BPHY_RFCS 1 1133#define NPHY_TESTPATTERN_BPHY_RFCS 1
1152 1134
1153extern void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs); 1135void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
1154 1136
1155void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset, 1137void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset,
1156 s8 *ofdmoffset); 1138 s8 *ofdmoffset);
1157extern s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, 1139s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec);
1158 u16 chanspec);
1159 1140
1160extern bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih); 1141bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
1161#endif /* _BRCM_PHY_INT_H_ */ 1142#endif /* _BRCM_PHY_INT_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
index 2c5b66b75970..dd8774717ade 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
@@ -124,56 +124,49 @@
124 124
125struct brcms_phy; 125struct brcms_phy;
126 126
127extern struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw, 127struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
128 struct brcms_info *wl, 128 struct brcms_info *wl,
129 struct brcms_c_info *wlc); 129 struct brcms_c_info *wlc);
130extern void wlc_phy_shim_detach(struct phy_shim_info *physhim); 130void wlc_phy_shim_detach(struct phy_shim_info *physhim);
131 131
132/* PHY to WL utility functions */ 132/* PHY to WL utility functions */
133extern struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim, 133struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
134 void (*fn) (struct brcms_phy *pi), 134 void (*fn)(struct brcms_phy *pi),
135 void *arg, const char *name); 135 void *arg, const char *name);
136extern void wlapi_free_timer(struct wlapi_timer *t); 136void wlapi_free_timer(struct wlapi_timer *t);
137extern void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic); 137void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
138extern bool wlapi_del_timer(struct wlapi_timer *t); 138bool wlapi_del_timer(struct wlapi_timer *t);
139extern void wlapi_intrson(struct phy_shim_info *physhim); 139void wlapi_intrson(struct phy_shim_info *physhim);
140extern u32 wlapi_intrsoff(struct phy_shim_info *physhim); 140u32 wlapi_intrsoff(struct phy_shim_info *physhim);
141extern void wlapi_intrsrestore(struct phy_shim_info *physhim, 141void wlapi_intrsrestore(struct phy_shim_info *physhim, u32 macintmask);
142 u32 macintmask); 142
143 143void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, u16 v);
144extern void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, 144u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
145 u16 v); 145void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, u16 mask, u16 val,
146extern u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset); 146 int bands);
147extern void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, 147void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
148 u16 mask, u16 val, int bands); 148void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
149extern void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags); 149void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
150extern void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim); 150void wlapi_enable_mac(struct phy_shim_info *physhim);
151extern void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode); 151void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask, u32 val);
152extern void wlapi_enable_mac(struct phy_shim_info *physhim); 152void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
153extern void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask, 153void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
154 u32 val); 154void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
155extern void wlapi_bmac_phy_reset(struct phy_shim_info *physhim); 155void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
156extern void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw); 156void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
157extern void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk); 157void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
158extern void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk); 158void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *physhim);
159extern void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on); 159void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *physhim);
160extern void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim); 160void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
161extern void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info * 161 int len, void *buf);
162 physhim); 162u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, u8 rate);
163extern void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info * 163void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
164 physhim); 164void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint, void *buf,
165extern void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o, 165 int, u32 sel);
166 int len, void *buf); 166void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint, const void *buf,
167extern u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, 167 int, u32);
168 u8 rate); 168
169extern void wlapi_ucode_sample_init(struct phy_shim_info *physhim); 169void wlapi_high_update_phy_mode(struct phy_shim_info *physhim, u32 phy_mode);
170extern void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint, 170u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
171 void *buf, int, u32 sel);
172extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
173 const void *buf, int, u32);
174
175extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
176 u32 phy_mode);
177extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
178 171
179#endif /* _BRCM_PHY_SHIM_H_ */ 172#endif /* _BRCM_PHY_SHIM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 20e2012d5a3a..a014bbc4f935 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -20,7 +20,7 @@
20 20
21#include "types.h" 21#include "types.h"
22 22
23extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih); 23u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
24extern u32 si_pmu_measure_alpclk(struct si_pub *sih); 24u32 si_pmu_measure_alpclk(struct si_pub *sih);
25 25
26#endif /* _BRCM_PMU_H_ */ 26#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index d36ea5e1cc49..4da38cb4f318 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -266,83 +266,76 @@ struct brcms_antselcfg {
266}; 266};
267 267
268/* common functions for every port */ 268/* common functions for every port */
269extern struct brcms_c_info * 269struct brcms_c_info *brcms_c_attach(struct brcms_info *wl,
270brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit, 270 struct bcma_device *core, uint unit,
271 bool piomode, uint *perr); 271 bool piomode, uint *perr);
272extern uint brcms_c_detach(struct brcms_c_info *wlc); 272uint brcms_c_detach(struct brcms_c_info *wlc);
273extern int brcms_c_up(struct brcms_c_info *wlc); 273int brcms_c_up(struct brcms_c_info *wlc);
274extern uint brcms_c_down(struct brcms_c_info *wlc); 274uint brcms_c_down(struct brcms_c_info *wlc);
275 275
276extern bool brcms_c_chipmatch(struct bcma_device *core); 276bool brcms_c_chipmatch(struct bcma_device *core);
277extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx); 277void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
278extern void brcms_c_reset(struct brcms_c_info *wlc); 278void brcms_c_reset(struct brcms_c_info *wlc);
279 279
280extern void brcms_c_intrson(struct brcms_c_info *wlc); 280void brcms_c_intrson(struct brcms_c_info *wlc);
281extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc); 281u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
282extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask); 282void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
283extern bool brcms_c_intrsupd(struct brcms_c_info *wlc); 283bool brcms_c_intrsupd(struct brcms_c_info *wlc);
284extern bool brcms_c_isr(struct brcms_c_info *wlc); 284bool brcms_c_isr(struct brcms_c_info *wlc);
285extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded); 285bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
286extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, 286bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
287 struct sk_buff *sdu, 287 struct ieee80211_hw *hw);
288 struct ieee80211_hw *hw); 288bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
289extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid); 289void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val);
290extern void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, 290int brcms_c_get_header_len(void);
291 int val); 291void brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
292extern int brcms_c_get_header_len(void); 292 const u8 *addr);
293extern void brcms_c_set_addrmatch(struct brcms_c_info *wlc, 293void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
294 int match_reg_offset, 294 const struct ieee80211_tx_queue_params *arg,
295 const u8 *addr); 295 bool suspend);
296extern void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci, 296struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
297 const struct ieee80211_tx_queue_params *arg, 297void brcms_c_ampdu_flush(struct brcms_c_info *wlc, struct ieee80211_sta *sta,
298 bool suspend); 298 u16 tid);
299extern struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc); 299void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
300extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc, 300 u8 ba_wsize, uint max_rx_ampdu_bytes);
301 struct ieee80211_sta *sta, u16 tid); 301int brcms_c_module_register(struct brcms_pub *pub, const char *name,
302extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid, 302 struct brcms_info *hdl,
303 u8 ba_wsize, uint max_rx_ampdu_bytes); 303 int (*down_fn)(void *handle));
304extern int brcms_c_module_register(struct brcms_pub *pub, 304int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
305 const char *name, struct brcms_info *hdl, 305 struct brcms_info *hdl);
306 int (*down_fn)(void *handle)); 306void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
307extern int brcms_c_module_unregister(struct brcms_pub *pub, const char *name, 307void brcms_c_enable_mac(struct brcms_c_info *wlc);
308 struct brcms_info *hdl); 308void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
309extern void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc); 309void brcms_c_scan_start(struct brcms_c_info *wlc);
310extern void brcms_c_enable_mac(struct brcms_c_info *wlc); 310void brcms_c_scan_stop(struct brcms_c_info *wlc);
311extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state); 311int brcms_c_get_curband(struct brcms_c_info *wlc);
312extern void brcms_c_scan_start(struct brcms_c_info *wlc); 312int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
313extern void brcms_c_scan_stop(struct brcms_c_info *wlc); 313int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
314extern int brcms_c_get_curband(struct brcms_c_info *wlc); 314void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
315extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
316extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
317extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
318 struct brcm_rateset *currs); 315 struct brcm_rateset *currs);
319extern int brcms_c_set_rateset(struct brcms_c_info *wlc, 316int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs);
320 struct brcm_rateset *rs); 317int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
321extern int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period); 318u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
322extern u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx); 319void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
323extern void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
324 s8 sslot_override); 320 s8 sslot_override);
325extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, 321void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval);
326 u8 interval); 322u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
327extern u64 brcms_c_tsf_get(struct brcms_c_info *wlc); 323void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
328extern void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf); 324int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
329extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr); 325int brcms_c_get_tx_power(struct brcms_c_info *wlc);
330extern int brcms_c_get_tx_power(struct brcms_c_info *wlc); 326bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
331extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc); 327void brcms_c_mute(struct brcms_c_info *wlc, bool on);
332extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); 328bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
333extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc); 329void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
334extern void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr); 330void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr, const u8 *bssid,
335extern void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr, 331 u8 *ssid, size_t ssid_len);
336 const u8 *bssid, u8 *ssid, size_t ssid_len); 332void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
337extern void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr); 333void brcms_c_update_beacon(struct brcms_c_info *wlc);
338extern void brcms_c_update_beacon(struct brcms_c_info *wlc); 334void brcms_c_set_new_beacon(struct brcms_c_info *wlc, struct sk_buff *beacon,
339extern void brcms_c_set_new_beacon(struct brcms_c_info *wlc, 335 u16 tim_offset, u16 dtim_period);
340 struct sk_buff *beacon, u16 tim_offset, 336void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
341 u16 dtim_period); 337 struct sk_buff *probe_resp);
342extern void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc, 338void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
343 struct sk_buff *probe_resp); 339void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid, size_t ssid_len);
344extern void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
345extern void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid,
346 size_t ssid_len);
347 340
348#endif /* _BRCM_PUB_H_ */ 341#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
index 980d578825cc..5bb88b78ed64 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/rate.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
@@ -216,34 +216,30 @@ static inline u8 cck_phy2mac_rate(u8 signal)
216 216
217/* sanitize, and sort a rateset with the basic bit(s) preserved, validate 217/* sanitize, and sort a rateset with the basic bit(s) preserved, validate
218 * rateset */ 218 * rateset */
219extern bool 219bool brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
220brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs, 220 const struct brcms_c_rateset *hw_rs,
221 const struct brcms_c_rateset *hw_rs, 221 bool check_brate, u8 txstreams);
222 bool check_brate, u8 txstreams);
223/* copy rateset src to dst as-is (no masking or sorting) */ 222/* copy rateset src to dst as-is (no masking or sorting) */
224extern void brcms_c_rateset_copy(const struct brcms_c_rateset *src, 223void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
225 struct brcms_c_rateset *dst); 224 struct brcms_c_rateset *dst);
226 225
227/* would be nice to have these documented ... */ 226/* would be nice to have these documented ... */
228extern u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp); 227u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
229 228
230extern void brcms_c_rateset_filter(struct brcms_c_rateset *src, 229void brcms_c_rateset_filter(struct brcms_c_rateset *src,
231 struct brcms_c_rateset *dst, bool basic_only, u8 rates, uint xmask, 230 struct brcms_c_rateset *dst, bool basic_only,
232 bool mcsallow); 231 u8 rates, uint xmask, bool mcsallow);
233 232
234extern void 233void brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
235brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt, 234 const struct brcms_c_rateset *rs_hw, uint phy_type,
236 const struct brcms_c_rateset *rs_hw, uint phy_type, 235 int bandtype, bool cck_only, uint rate_mask,
237 int bandtype, bool cck_only, uint rate_mask, 236 bool mcsallow, u8 bw, u8 txstreams);
238 bool mcsallow, u8 bw, u8 txstreams); 237
239 238s16 brcms_c_rate_legacy_phyctl(uint rate);
240extern s16 brcms_c_rate_legacy_phyctl(uint rate); 239
241 240void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
242extern void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams); 241void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
243extern void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset); 242void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset, u8 txstreams);
244extern void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset, 243void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset, u8 bw);
245 u8 txstreams);
246extern void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset,
247 u8 bw);
248 244
249#endif /* _BRCM_RATE_H_ */ 245#endif /* _BRCM_RATE_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.h b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
index 19f6580f69be..ba9493009a33 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
@@ -19,24 +19,19 @@
 
 #include "types.h"
 
-extern int brcms_c_stf_attach(struct brcms_c_info *wlc);
-extern void brcms_c_stf_detach(struct brcms_c_info *wlc);
+int brcms_c_stf_attach(struct brcms_c_info *wlc);
+void brcms_c_stf_detach(struct brcms_c_info *wlc);
 
-extern void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
-                                            u16 *ss_algo_channel,
-                                            u16 chanspec);
-extern int brcms_c_stf_ss_update(struct brcms_c_info *wlc,
-                                 struct brcms_band *band);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val,
-                                   bool force);
-extern bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
-extern u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
-                                      u32 rspec);
-extern u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc,
-                                            u32 rspec);
+void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
+                                     u16 *ss_algo_channel, u16 chanspec);
+int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
+bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
+u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, u32 rspec);
+u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, u32 rspec);
 
 #endif /* _BRCM_STF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
index 18750a814b4f..c87dd89bcb78 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
@@ -43,16 +43,14 @@ struct brcms_ucode {
         u32 *bcm43xx_bomminor;
 };
 
-extern int
-brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
+int brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
 
-extern void brcms_ucode_data_free(struct brcms_ucode *ucode);
+void brcms_ucode_data_free(struct brcms_ucode *ucode);
 
-extern int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf,
-                                unsigned int idx);
-extern int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
-                                 unsigned int idx);
-extern void brcms_ucode_free_buf(void *);
-extern int brcms_check_firmwares(struct brcms_info *wl);
+int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, unsigned int idx);
+int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
+                          unsigned int idx);
+void brcms_ucode_free_buf(void *);
+int brcms_check_firmwares(struct brcms_info *wl);
 
 #endif /* _BRCM_UCODE_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index c1fe245bb07e..84113ea16f84 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -41,5 +41,6 @@
 #define BCM4331_CHIP_ID         0x4331
 #define BCM4334_CHIP_ID         0x4334
 #define BCM4335_CHIP_ID         0x4335
+#define BCM4339_CHIP_ID         0x4339
 
 #endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
index 92623f02b1c0..8660a2cba098 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
@@ -140,6 +140,6 @@ struct brcmu_d11inf {
         void (*decchspec)(struct brcmu_chan *ch);
 };
 
-extern void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
+void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
 
 #endif /* _BRCMU_CHANNELS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 898cacb8d01d..8ba445b3fd72 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -114,31 +114,29 @@ static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
         return skb_peek_tail(&pq->q[prec].skblist);
 }
 
-extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
-                                       struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
-                                            struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
-                                             bool (*match_fn)(struct sk_buff *p,
-                                                              void *arg),
-                                             void *arg);
+struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p);
+struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+                                     struct sk_buff *p);
+struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
+                                      bool (*match_fn)(struct sk_buff *p,
+                                                       void *arg),
+                                      void *arg);
 
 /* packet primitives */
-extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
-extern void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
+struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
+void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
 
 /* Empty the queue at particular precedence level */
 /* callback function fn(pkt, arg) returns true if pkt belongs to if */
-extern void brcmu_pktq_pflush(struct pktq *pq, int prec,
-                              bool dir, bool (*fn)(struct sk_buff *, void *), void *arg);
+void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
+                       bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* operations on a set of precedences in packet queue */
 
-extern int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
-extern struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
-                                       int *prec_out);
+int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
+struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
 
 /* operations on packet queue as a whole */
 
@@ -167,11 +165,11 @@ static inline bool pktq_empty(struct pktq *pq)
         return pq->len == 0;
 }
 
-extern void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
+void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
 /* prec_out may be NULL if caller is not interested in return value */
-extern struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
-extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
-                             bool (*fn)(struct sk_buff *, void *), void *arg);
+struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
+void brcmu_pktq_flush(struct pktq *pq, bool dir,
+                      bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* externs */
 /* ip address */
@@ -204,13 +202,13 @@ static inline u16 brcmu_maskget16(u16 var, u16 mask, u8 shift)
 /* externs */
 /* format/print */
 #ifdef DEBUG
-extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
+void brcmu_prpkt(const char *msg, struct sk_buff *p0);
 #else
 #define brcmu_prpkt(a, b)
 #endif /* DEBUG */
 
 #ifdef DEBUG
-extern __printf(3, 4)
+__printf(3, 4)
 void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...);
 #else
 __printf(3, 4)
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 755a0c8edfe1..40078f5f932e 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -365,7 +365,7 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
365static int cw1200_spi_probe(struct spi_device *func) 365static int cw1200_spi_probe(struct spi_device *func)
366{ 366{
367 const struct cw1200_platform_data_spi *plat_data = 367 const struct cw1200_platform_data_spi *plat_data =
368 func->dev.platform_data; 368 dev_get_platdata(&func->dev);
369 struct hwbus_priv *self; 369 struct hwbus_priv *self;
370 int status; 370 int status;
371 371
@@ -443,7 +443,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
443 } 443 }
444 kfree(self); 444 kfree(self);
445 } 445 }
446 cw1200_spi_off(func->dev.platform_data); 446 cw1200_spi_off(dev_get_platdata(&func->dev));
447 447
448 return 0; 448 return 0;
449} 449}
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 970a48baaf80..de7c4ffec309 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -217,7 +217,7 @@ static void prism2_host_roaming(local_info_t *local)
217 } 217 }
218 } 218 }
219 219
220 memcpy(req.bssid, selected->bssid, 6); 220 memcpy(req.bssid, selected->bssid, ETH_ALEN);
221 req.channel = selected->chid; 221 req.channel = selected->chid;
222 spin_unlock_irqrestore(&local->lock, flags); 222 spin_unlock_irqrestore(&local->lock, flags);
223 223
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 6b823a1ab789..81903e33d5b1 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2698,7 +2698,7 @@ static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2698/* data's copy of the eeprom data */ 2698/* data's copy of the eeprom data */
2699static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac) 2699static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2700{ 2700{
2701 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6); 2701 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
2702} 2702}
2703 2703
2704static void ipw_read_eeprom(struct ipw_priv *priv) 2704static void ipw_read_eeprom(struct ipw_priv *priv)
@@ -11885,7 +11885,6 @@ static int ipw_pci_probe(struct pci_dev *pdev,
11885 pci_release_regions(pdev); 11885 pci_release_regions(pdev);
11886 out_pci_disable_device: 11886 out_pci_disable_device:
11887 pci_disable_device(pdev); 11887 pci_disable_device(pdev);
11888 pci_set_drvdata(pdev, NULL);
11889 out_free_libipw: 11888 out_free_libipw:
11890 free_libipw(priv->net_dev, 0); 11889 free_libipw(priv->net_dev, 0);
11891 out: 11890 out:
@@ -11966,7 +11965,6 @@ static void ipw_pci_remove(struct pci_dev *pdev)
11966 iounmap(priv->hw_base); 11965 iounmap(priv->hw_base);
11967 pci_release_regions(pdev); 11966 pci_release_regions(pdev);
11968 pci_disable_device(pdev); 11967 pci_disable_device(pdev);
11969 pci_set_drvdata(pdev, NULL);
11970 /* wiphy_unregister needs to be here, before free_libipw */ 11968 /* wiphy_unregister needs to be here, before free_libipw */
11971 wiphy_unregister(priv->ieee->wdev.wiphy); 11969 wiphy_unregister(priv->ieee->wdev.wiphy);
11972 kfree(priv->ieee->a_band.channels); 11970 kfree(priv->ieee->a_band.channels);
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 6eede52ad8c0..5ce2f59d3378 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -950,66 +950,55 @@ static inline int libipw_is_cck_rate(u8 rate)
 }
 
 /* libipw.c */
-extern void free_libipw(struct net_device *dev, int monitor);
-extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
-extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
+void free_libipw(struct net_device *dev, int monitor);
+struct net_device *alloc_libipw(int sizeof_priv, int monitor);
+int libipw_change_mtu(struct net_device *dev, int new_mtu);
 
-extern void libipw_networks_age(struct libipw_device *ieee,
-                                unsigned long age_secs);
+void libipw_networks_age(struct libipw_device *ieee, unsigned long age_secs);
 
-extern int libipw_set_encryption(struct libipw_device *ieee);
+int libipw_set_encryption(struct libipw_device *ieee);
 
 /* libipw_tx.c */
-extern netdev_tx_t libipw_xmit(struct sk_buff *skb,
-                               struct net_device *dev);
-extern void libipw_txb_free(struct libipw_txb *);
+netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
+void libipw_txb_free(struct libipw_txb *);
 
 /* libipw_rx.c */
-extern void libipw_rx_any(struct libipw_device *ieee,
-                          struct sk_buff *skb, struct libipw_rx_stats *stats);
-extern int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
-                     struct libipw_rx_stats *rx_stats);
+void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
+                   struct libipw_rx_stats *stats);
+int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
+              struct libipw_rx_stats *rx_stats);
 /* make sure to set stats->len */
-extern void libipw_rx_mgt(struct libipw_device *ieee,
-                          struct libipw_hdr_4addr *header,
-                          struct libipw_rx_stats *stats);
-extern void libipw_network_reset(struct libipw_network *network);
+void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header,
+                   struct libipw_rx_stats *stats);
+void libipw_network_reset(struct libipw_network *network);
 
 /* libipw_geo.c */
-extern const struct libipw_geo *libipw_get_geo(struct libipw_device
-                                               *ieee);
-extern void libipw_set_geo(struct libipw_device *ieee,
-                           const struct libipw_geo *geo);
+const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee);
+void libipw_set_geo(struct libipw_device *ieee, const struct libipw_geo *geo);
 
-extern int libipw_is_valid_channel(struct libipw_device *ieee,
-                                   u8 channel);
-extern int libipw_channel_to_index(struct libipw_device *ieee,
-                                   u8 channel);
-extern u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
-extern u8 libipw_get_channel_flags(struct libipw_device *ieee,
-                                   u8 channel);
-extern const struct libipw_channel *libipw_get_channel(struct
-                                                       libipw_device
-                                                       *ieee, u8 channel);
-extern u32 libipw_channel_to_freq(struct libipw_device * ieee,
-                                  u8 channel);
+int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel);
+int libipw_channel_to_index(struct libipw_device *ieee, u8 channel);
+u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
+u8 libipw_get_channel_flags(struct libipw_device *ieee, u8 channel);
+const struct libipw_channel *libipw_get_channel(struct libipw_device *ieee,
+                                                u8 channel);
+u32 libipw_channel_to_freq(struct libipw_device *ieee, u8 channel);
 
 /* libipw_wx.c */
-extern int libipw_wx_get_scan(struct libipw_device *ieee,
-                              struct iw_request_info *info,
-                              union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encode(struct libipw_device *ieee,
-                                struct iw_request_info *info,
-                                union iwreq_data *wrqu, char *key);
-extern int libipw_wx_get_encode(struct libipw_device *ieee,
-                                struct iw_request_info *info,
-                                union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encodeext(struct libipw_device *ieee,
-                                   struct iw_request_info *info,
-                                   union iwreq_data *wrqu, char *extra);
-extern int libipw_wx_get_encodeext(struct libipw_device *ieee,
-                                   struct iw_request_info *info,
-                                   union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_scan(struct libipw_device *ieee, struct iw_request_info *info,
+                       union iwreq_data *wrqu, char *key);
+int libipw_wx_set_encode(struct libipw_device *ieee,
+                         struct iw_request_info *info, union iwreq_data *wrqu,
+                         char *key);
+int libipw_wx_get_encode(struct libipw_device *ieee,
+                         struct iw_request_info *info, union iwreq_data *wrqu,
+                         char *key);
+int libipw_wx_set_encodeext(struct libipw_device *ieee,
+                            struct iw_request_info *info,
+                            union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_encodeext(struct libipw_device *ieee,
+                            struct iw_request_info *info,
+                            union iwreq_data *wrqu, char *extra);
 
 static inline void libipw_increment_scans(struct libipw_device *ieee)
 {
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 9581d07a4242..dea3b50d68b9 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3811,7 +3811,6 @@ out_iounmap:
3811out_pci_release_regions: 3811out_pci_release_regions:
3812 pci_release_regions(pdev); 3812 pci_release_regions(pdev);
3813out_pci_disable_device: 3813out_pci_disable_device:
3814 pci_set_drvdata(pdev, NULL);
3815 pci_disable_device(pdev); 3814 pci_disable_device(pdev);
3816out_ieee80211_free_hw: 3815out_ieee80211_free_hw:
3817 ieee80211_free_hw(il->hw); 3816 ieee80211_free_hw(il->hw);
@@ -3888,7 +3887,6 @@ il3945_pci_remove(struct pci_dev *pdev)
3888 iounmap(il->hw_base); 3887 iounmap(il->hw_base);
3889 pci_release_regions(pdev); 3888 pci_release_regions(pdev);
3890 pci_disable_device(pdev); 3889 pci_disable_device(pdev);
3891 pci_set_drvdata(pdev, NULL);
3892 3890
3893 il_free_channel_map(il); 3891 il_free_channel_map(il);
3894 il_free_geos(il); 3892 il_free_geos(il);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
index 9a8703def0ba..00030d43a194 100644
--- a/drivers/net/wireless/iwlegacy/3945.h
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -189,15 +189,14 @@ struct il3945_ibss_seq {
  * for use by iwl-*.c
  *
  *****************************************************************************/
-extern int il3945_calc_db_from_ratio(int sig_ratio);
-extern void il3945_rx_replenish(void *data);
-extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
-extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
-                                             struct ieee80211_hdr *hdr,
-                                             int left);
-extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
-                                     char **buf, bool display);
-extern void il3945_dump_nic_error_log(struct il_priv *il);
+int il3945_calc_db_from_ratio(int sig_ratio);
+void il3945_rx_replenish(void *data);
+void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+                                      struct ieee80211_hdr *hdr, int left);
+int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, char **buf,
+                              bool display);
+void il3945_dump_nic_error_log(struct il_priv *il);
 
 /******************************************************************************
  *
@@ -215,39 +214,36 @@ extern void il3945_dump_nic_error_log(struct il_priv *il);
  * il3945_mac_  <-- mac80211 callback
  *
  ****************************************************************************/
-extern void il3945_hw_handler_setup(struct il_priv *il);
-extern void il3945_hw_setup_deferred_work(struct il_priv *il);
-extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
-extern int il3945_hw_rxq_stop(struct il_priv *il);
-extern int il3945_hw_set_hw_params(struct il_priv *il);
-extern int il3945_hw_nic_init(struct il_priv *il);
-extern int il3945_hw_nic_stop_master(struct il_priv *il);
-extern void il3945_hw_txq_ctx_free(struct il_priv *il);
-extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
-extern int il3945_hw_nic_reset(struct il_priv *il);
-extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
-                                           struct il_tx_queue *txq,
-                                           dma_addr_t addr, u16 len, u8 reset,
-                                           u8 pad);
-extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
-extern int il3945_hw_get_temperature(struct il_priv *il);
-extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
-extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
-                                             struct il3945_frame *frame,
-                                             u8 rate);
+void il3945_hw_handler_setup(struct il_priv *il);
+void il3945_hw_setup_deferred_work(struct il_priv *il);
+void il3945_hw_cancel_deferred_work(struct il_priv *il);
+int il3945_hw_rxq_stop(struct il_priv *il);
+int il3945_hw_set_hw_params(struct il_priv *il);
+int il3945_hw_nic_init(struct il_priv *il);
+int il3945_hw_nic_stop_master(struct il_priv *il);
+void il3945_hw_txq_ctx_free(struct il_priv *il);
+void il3945_hw_txq_ctx_stop(struct il_priv *il);
+int il3945_hw_nic_reset(struct il_priv *il);
+int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+                                    dma_addr_t addr, u16 len, u8 reset, u8 pad);
+void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il3945_hw_get_temperature(struct il_priv *il);
+int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+                                      struct il3945_frame *frame, u8 rate);
 void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
                                  struct ieee80211_tx_info *info,
                                  struct ieee80211_hdr *hdr, int sta_id);
-extern int il3945_hw_reg_send_txpower(struct il_priv *il);
-extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
-extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+int il3945_hw_reg_send_txpower(struct il_priv *il);
+int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
 void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
-extern void il3945_disable_events(struct il_priv *il);
-extern int il4965_get_temperature(const struct il_priv *il);
-extern void il3945_post_associate(struct il_priv *il);
-extern void il3945_config_ap(struct il_priv *il);
+void il3945_disable_events(struct il_priv *il);
+int il4965_get_temperature(const struct il_priv *il);
+void il3945_post_associate(struct il_priv *il);
+void il3945_config_ap(struct il_priv *il);
 
-extern int il3945_commit_rxon(struct il_priv *il);
+int il3945_commit_rxon(struct il_priv *il);
 
 /**
  * il3945_hw_find_station - Find station id for a given BSSID
@@ -257,14 +253,14 @@ extern int il3945_commit_rxon(struct il_priv *il);
  * not yet been merged into a single common layer for managing the
  * station tables.
  */
-extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+u8 il3945_hw_find_station(struct il_priv *il, const u8 *bssid);
 
-extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
-extern int il3945_init_hw_rate_table(struct il_priv *il);
-extern void il3945_reg_txpower_periodic(struct il_priv *il);
-extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+__le32 il3945_get_antenna_flags(const struct il_priv *il);
+int il3945_init_hw_rate_table(struct il_priv *il);
+void il3945_reg_txpower_periodic(struct il_priv *il);
+int il3945_txpower_set_from_eeprom(struct il_priv *il);
 
-extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+int il3945_rs_next_rate(struct il_priv *il, int rate);
 
 /* scanning */
 int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 5ab50a5b48b1..3982ab76f375 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -6706,7 +6706,6 @@ out_free_eeprom:
6706out_iounmap: 6706out_iounmap:
6707 iounmap(il->hw_base); 6707 iounmap(il->hw_base);
6708out_pci_release_regions: 6708out_pci_release_regions:
6709 pci_set_drvdata(pdev, NULL);
6710 pci_release_regions(pdev); 6709 pci_release_regions(pdev);
6711out_pci_disable_device: 6710out_pci_disable_device:
6712 pci_disable_device(pdev); 6711 pci_disable_device(pdev);
@@ -6787,7 +6786,6 @@ il4965_pci_remove(struct pci_dev *pdev)
6787 iounmap(il->hw_base); 6786 iounmap(il->hw_base);
6788 pci_release_regions(pdev); 6787 pci_release_regions(pdev);
6789 pci_disable_device(pdev); 6788 pci_disable_device(pdev);
6790 pci_set_drvdata(pdev, NULL);
6791 6789
6792 il4965_uninit_drv(il); 6790 il4965_uninit_drv(il);
6793 6791
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 1b15b0b2292b..337dfcf3bbde 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -272,7 +272,7 @@ il4965_hw_valid_rtc_data_addr(u32 addr)
         ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
          (t) > IL_TX_POWER_TEMPERATURE_MAX)
 
-extern void il4965_temperature_calib(struct il_priv *il);
+void il4965_temperature_calib(struct il_priv *il);
 /********************* END TEMPERATURE ***************************************/
 
 /********************* START TXPOWER *****************************************/
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 83f8ed8a5528..ad123d66ab6c 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -858,9 +858,9 @@ struct il_hw_params {
  * il4965_mac_  <-- mac80211 callback
  *
  ****************************************************************************/
-extern void il4965_update_chain_flags(struct il_priv *il);
+void il4965_update_chain_flags(struct il_priv *il);
 extern const u8 il_bcast_addr[ETH_ALEN];
-extern int il_queue_space(const struct il_queue *q);
+int il_queue_space(const struct il_queue *q);
 static inline int
 il_queue_used(const struct il_queue *q, int i)
 {
@@ -1727,7 +1727,7 @@ int il_alloc_txq_mem(struct il_priv *il);
 void il_free_txq_mem(struct il_priv *il);
 
 #ifdef CONFIG_IWLEGACY_DEBUGFS
-extern void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
 #else
 static inline void
 il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
@@ -1760,12 +1760,12 @@ void il_chswitch_done(struct il_priv *il, bool is_success);
 /*****************************************************
 * TX
 ******************************************************/
-extern void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
-extern int il_tx_queue_init(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_unmap(struct il_priv *il, int txq_id);
-extern void il_tx_queue_free(struct il_priv *il, int txq_id);
-extern void il_setup_watchdog(struct il_priv *il);
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
 /*****************************************************
  * TX power
  ****************************************************/
@@ -1931,10 +1931,10 @@ il_is_ready_rf(struct il_priv *il)
         return il_is_ready(il);
 }
 
-extern void il_send_bt_config(struct il_priv *il);
-extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
-extern void il_apm_stop(struct il_priv *il);
-extern void _il_apm_stop(struct il_priv *il);
+void il_send_bt_config(struct il_priv *il);
+int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+void _il_apm_stop(struct il_priv *il);
 
 int il_apm_init(struct il_priv *il);
 
@@ -1968,15 +1968,15 @@ void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
 
 irqreturn_t il_isr(int irq, void *data);
 
-extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
-extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
-extern bool _il_grab_nic_access(struct il_priv *il);
-extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
-extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
-extern u32 il_rd_prph(struct il_priv *il, u32 reg);
-extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
-extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
-extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+void il_set_bit(struct il_priv *p, u32 r, u32 m);
+void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+bool _il_grab_nic_access(struct il_priv *il);
+int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+u32 il_rd_prph(struct il_priv *il, u32 reg);
+void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
 
 static inline void
 _il_write8(struct il_priv *il, u32 ofs, u8 val)
@@ -2868,13 +2868,13 @@ il4965_first_antenna(u8 mask)
  * The specific throughput table used is based on the type of network
  * the associated with, including A, B, G, and G w/ TGG protection
  */
-extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
 
 /* Initialize station's rate scaling information after adding station */
-extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
-                                u8 sta_id);
-extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
-                                u8 sta_id);
+void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+                         u8 sta_id);
+void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+                         u8 sta_id);
 
 /**
  * il_rate_control_register - Register the rate control algorithm callbacks
@@ -2886,8 +2886,8 @@ extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
  * ieee80211_register_hw
  *
  */
-extern int il4965_rate_control_register(void);
-extern int il3945_rate_control_register(void);
+int il4965_rate_control_register(void);
+int il3945_rate_control_register(void);
 
 /**
  * il_rate_control_unregister - Unregister the rate control callbacks
@@ -2895,11 +2895,11 @@ extern int il3945_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void il4965_rate_control_unregister(void);
-extern void il3945_rate_control_unregister(void);
+void il4965_rate_control_unregister(void);
+void il3945_rate_control_unregister(void);
 
-extern int il_power_update_mode(struct il_priv *il, bool force);
-extern void il_power_initialize(struct il_priv *il);
+int il_power_update_mode(struct il_priv *il, bool force);
+void il_power_initialize(struct il_priv *il);
 
 extern u32 il_debug_level;
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index f2a86ffc3b4c..23d5f0275ce9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -397,7 +397,7 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
         return cpu_to_le32(flags|(u32)rate);
 }
 
-extern int iwl_alive_start(struct iwl_priv *priv);
+int iwl_alive_start(struct iwl_priv *priv);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 void iwl_print_rx_config_cmd(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index a79fdd137f95..7434d9edf3b7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -270,7 +270,7 @@ struct iwl_sensitivity_ranges {
  * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
  *
  ****************************************************************************/
-extern void iwl_update_chain_flags(struct iwl_priv *priv);
+void iwl_update_chain_flags(struct iwl_priv *priv);
 extern const u8 iwl_bcast_addr[ETH_ALEN];
 
 #define IWL_OPERATION_MODE_AUTO     0
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 5d83cab22d62..26fc550cd68c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -407,8 +407,8 @@ static inline u8 first_antenna(u8 mask)
 
 
 /* Initialize station's rate scaling information after adding station */
-extern void iwl_rs_rate_init(struct iwl_priv *priv,
-                             struct ieee80211_sta *sta, u8 sta_id);
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                      u8 sta_id);
 
 /**
  * iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -420,7 +420,7 @@ extern void iwl_rs_rate_init(struct iwl_priv *priv,
  * ieee80211_register_hw
  *
  */
-extern int iwlagn_rate_control_register(void);
+int iwlagn_rate_control_register(void);
 
 /**
  * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -428,6 +428,6 @@ extern int iwlagn_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void iwlagn_rate_control_unregister(void);
+void iwlagn_rate_control_unregister(void);
 
 #endif /* __iwl_agn__rs__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 335cf1682902..465d40ee176f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -314,9 +314,8 @@ static inline u8 num_of_ant(u8 mask)
 }
 
 /* Initialize station's rate scaling information after adding station */
-extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
-                                 struct ieee80211_sta *sta,
-                                 enum ieee80211_band band);
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                          enum ieee80211_band band);
 
 /**
  * iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -328,7 +327,7 @@ extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
  * ieee80211_register_hw
  *
  */
-extern int iwl_mvm_rate_control_register(void);
+int iwl_mvm_rate_control_register(void);
 
 /**
  * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -336,7 +335,7 @@ extern int iwl_mvm_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void iwl_mvm_rate_control_unregister(void);
+void iwl_mvm_rate_control_unregister(void);
 
 struct iwl_mvm_sta;
 
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 4bb6574f4073..5d39ec880d84 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1128,7 +1128,7 @@ static int if_spi_probe(struct spi_device *spi)
1128{ 1128{
1129 struct if_spi_card *card; 1129 struct if_spi_card *card;
1130 struct lbs_private *priv = NULL; 1130 struct lbs_private *priv = NULL;
1131 struct libertas_spi_platform_data *pdata = spi->dev.platform_data; 1131 struct libertas_spi_platform_data *pdata = dev_get_platdata(&spi->dev);
1132 int err = 0; 1132 int err = 0;
1133 1133
1134 lbs_deb_enter(LBS_DEB_SPI); 1134 lbs_deb_enter(LBS_DEB_SPI);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index a6c46f3b6e3a..e47f4e3012b8 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1048,7 +1048,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
1048 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL; 1048 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
1049 unsigned long cmd_flags; 1049 unsigned long cmd_flags;
1050 unsigned long scan_pending_q_flags; 1050 unsigned long scan_pending_q_flags;
1051 uint16_t cancel_scan_cmd = false; 1051 bool cancel_scan_cmd = false;
1052 1052
1053 if ((adapter->curr_cmd) && 1053 if ((adapter->curr_cmd) &&
1054 (adapter->curr_cmd->wait_q_enabled)) { 1054 (adapter->curr_cmd->wait_q_enabled)) {
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 37f873bb342f..4e4686e6ac09 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -621,7 +621,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
621 int ret = 0; 621 int ret = 0;
622 struct ieee_types_assoc_rsp *assoc_rsp; 622 struct ieee_types_assoc_rsp *assoc_rsp;
623 struct mwifiex_bssdescriptor *bss_desc; 623 struct mwifiex_bssdescriptor *bss_desc;
624 u8 enable_data = true; 624 bool enable_data = true;
625 u16 cap_info, status_code; 625 u16 cap_info, status_code;
626 626
627 assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params; 627 assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index c2b91f566e05..9d7c9d354d34 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -882,7 +882,9 @@ mwifiex_add_card(void *card, struct semaphore *sem,
882 adapter->cmd_wait_q.status = 0; 882 adapter->cmd_wait_q.status = 0;
883 adapter->scan_wait_q_woken = false; 883 adapter->scan_wait_q_woken = false;
884 884
885 adapter->workqueue = create_workqueue("MWIFIEX_WORK_QUEUE"); 885 adapter->workqueue =
886 alloc_workqueue("MWIFIEX_WORK_QUEUE",
887 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
886 if (!adapter->workqueue) 888 if (!adapter->workqueue)
887 goto err_kmalloc; 889 goto err_kmalloc;
888 890
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 52da8ee7599a..33fa9432b241 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -93,7 +93,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
93 struct pci_dev *pdev = to_pci_dev(dev); 93 struct pci_dev *pdev = to_pci_dev(dev);
94 94
95 if (pdev) { 95 if (pdev) {
96 card = (struct pcie_service_card *) pci_get_drvdata(pdev); 96 card = pci_get_drvdata(pdev);
97 if (!card || !card->adapter) { 97 if (!card || !card->adapter) {
98 pr_err("Card or adapter structure is not valid\n"); 98 pr_err("Card or adapter structure is not valid\n");
99 return 0; 99 return 0;
@@ -128,7 +128,7 @@ static int mwifiex_pcie_resume(struct device *dev)
128 struct pci_dev *pdev = to_pci_dev(dev); 128 struct pci_dev *pdev = to_pci_dev(dev);
129 129
130 if (pdev) { 130 if (pdev) {
131 card = (struct pcie_service_card *) pci_get_drvdata(pdev); 131 card = pci_get_drvdata(pdev);
132 if (!card || !card->adapter) { 132 if (!card || !card->adapter) {
133 pr_err("Card or adapter structure is not valid\n"); 133 pr_err("Card or adapter structure is not valid\n");
134 return 0; 134 return 0;
@@ -2037,7 +2037,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
2037 goto exit; 2037 goto exit;
2038 } 2038 }
2039 2039
2040 card = (struct pcie_service_card *) pci_get_drvdata(pdev); 2040 card = pci_get_drvdata(pdev);
2041 if (!card || !card->adapter) { 2041 if (!card || !card->adapter) {
2042 pr_debug("info: %s: card=%p adapter=%p\n", __func__, card, 2042 pr_debug("info: %s: card=%p adapter=%p\n", __func__, card,
2043 card ? card->adapter : NULL); 2043 card ? card->adapter : NULL);
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index c0268b597748..7d66018a2e33 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -327,7 +327,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
327{ 327{
328 struct mwifiex_adapter *adapter = priv->adapter; 328 struct mwifiex_adapter *adapter = priv->adapter;
329 struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg; 329 struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg;
330 u16 hs_activate = false; 330 bool hs_activate = false;
331 331
332 if (!hscfg_param) 332 if (!hscfg_param)
333 /* New Activate command */ 333 /* New Activate command */
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 95fa3599b407..5dd0ccc70b86 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -708,7 +708,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
708{ 708{
709 u8 *curr = (u8 *) &resp->params.get_wmm_status; 709 u8 *curr = (u8 *) &resp->params.get_wmm_status;
710 uint16_t resp_len = le16_to_cpu(resp->size), tlv_len; 710 uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
711 int valid = true; 711 bool valid = true;
712 712
713 struct mwifiex_ie_types_data *tlv_hdr; 713 struct mwifiex_ie_types_data *tlv_hdr;
714 struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus; 714 struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 644d6e0c51cc..0f129d498fb1 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -83,11 +83,10 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
 }
 
 void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
                                  struct sk_buff *skb);
 void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
-                              struct mwifiex_ra_list_tbl *ra,
-                              int tid);
+                              struct mwifiex_ra_list_tbl *ra, int tid);
 
 int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
 void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
@@ -95,21 +94,18 @@ int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
                             struct mwifiex_ra_list_tbl *ra_list, int tid);
 
 u8 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
                                      const struct sk_buff *skb);
 void mwifiex_wmm_init(struct mwifiex_adapter *adapter);
 
-extern u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
-                                               u8 **assoc_buf,
-                                               struct ieee_types_wmm_parameter
-                                               *wmmie,
-                                               struct ieee80211_ht_cap
-                                               *htcap);
+u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
+                                        u8 **assoc_buf,
+                                        struct ieee_types_wmm_parameter *wmmie,
+                                        struct ieee80211_ht_cap *htcap);
 
 void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
-                                        struct ieee_types_wmm_parameter
-                                        *wmm_ie);
+                                        struct ieee_types_wmm_parameter *wmm_ie);
 void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
-extern int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
-                                      const struct host_cmd_ds_command *resp);
+int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
+                               const struct host_cmd_ds_command *resp);
 
 #endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index a3707fd4ef62..b953ad621e0b 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -6093,7 +6093,6 @@ err_iounmap:
6093 if (priv->sram != NULL) 6093 if (priv->sram != NULL)
6094 pci_iounmap(pdev, priv->sram); 6094 pci_iounmap(pdev, priv->sram);
6095 6095
6096 pci_set_drvdata(pdev, NULL);
6097 ieee80211_free_hw(hw); 6096 ieee80211_free_hw(hw);
6098 6097
6099err_free_reg: 6098err_free_reg:
@@ -6147,7 +6146,6 @@ static void mwl8k_remove(struct pci_dev *pdev)
6147unmap: 6146unmap:
6148 pci_iounmap(pdev, priv->regs); 6147 pci_iounmap(pdev, priv->regs);
6149 pci_iounmap(pdev, priv->sram); 6148 pci_iounmap(pdev, priv->sram);
6150 pci_set_drvdata(pdev, NULL);
6151 ieee80211_free_hw(hw); 6149 ieee80211_free_hw(hw);
6152 pci_release_regions(pdev); 6150 pci_release_regions(pdev);
6153 pci_disable_device(pdev); 6151 pci_disable_device(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 3bb936b9558c..eebd2be21ee9 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -182,23 +182,20 @@ extern int orinoco_debug;
 /* Exported prototypes */
 /********************************************************************/
 
-extern struct orinoco_private *alloc_orinocodev(
-        int sizeof_card, struct device *device,
-        int (*hard_reset)(struct orinoco_private *),
-        int (*stop_fw)(struct orinoco_private *, int));
-extern void free_orinocodev(struct orinoco_private *priv);
-extern int orinoco_init(struct orinoco_private *priv);
-extern int orinoco_if_add(struct orinoco_private *priv,
-                          unsigned long base_addr,
-                          unsigned int irq,
-                          const struct net_device_ops *ops);
-extern void orinoco_if_del(struct orinoco_private *priv);
-extern int orinoco_up(struct orinoco_private *priv);
-extern void orinoco_down(struct orinoco_private *priv);
-extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
-
-extern void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
-extern void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
+struct orinoco_private *alloc_orinocodev(int sizeof_card, struct device *device,
+                                         int (*hard_reset)(struct orinoco_private *),
+                                         int (*stop_fw)(struct orinoco_private *, int));
+void free_orinocodev(struct orinoco_private *priv);
+int orinoco_init(struct orinoco_private *priv);
+int orinoco_if_add(struct orinoco_private *priv, unsigned long base_addr,
+                   unsigned int irq, const struct net_device_ops *ops);
+void orinoco_if_del(struct orinoco_private *priv);
+int orinoco_up(struct orinoco_private *priv);
+void orinoco_down(struct orinoco_private *priv);
+irqreturn_t orinoco_interrupt(int irq, void *dev_id);
+
+void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
+void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
 
 int orinoco_process_xmit_skb(struct sk_buff *skb,
                              struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index d73fdf6185a2..ffb2469eb679 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -234,7 +234,6 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
234 free_irq(pdev->irq, priv); 234 free_irq(pdev->irq, priv);
235 235
236 fail_irq: 236 fail_irq:
237 pci_set_drvdata(pdev, NULL);
238 free_orinocodev(priv); 237 free_orinocodev(priv);
239 238
240 fail_alloc: 239 fail_alloc:
@@ -265,7 +264,6 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev)
265 264
266 orinoco_if_del(priv); 265 orinoco_if_del(priv);
267 free_irq(pdev->irq, priv); 266 free_irq(pdev->irq, priv);
268 pci_set_drvdata(pdev, NULL);
269 free_orinocodev(priv); 267 free_orinocodev(priv);
270 pci_iounmap(pdev, priv->hw.iobase); 268 pci_iounmap(pdev, priv->hw.iobase);
271 pci_iounmap(pdev, card->attr_io); 269 pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 677bf14eca84..5ae1191d2532 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -184,7 +184,6 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
184 free_irq(pdev->irq, priv); 184 free_irq(pdev->irq, priv);
185 185
186 fail_irq: 186 fail_irq:
187 pci_set_drvdata(pdev, NULL);
188 free_orinocodev(priv); 187 free_orinocodev(priv);
189 188
190 fail_alloc: 189 fail_alloc:
@@ -205,7 +204,6 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev)
205 204
206 orinoco_if_del(priv); 205 orinoco_if_del(priv);
207 free_irq(pdev->irq, priv); 206 free_irq(pdev->irq, priv);
208 pci_set_drvdata(pdev, NULL);
209 free_orinocodev(priv); 207 free_orinocodev(priv);
210 pci_iounmap(pdev, priv->hw.iobase); 208 pci_iounmap(pdev, priv->hw.iobase);
211 pci_release_regions(pdev); 209 pci_release_regions(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 2559dbd6184b..bbd36d1676ff 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -273,7 +273,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
273 free_irq(pdev->irq, priv); 273 free_irq(pdev->irq, priv);
274 274
275 fail_irq: 275 fail_irq:
276 pci_set_drvdata(pdev, NULL);
277 free_orinocodev(priv); 276 free_orinocodev(priv);
278 277
279 fail_alloc: 278 fail_alloc:
@@ -301,7 +300,6 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev)
301 300
302 orinoco_if_del(priv); 301 orinoco_if_del(priv);
303 free_irq(pdev->irq, priv); 302 free_irq(pdev->irq, priv);
304 pci_set_drvdata(pdev, NULL);
305 free_orinocodev(priv); 303 free_orinocodev(priv);
306 pci_iounmap(pdev, priv->hw.iobase); 304 pci_iounmap(pdev, priv->hw.iobase);
307 pci_iounmap(pdev, card->attr_io); 305 pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 42afeeea2c40..04b08de5fd5d 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -170,7 +170,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
170 free_irq(pdev->irq, priv); 170 free_irq(pdev->irq, priv);
171 171
172 fail_irq: 172 fail_irq:
173 pci_set_drvdata(pdev, NULL);
174 free_orinocodev(priv); 173 free_orinocodev(priv);
175 174
176 fail_alloc: 175 fail_alloc:
@@ -195,7 +194,6 @@ static void orinoco_tmd_remove_one(struct pci_dev *pdev)
195 194
196 orinoco_if_del(priv); 195 orinoco_if_del(priv);
197 free_irq(pdev->irq, priv); 196 free_irq(pdev->irq, priv);
198 pci_set_drvdata(pdev, NULL);
199 free_orinocodev(priv); 197 free_orinocodev(priv);
200 pci_iounmap(pdev, priv->hw.iobase); 198 pci_iounmap(pdev, priv->hw.iobase);
201 pci_iounmap(pdev, card->bridge_io); 199 pci_iounmap(pdev, card->bridge_io);
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 57e3af8ebb4b..f9a07b0d83ac 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -631,7 +631,6 @@ static int p54p_probe(struct pci_dev *pdev,
631 iounmap(priv->map); 631 iounmap(priv->map);
632 632
633 err_free_dev: 633 err_free_dev:
634 pci_set_drvdata(pdev, NULL);
635 p54_free_common(dev); 634 p54_free_common(dev);
636 635
637 err_free_reg: 636 err_free_reg:
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 7fc46f26cf2b..de15171e2cd8 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -636,7 +636,7 @@ static int p54spi_probe(struct spi_device *spi)
636 gpio_direction_input(p54spi_gpio_irq); 636 gpio_direction_input(p54spi_gpio_irq);
637 637
638 ret = request_irq(gpio_to_irq(p54spi_gpio_irq), 638 ret = request_irq(gpio_to_irq(p54spi_gpio_irq),
639 p54spi_interrupt, IRQF_DISABLED, "p54spi", 639 p54spi_interrupt, 0, "p54spi",
640 priv->spi); 640 priv->spi);
641 if (ret < 0) { 641 if (ret < 0) {
642 dev_err(&priv->spi->dev, "request_irq() failed"); 642 dev_err(&priv->spi->dev, "request_irq() failed");
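The p54spi change swaps IRQF_DISABLED for 0. IRQF_DISABLED has been a no-op for years (handlers already run with interrupts disabled), so the flag only adds noise. A minimal request/free pairing under that assumption, with hypothetical wrapper names and assuming the driver's p54s_priv private struct, might be:

/* Sketch: request the GPIO-backed interrupt without the obsolete
 * IRQF_DISABLED flag; 0 means "no special flags". Wrapper names are
 * illustrative only. */
static int sketch_request_p54spi_irq(struct p54s_priv *priv)
{
	return request_irq(gpio_to_irq(p54spi_gpio_irq), p54spi_interrupt,
			   0, "p54spi", priv->spi);
}

static void sketch_free_p54spi_irq(struct p54s_priv *priv)
{
	free_irq(gpio_to_irq(p54spi_gpio_irq), priv->spi);
}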
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 1c22b81e6ef3..8863a6cb2388 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -183,7 +183,7 @@ prism54_update_stats(struct work_struct *work)
183 data = r.ptr; 183 data = r.ptr;
184 184
185 /* copy this MAC to the bss */ 185 /* copy this MAC to the bss */
186 memcpy(bss.address, data, 6); 186 memcpy(bss.address, data, ETH_ALEN);
187 kfree(data); 187 kfree(data);
188 188
189 /* now ask for the corresponding bss */ 189 /* now ask for the corresponding bss */
@@ -531,7 +531,7 @@ prism54_set_wap(struct net_device *ndev, struct iw_request_info *info,
531 return -EINVAL; 531 return -EINVAL;
532 532
533 /* prepare the structure for the set object */ 533 /* prepare the structure for the set object */
534 memcpy(&bssid[0], awrq->sa_data, 6); 534 memcpy(&bssid[0], awrq->sa_data, ETH_ALEN);
535 535
536 /* set the bssid -- does this make sense when in AP mode? */ 536 /* set the bssid -- does this make sense when in AP mode? */
537 rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid); 537 rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid);
@@ -550,7 +550,7 @@ prism54_get_wap(struct net_device *ndev, struct iw_request_info *info,
550 int rvalue; 550 int rvalue;
551 551
552 rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r); 552 rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
553 memcpy(awrq->sa_data, r.ptr, 6); 553 memcpy(awrq->sa_data, r.ptr, ETH_ALEN);
554 awrq->sa_family = ARPHRD_ETHER; 554 awrq->sa_family = ARPHRD_ETHER;
555 kfree(r.ptr); 555 kfree(r.ptr);
556 556
@@ -582,7 +582,7 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
582 size_t wpa_ie_len; 582 size_t wpa_ie_len;
583 583
584 /* The first entry must be the MAC address */ 584 /* The first entry must be the MAC address */
585 memcpy(iwe.u.ap_addr.sa_data, bss->address, 6); 585 memcpy(iwe.u.ap_addr.sa_data, bss->address, ETH_ALEN);
586 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 586 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
587 iwe.cmd = SIOCGIWAP; 587 iwe.cmd = SIOCGIWAP;
588 current_ev = iwe_stream_add_event(info, current_ev, end_buf, 588 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
@@ -2489,7 +2489,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
2489 &((struct sockaddr *) addr)->sa_data); 2489 &((struct sockaddr *) addr)->sa_data);
2490 if (!ret) 2490 if (!ret)
2491 memcpy(priv->ndev->dev_addr, 2491 memcpy(priv->ndev->dev_addr,
2492 &((struct sockaddr *) addr)->sa_data, 6); 2492 &((struct sockaddr *) addr)->sa_data, ETH_ALEN);
2493 2493
2494 return ret; 2494 return ret;
2495} 2495}
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5970ff6f40cc..41a16d30c79c 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -837,7 +837,7 @@ islpci_setup(struct pci_dev *pdev)
837 /* ndev->set_multicast_list = &islpci_set_multicast_list; */ 837 /* ndev->set_multicast_list = &islpci_set_multicast_list; */
838 ndev->addr_len = ETH_ALEN; 838 ndev->addr_len = ETH_ALEN;
839 /* Get a non-zero dummy MAC address for nameif. Jean II */ 839 /* Get a non-zero dummy MAC address for nameif. Jean II */
840 memcpy(ndev->dev_addr, dummy_mac, 6); 840 memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN);
841 841
842 ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT; 842 ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
843 843
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index a01606b36e03..056af38e72e3 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -682,7 +682,7 @@ mgt_update_addr(islpci_private *priv)
682 isl_oid[GEN_OID_MACADDRESS].size, &res); 682 isl_oid[GEN_OID_MACADDRESS].size, &res);
683 683
684 if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR)) 684 if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR))
685 memcpy(priv->ndev->dev_addr, res->data, 6); 685 memcpy(priv->ndev->dev_addr, res->data, ETH_ALEN);
686 else 686 else
687 ret = -EIO; 687 ret = -EIO;
688 if (res) 688 if (res)
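All of the prism54 hunks make the same substitution: the bare constant 6 becomes ETH_ALEN from <linux/if_ether.h>, so the length of a MAC address copy is named rather than magic. A minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/if_ether.h>	/* ETH_ALEN == 6 */

/* Sketch: copy a MAC address using the named constant instead of a
 * bare 6; dst and src must each hold ETH_ALEN bytes. */
static void sketch_copy_mac(u8 *dst, const u8 *src)
{
	memcpy(dst, src, ETH_ALEN);
}

Newer kernels also offer ether_addr_copy() for suitably aligned buffers, but memcpy with ETH_ALEN is the form these hunks use.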
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 68dbbb9c6d12..a18b0051a745 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -219,6 +219,7 @@ config RT2X00_LIB_USB
219 219
220config RT2X00_LIB 220config RT2X00_LIB
221 tristate 221 tristate
222 select AVERAGE
222 223
223config RT2X00_LIB_FIRMWARE 224config RT2X00_LIB_FIRMWARE
224 boolean 225 boolean
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index fa33b5edf931..aab6b5e4f5dd 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -52,6 +52,7 @@
52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392) 52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
53 * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) 53 * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
54 * RF5592 2.4G/5G 2T2R 54 * RF5592 2.4G/5G 2T2R
55 * RF3070 2.4G 1T1R
55 * RF5360 2.4G 1T1R 56 * RF5360 2.4G 1T1R
56 * RF5370 2.4G 1T1R 57 * RF5370 2.4G 1T1R
57 * RF5390 2.4G 1T1R 58 * RF5390 2.4G 1T1R
@@ -70,6 +71,7 @@
70#define RF3322 0x000c 71#define RF3322 0x000c
71#define RF3053 0x000d 72#define RF3053 0x000d
72#define RF5592 0x000f 73#define RF5592 0x000f
74#define RF3070 0x3070
73#define RF3290 0x3290 75#define RF3290 0x3290
74#define RF5360 0x5360 76#define RF5360 0x5360
75#define RF5370 0x5370 77#define RF5370 0x5370
@@ -122,7 +124,7 @@
122/* 124/*
123 * MAC_CSR0_3290: MAC_CSR0 for RT3290 to identity MAC version number. 125 * MAC_CSR0_3290: MAC_CSR0 for RT3290 to identity MAC version number.
124 */ 126 */
125#define MAC_CSR0_3290 0x0000 127#define MAC_CSR0_3290 0x0000
126 128
127/* 129/*
128 * E2PROM_CSR: PCI EEPROM control register. 130 * E2PROM_CSR: PCI EEPROM control register.
@@ -211,17 +213,17 @@
211/* 213/*
212 * COEX_CFG_0 214 * COEX_CFG_0
213 */ 215 */
214#define COEX_CFG0 0x0040 216#define COEX_CFG0 0x0040
215#define COEX_CFG_ANT FIELD32(0xff000000) 217#define COEX_CFG_ANT FIELD32(0xff000000)
216/* 218/*
217 * COEX_CFG_1 219 * COEX_CFG_1
218 */ 220 */
219#define COEX_CFG1 0x0044 221#define COEX_CFG1 0x0044
220 222
221/* 223/*
222 * COEX_CFG_2 224 * COEX_CFG_2
223 */ 225 */
224#define COEX_CFG2 0x0048 226#define COEX_CFG2 0x0048
225#define BT_COEX_CFG1 FIELD32(0xff000000) 227#define BT_COEX_CFG1 FIELD32(0xff000000)
226#define BT_COEX_CFG0 FIELD32(0x00ff0000) 228#define BT_COEX_CFG0 FIELD32(0x00ff0000)
227#define WL_COEX_CFG1 FIELD32(0x0000ff00) 229#define WL_COEX_CFG1 FIELD32(0x0000ff00)
@@ -235,8 +237,8 @@
235#define PLL_RESERVED_INPUT2 FIELD32(0x0000ff00) 237#define PLL_RESERVED_INPUT2 FIELD32(0x0000ff00)
236#define PLL_CONTROL FIELD32(0x00070000) 238#define PLL_CONTROL FIELD32(0x00070000)
237#define PLL_LPF_R1 FIELD32(0x00080000) 239#define PLL_LPF_R1 FIELD32(0x00080000)
238#define PLL_LPF_C1_CTRL FIELD32(0x00300000) 240#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
239#define PLL_LPF_C2_CTRL FIELD32(0x00c00000) 241#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
240#define PLL_CP_CURRENT_CTRL FIELD32(0x03000000) 242#define PLL_CP_CURRENT_CTRL FIELD32(0x03000000)
241#define PLL_PFD_DELAY_CTRL FIELD32(0x0c000000) 243#define PLL_PFD_DELAY_CTRL FIELD32(0x0c000000)
242#define PLL_LOCK_CTRL FIELD32(0x70000000) 244#define PLL_LOCK_CTRL FIELD32(0x70000000)
@@ -2164,7 +2166,7 @@ struct mac_iveiv_entry {
2164 */ 2166 */
2165#define RFCSR6_R1 FIELD8(0x03) 2167#define RFCSR6_R1 FIELD8(0x03)
2166#define RFCSR6_R2 FIELD8(0x40) 2168#define RFCSR6_R2 FIELD8(0x40)
2167#define RFCSR6_TXDIV FIELD8(0x0c) 2169#define RFCSR6_TXDIV FIELD8(0x0c)
2168/* bits for RF3053 */ 2170/* bits for RF3053 */
2169#define RFCSR6_VCO_IC FIELD8(0xc0) 2171#define RFCSR6_VCO_IC FIELD8(0xc0)
2170 2172
@@ -2202,13 +2204,13 @@ struct mac_iveiv_entry {
2202 * RFCSR 12: 2204 * RFCSR 12:
2203 */ 2205 */
2204#define RFCSR12_TX_POWER FIELD8(0x1f) 2206#define RFCSR12_TX_POWER FIELD8(0x1f)
2205#define RFCSR12_DR0 FIELD8(0xe0) 2207#define RFCSR12_DR0 FIELD8(0xe0)
2206 2208
2207/* 2209/*
2208 * RFCSR 13: 2210 * RFCSR 13:
2209 */ 2211 */
2210#define RFCSR13_TX_POWER FIELD8(0x1f) 2212#define RFCSR13_TX_POWER FIELD8(0x1f)
2211#define RFCSR13_DR0 FIELD8(0xe0) 2213#define RFCSR13_DR0 FIELD8(0xe0)
2212 2214
2213/* 2215/*
2214 * RFCSR 15: 2216 * RFCSR 15:
@@ -2226,7 +2228,7 @@ struct mac_iveiv_entry {
2226#define RFCSR17_TXMIXER_GAIN FIELD8(0x07) 2228#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
2227#define RFCSR17_TX_LO1_EN FIELD8(0x08) 2229#define RFCSR17_TX_LO1_EN FIELD8(0x08)
2228#define RFCSR17_R FIELD8(0x20) 2230#define RFCSR17_R FIELD8(0x20)
2229#define RFCSR17_CODE FIELD8(0x7f) 2231#define RFCSR17_CODE FIELD8(0x7f)
2230 2232
2231/* RFCSR 18 */ 2233/* RFCSR 18 */
2232#define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40) 2234#define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40)
@@ -2449,7 +2451,7 @@ enum rt2800_eeprom_word {
2449 */ 2451 */
2450#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f) 2452#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f)
2451#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0) 2453#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0)
2452#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00) 2454#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
2453 2455
2454/* 2456/*
2455 * EEPROM NIC Configuration 1 2457 * EEPROM NIC Configuration 1
@@ -2471,18 +2473,18 @@ enum rt2800_eeprom_word {
2471 * DAC_TEST: 0: disable, 1: enable 2473 * DAC_TEST: 0: disable, 1: enable
2472 */ 2474 */
2473#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001) 2475#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001)
2474#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002) 2476#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
2475#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004) 2477#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
2476#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008) 2478#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008)
2477#define EEPROM_NIC_CONF1_CARDBUS_ACCEL FIELD16(0x0010) 2479#define EEPROM_NIC_CONF1_CARDBUS_ACCEL FIELD16(0x0010)
2478#define EEPROM_NIC_CONF1_BW40M_SB_2G FIELD16(0x0020) 2480#define EEPROM_NIC_CONF1_BW40M_SB_2G FIELD16(0x0020)
2479#define EEPROM_NIC_CONF1_BW40M_SB_5G FIELD16(0x0040) 2481#define EEPROM_NIC_CONF1_BW40M_SB_5G FIELD16(0x0040)
2480#define EEPROM_NIC_CONF1_WPS_PBC FIELD16(0x0080) 2482#define EEPROM_NIC_CONF1_WPS_PBC FIELD16(0x0080)
2481#define EEPROM_NIC_CONF1_BW40M_2G FIELD16(0x0100) 2483#define EEPROM_NIC_CONF1_BW40M_2G FIELD16(0x0100)
2482#define EEPROM_NIC_CONF1_BW40M_5G FIELD16(0x0200) 2484#define EEPROM_NIC_CONF1_BW40M_5G FIELD16(0x0200)
2483#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x400) 2485#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x400)
2484#define EEPROM_NIC_CONF1_ANT_DIVERSITY FIELD16(0x1800) 2486#define EEPROM_NIC_CONF1_ANT_DIVERSITY FIELD16(0x1800)
2485#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000) 2487#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
2486#define EEPROM_NIC_CONF1_BT_COEXIST FIELD16(0x4000) 2488#define EEPROM_NIC_CONF1_BT_COEXIST FIELD16(0x4000)
2487#define EEPROM_NIC_CONF1_DAC_TEST FIELD16(0x8000) 2489#define EEPROM_NIC_CONF1_DAC_TEST FIELD16(0x8000)
2488 2490
@@ -2521,9 +2523,9 @@ enum rt2800_eeprom_word {
2521 * TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream 2523 * TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
2522 * CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved 2524 * CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
2523 */ 2525 */
2524#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f) 2526#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
2525#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0) 2527#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
2526#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600) 2528#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
2527 2529
2528/* 2530/*
2529 * EEPROM LNA 2531 * EEPROM LNA
@@ -2790,7 +2792,7 @@ enum rt2800_eeprom_word {
2790#define MCU_CURRENT 0x36 2792#define MCU_CURRENT 0x36
2791#define MCU_LED 0x50 2793#define MCU_LED 0x50
2792#define MCU_LED_STRENGTH 0x51 2794#define MCU_LED_STRENGTH 0x51
2793#define MCU_LED_AG_CONF 0x52 2795#define MCU_LED_AG_CONF 0x52
2794#define MCU_LED_ACT_CONF 0x53 2796#define MCU_LED_ACT_CONF 0x53
2795#define MCU_LED_LED_POLARITY 0x54 2797#define MCU_LED_LED_POLARITY 0x54
2796#define MCU_RADAR 0x60 2798#define MCU_RADAR 0x60
@@ -2799,7 +2801,7 @@ enum rt2800_eeprom_word {
2799#define MCU_FREQ_OFFSET 0x74 2801#define MCU_FREQ_OFFSET 0x74
2800#define MCU_BBP_SIGNAL 0x80 2802#define MCU_BBP_SIGNAL 0x80
2801#define MCU_POWER_SAVE 0x83 2803#define MCU_POWER_SAVE 0x83
2802#define MCU_BAND_SELECT 0x91 2804#define MCU_BAND_SELECT 0x91
2803 2805
2804/* 2806/*
2805 * MCU mailbox tokens 2807 * MCU mailbox tokens
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88ce656f96cd..aa8789423937 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -278,12 +278,9 @@ static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = {
278 [EEPROM_LNA] = 0x0026, 278 [EEPROM_LNA] = 0x0026,
279 [EEPROM_EXT_LNA2] = 0x0027, 279 [EEPROM_EXT_LNA2] = 0x0027,
280 [EEPROM_RSSI_BG] = 0x0028, 280 [EEPROM_RSSI_BG] = 0x0028,
281 [EEPROM_TXPOWER_DELTA] = 0x0028, /* Overlaps with RSSI_BG */
282 [EEPROM_RSSI_BG2] = 0x0029, 281 [EEPROM_RSSI_BG2] = 0x0029,
283 [EEPROM_TXMIXER_GAIN_BG] = 0x0029, /* Overlaps with RSSI_BG2 */
284 [EEPROM_RSSI_A] = 0x002a, 282 [EEPROM_RSSI_A] = 0x002a,
285 [EEPROM_RSSI_A2] = 0x002b, 283 [EEPROM_RSSI_A2] = 0x002b,
286 [EEPROM_TXMIXER_GAIN_A] = 0x002b, /* Overlaps with RSSI_A2 */
287 [EEPROM_TXPOWER_BG1] = 0x0030, 284 [EEPROM_TXPOWER_BG1] = 0x0030,
288 [EEPROM_TXPOWER_BG2] = 0x0037, 285 [EEPROM_TXPOWER_BG2] = 0x0037,
289 [EEPROM_EXT_TXPOWER_BG3] = 0x003e, 286 [EEPROM_EXT_TXPOWER_BG3] = 0x003e,
@@ -1783,7 +1780,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1783 rt2800_bbp_read(rt2x00dev, 3, &r3); 1780 rt2800_bbp_read(rt2x00dev, 3, &r3);
1784 1781
1785 if (rt2x00_rt(rt2x00dev, RT3572) && 1782 if (rt2x00_rt(rt2x00dev, RT3572) &&
1786 test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) 1783 rt2x00_has_cap_bt_coexist(rt2x00dev))
1787 rt2800_config_3572bt_ant(rt2x00dev); 1784 rt2800_config_3572bt_ant(rt2x00dev);
1788 1785
1789 /* 1786 /*
@@ -1795,7 +1792,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1795 break; 1792 break;
1796 case 2: 1793 case 2:
1797 if (rt2x00_rt(rt2x00dev, RT3572) && 1794 if (rt2x00_rt(rt2x00dev, RT3572) &&
1798 test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) 1795 rt2x00_has_cap_bt_coexist(rt2x00dev))
1799 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 1); 1796 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 1);
1800 else 1797 else
1801 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2); 1798 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
@@ -1825,7 +1822,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1825 break; 1822 break;
1826 case 2: 1823 case 2:
1827 if (rt2x00_rt(rt2x00dev, RT3572) && 1824 if (rt2x00_rt(rt2x00dev, RT3572) &&
1828 test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { 1825 rt2x00_has_cap_bt_coexist(rt2x00dev)) {
1829 rt2x00_set_field8(&r3, BBP3_RX_ADC, 1); 1826 rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
1830 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1827 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
1831 rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); 1828 rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
@@ -2029,13 +2026,6 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
2029 rt2x00dev->default_ant.tx_chain_num <= 2); 2026 rt2x00dev->default_ant.tx_chain_num <= 2);
2030 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); 2027 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
2031 2028
2032 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
2033 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
2034 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2035 msleep(1);
2036 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
2037 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2038
2039 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); 2029 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
2040 rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset); 2030 rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
2041 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); 2031 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -2141,7 +2131,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
2141 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0); 2131 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
2142 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0); 2132 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
2143 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0); 2133 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
2144 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { 2134 if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
2145 if (rf->channel <= 14) { 2135 if (rf->channel <= 14) {
2146 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); 2136 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
2147 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); 2137 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
@@ -2674,7 +2664,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
2674 if (rf->channel <= 14) { 2664 if (rf->channel <= 14) {
2675 int idx = rf->channel-1; 2665 int idx = rf->channel-1;
2676 2666
2677 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { 2667 if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
2678 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { 2668 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
2679 /* r55/r59 value array of channel 1~14 */ 2669 /* r55/r59 value array of channel 1~14 */
2680 static const char r55_bt_rev[] = {0x83, 0x83, 2670 static const char r55_bt_rev[] = {0x83, 0x83,
@@ -3152,6 +3142,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
3152 case RF3322: 3142 case RF3322:
3153 rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info); 3143 rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
3154 break; 3144 break;
3145 case RF3070:
3155 case RF5360: 3146 case RF5360:
3156 case RF5370: 3147 case RF5370:
3157 case RF5372: 3148 case RF5372:
@@ -3166,7 +3157,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
3166 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 3157 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
3167 } 3158 }
3168 3159
3169 if (rt2x00_rf(rt2x00dev, RF3290) || 3160 if (rt2x00_rf(rt2x00dev, RF3070) ||
3161 rt2x00_rf(rt2x00dev, RF3290) ||
3170 rt2x00_rf(rt2x00dev, RF3322) || 3162 rt2x00_rf(rt2x00dev, RF3322) ||
3171 rt2x00_rf(rt2x00dev, RF5360) || 3163 rt2x00_rf(rt2x00dev, RF5360) ||
3172 rt2x00_rf(rt2x00dev, RF5370) || 3164 rt2x00_rf(rt2x00dev, RF5370) ||
@@ -3218,8 +3210,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
3218 if (rf->channel <= 14) { 3210 if (rf->channel <= 14) {
3219 if (!rt2x00_rt(rt2x00dev, RT5390) && 3211 if (!rt2x00_rt(rt2x00dev, RT5390) &&
3220 !rt2x00_rt(rt2x00dev, RT5392)) { 3212 !rt2x00_rt(rt2x00dev, RT5392)) {
3221 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, 3213 if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
3222 &rt2x00dev->cap_flags)) {
3223 rt2800_bbp_write(rt2x00dev, 82, 0x62); 3214 rt2800_bbp_write(rt2x00dev, 82, 0x62);
3224 rt2800_bbp_write(rt2x00dev, 75, 0x46); 3215 rt2800_bbp_write(rt2x00dev, 75, 0x46);
3225 } else { 3216 } else {
@@ -3244,7 +3235,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
3244 if (rt2x00_rt(rt2x00dev, RT3593)) 3235 if (rt2x00_rt(rt2x00dev, RT3593))
3245 rt2800_bbp_write(rt2x00dev, 83, 0x9a); 3236 rt2800_bbp_write(rt2x00dev, 83, 0x9a);
3246 3237
3247 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) 3238 if (rt2x00_has_cap_external_lna_a(rt2x00dev))
3248 rt2800_bbp_write(rt2x00dev, 75, 0x46); 3239 rt2800_bbp_write(rt2x00dev, 75, 0x46);
3249 else 3240 else
3250 rt2800_bbp_write(rt2x00dev, 75, 0x50); 3241 rt2800_bbp_write(rt2x00dev, 75, 0x50);
@@ -3280,7 +3271,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
3280 /* Turn on primary PAs */ 3271 /* Turn on primary PAs */
3281 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, 3272 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
3282 rf->channel > 14); 3273 rf->channel > 14);
3283 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) 3274 if (rt2x00_has_cap_bt_coexist(rt2x00dev))
3284 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1); 3275 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
3285 else 3276 else
3286 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 3277 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
@@ -3311,33 +3302,50 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
3311 3302
3312 rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin); 3303 rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
3313 3304
3314 if (rt2x00_rt(rt2x00dev, RT3572)) 3305 if (rt2x00_rt(rt2x00dev, RT3572)) {
3315 rt2800_rfcsr_write(rt2x00dev, 8, 0x80); 3306 rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
3316 3307
3308 /* AGC init */
3309 if (rf->channel <= 14)
3310 reg = 0x1c + (2 * rt2x00dev->lna_gain);
3311 else
3312 reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);
3313
3314 rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
3315 }
3316
3317 if (rt2x00_rt(rt2x00dev, RT3593)) { 3317 if (rt2x00_rt(rt2x00dev, RT3593)) {
3318 if (rt2x00_is_usb(rt2x00dev)) { 3318 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
3319 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
3320 3319
3321 /* Band selection. GPIO #8 controls all paths */ 3320 /* Band selection */
3321 if (rt2x00_is_usb(rt2x00dev) ||
3322 rt2x00_is_pcie(rt2x00dev)) {
3323 /* GPIO #8 controls all paths */
3322 rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0); 3324 rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
3323 if (rf->channel <= 14) 3325 if (rf->channel <= 14)
3324 rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1); 3326 rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
3325 else 3327 else
3326 rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0); 3328 rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
3329 }
3327 3330
3331 /* LNA PE control. */
3332 if (rt2x00_is_usb(rt2x00dev)) {
3333 /* GPIO #4 controls PE0 and PE1,
3334 * GPIO #7 controls PE2
3335 */
3328 rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0); 3336 rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
3329 rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0); 3337 rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
3330 3338
3331 /* LNA PE control.
3332 * GPIO #4 controls PE0 and PE1,
3333 * GPIO #7 controls PE2
3334 */
3335 rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1); 3339 rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
3336 rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1); 3340 rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
3337 3341 } else if (rt2x00_is_pcie(rt2x00dev)) {
3338 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); 3342 /* GPIO #4 controls PE0, PE1 and PE2 */
3343 rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
3344 rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
3339 } 3345 }
3340 3346
3347 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
3348
3341 /* AGC init */ 3349 /* AGC init */
3342 if (rf->channel <= 14) 3350 if (rf->channel <= 14)
3343 reg = 0x1c + 2 * rt2x00dev->lna_gain; 3351 reg = 0x1c + 2 * rt2x00dev->lna_gain;
@@ -3565,7 +3573,7 @@ static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
3565{ 3573{
3566 int delta; 3574 int delta;
3567 3575
3568 if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) 3576 if (rt2x00_has_cap_power_limit(rt2x00dev))
3569 return 0; 3577 return 0;
3570 3578
3571 /* 3579 /*
@@ -3594,7 +3602,7 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
3594 if (rt2x00_rt(rt2x00dev, RT3593)) 3602 if (rt2x00_rt(rt2x00dev, RT3593))
3595 return min_t(u8, txpower, 0xc); 3603 return min_t(u8, txpower, 0xc);
3596 3604
3597 if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) { 3605 if (rt2x00_has_cap_power_limit(rt2x00dev)) {
3598 /* 3606 /*
3599 * Check if eirp txpower exceed txpower_limit. 3607 * Check if eirp txpower exceed txpower_limit.
3600 * We use OFDM 6M as criterion and its eirp txpower 3608 * We use OFDM 6M as criterion and its eirp txpower
@@ -4264,6 +4272,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
4264 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); 4272 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
4265 break; 4273 break;
4266 case RF3053: 4274 case RF3053:
4275 case RF3070:
4267 case RF3290: 4276 case RF3290:
4268 case RF5360: 4277 case RF5360:
4269 case RF5370: 4278 case RF5370:
@@ -4405,6 +4414,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
4405 rt2x00_rt(rt2x00dev, RT3290) || 4414 rt2x00_rt(rt2x00dev, RT3290) ||
4406 rt2x00_rt(rt2x00dev, RT3390) || 4415 rt2x00_rt(rt2x00dev, RT3390) ||
4407 rt2x00_rt(rt2x00dev, RT3572) || 4416 rt2x00_rt(rt2x00dev, RT3572) ||
4417 rt2x00_rt(rt2x00dev, RT3593) ||
4408 rt2x00_rt(rt2x00dev, RT5390) || 4418 rt2x00_rt(rt2x00dev, RT5390) ||
4409 rt2x00_rt(rt2x00dev, RT5392) || 4419 rt2x00_rt(rt2x00dev, RT5392) ||
4410 rt2x00_rt(rt2x00dev, RT5592)) 4420 rt2x00_rt(rt2x00dev, RT5592))
@@ -4412,8 +4422,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
4412 else 4422 else
4413 vgc = 0x2e + rt2x00dev->lna_gain; 4423 vgc = 0x2e + rt2x00dev->lna_gain;
4414 } else { /* 5GHZ band */ 4424 } else { /* 5GHZ band */
4415 if (rt2x00_rt(rt2x00dev, RT3572)) 4425 if (rt2x00_rt(rt2x00dev, RT3593))
4416 vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3; 4426 vgc = 0x20 + (rt2x00dev->lna_gain * 5) / 3;
4417 else if (rt2x00_rt(rt2x00dev, RT5592)) 4427 else if (rt2x00_rt(rt2x00dev, RT5592))
4418 vgc = 0x24 + (2 * rt2x00dev->lna_gain); 4428 vgc = 0x24 + (2 * rt2x00dev->lna_gain);
4419 else { 4429 else {
@@ -4431,11 +4441,17 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
4431 struct link_qual *qual, u8 vgc_level) 4441 struct link_qual *qual, u8 vgc_level)
4432{ 4442{
4433 if (qual->vgc_level != vgc_level) { 4443 if (qual->vgc_level != vgc_level) {
4434 if (rt2x00_rt(rt2x00dev, RT5592)) { 4444 if (rt2x00_rt(rt2x00dev, RT3572) ||
4445 rt2x00_rt(rt2x00dev, RT3593)) {
4446 rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
4447 vgc_level);
4448 } else if (rt2x00_rt(rt2x00dev, RT5592)) {
4435 rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a); 4449 rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a);
4436 rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level); 4450 rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
4437 } else 4451 } else {
4438 rt2800_bbp_write(rt2x00dev, 66, vgc_level); 4452 rt2800_bbp_write(rt2x00dev, 66, vgc_level);
4453 }
4454
4439 qual->vgc_level = vgc_level; 4455 qual->vgc_level = vgc_level;
4440 qual->vgc_level_reg = vgc_level; 4456 qual->vgc_level_reg = vgc_level;
4441 } 4457 }
@@ -4454,17 +4470,35 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
4454 4470
4455 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) 4471 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
4456 return; 4472 return;
4457 /* 4473
4458 * When RSSI is better then -80 increase VGC level with 0x10, except 4474 /* When RSSI is better than a certain threshold, increase VGC
4459 * for rt5592 chip. 4475 * with a chip specific value in order to improve the balance
4476 * between sensibility and noise isolation.
4460 */ 4477 */
4461 4478
4462 vgc = rt2800_get_default_vgc(rt2x00dev); 4479 vgc = rt2800_get_default_vgc(rt2x00dev);
4463 4480
4464 if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65) 4481 switch (rt2x00dev->chip.rt) {
4465 vgc += 0x20; 4482 case RT3572:
4466 else if (qual->rssi > -80) 4483 case RT3593:
4467 vgc += 0x10; 4484 if (qual->rssi > -65) {
4485 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
4486 vgc += 0x20;
4487 else
4488 vgc += 0x10;
4489 }
4490 break;
4491
4492 case RT5592:
4493 if (qual->rssi > -65)
4494 vgc += 0x20;
4495 break;
4496
4497 default:
4498 if (qual->rssi > -80)
4499 vgc += 0x10;
4500 break;
4501 }
4468 4502
4469 rt2800_set_vgc(rt2x00dev, qual, vgc); 4503 rt2800_set_vgc(rt2x00dev, qual, vgc);
4470} 4504}
@@ -5489,7 +5523,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
5489 ant = (div_mode == 3) ? 1 : 0; 5523 ant = (div_mode == 3) ? 1 : 0;
5490 5524
5491 /* check if this is a Bluetooth combo card */ 5525 /* check if this is a Bluetooth combo card */
5492 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { 5526 if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
5493 u32 reg; 5527 u32 reg;
5494 5528
5495 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg); 5529 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
@@ -5798,7 +5832,7 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
5798 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 5832 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
5799 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 5833 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
5800 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { 5834 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
5801 if (!test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) 5835 if (!rt2x00_has_cap_external_lna_bg(rt2x00dev))
5802 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); 5836 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
5803 } 5837 }
5804 5838
@@ -5985,7 +6019,7 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
5985 rt2800_rfcsr_write(rt2x00dev, 20, 0xba); 6019 rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
5986 rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); 6020 rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
5987 rt2800_rfcsr_write(rt2x00dev, 24, 0x16); 6021 rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
5988 rt2800_rfcsr_write(rt2x00dev, 25, 0x01); 6022 rt2800_rfcsr_write(rt2x00dev, 25, 0x03);
5989 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f); 6023 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
5990 6024
5991 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) { 6025 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -6441,7 +6475,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
6441 rt2800_rfcsr_write(rt2x00dev, 28, 0x00); 6475 rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
6442 rt2800_rfcsr_write(rt2x00dev, 29, 0x10); 6476 rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
6443 6477
6444 rt2800_rfcsr_write(rt2x00dev, 30, 0x00); 6478 rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
6445 rt2800_rfcsr_write(rt2x00dev, 31, 0x80); 6479 rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
6446 rt2800_rfcsr_write(rt2x00dev, 32, 0x80); 6480 rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
6447 rt2800_rfcsr_write(rt2x00dev, 33, 0x00); 6481 rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
@@ -6479,7 +6513,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
6479 rt2800_rfcsr_write(rt2x00dev, 56, 0x22); 6513 rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
6480 rt2800_rfcsr_write(rt2x00dev, 57, 0x80); 6514 rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
6481 rt2800_rfcsr_write(rt2x00dev, 58, 0x7f); 6515 rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
6482 rt2800_rfcsr_write(rt2x00dev, 59, 0x63); 6516 rt2800_rfcsr_write(rt2x00dev, 59, 0x8f);
6483 6517
6484 rt2800_rfcsr_write(rt2x00dev, 60, 0x45); 6518 rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
6485 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 6519 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
@@ -6499,7 +6533,6 @@ static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
6499 rt2800_rf_init_calibration(rt2x00dev, 2); 6533 rt2800_rf_init_calibration(rt2x00dev, 2);
6500 6534
6501 rt2800_rfcsr_write(rt2x00dev, 1, 0x17); 6535 rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
6502 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
6503 rt2800_rfcsr_write(rt2x00dev, 3, 0x88); 6536 rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
6504 rt2800_rfcsr_write(rt2x00dev, 5, 0x10); 6537 rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
6505 rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); 6538 rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
@@ -6653,17 +6686,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
6653 u16 word; 6686 u16 word;
6654 6687
6655 /* 6688 /*
6656 * Initialize all registers. 6689 * Initialize MAC registers.
6657 */ 6690 */
6658 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) || 6691 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
6659 rt2800_init_registers(rt2x00dev))) 6692 rt2800_init_registers(rt2x00dev)))
6660 return -EIO; 6693 return -EIO;
6661 6694
6695 /*
6696 * Wait BBP/RF to wake up.
6697 */
6662 if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev))) 6698 if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
6663 return -EIO; 6699 return -EIO;
6664 6700
6665 /* 6701 /*
6666 * Send signal to firmware during boot time. 6702 * Send signal during boot time to initialize firmware.
6667 */ 6703 */
6668 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); 6704 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
6669 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 6705 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
@@ -6672,9 +6708,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
6672 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0); 6708 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
6673 msleep(1); 6709 msleep(1);
6674 6710
6711 /*
6712 * Make sure BBP is up and running.
6713 */
6675 if (unlikely(rt2800_wait_bbp_ready(rt2x00dev))) 6714 if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
6676 return -EIO; 6715 return -EIO;
6677 6716
6717 /*
6718 * Initialize BBP/RF registers.
6719 */
6678 rt2800_init_bbp(rt2x00dev); 6720 rt2800_init_bbp(rt2x00dev);
6679 rt2800_init_rfcsr(rt2x00dev); 6721 rt2800_init_rfcsr(rt2x00dev);
6680 6722
@@ -7021,6 +7063,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
7021 case RF3022: 7063 case RF3022:
7022 case RF3052: 7064 case RF3052:
7023 case RF3053: 7065 case RF3053:
7066 case RF3070:
7024 case RF3290: 7067 case RF3290:
7025 case RF3320: 7068 case RF3320:
7026 case RF3322: 7069 case RF3322:
@@ -7203,7 +7246,7 @@ static const struct rf_channel rf_vals[] = {
7203 7246
7204/* 7247/*
7205 * RF value list for rt3xxx 7248 * RF value list for rt3xxx
7206 * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052) 7249 * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052 & RF3053)
7207 */ 7250 */
7208static const struct rf_channel rf_vals_3x[] = { 7251static const struct rf_channel rf_vals_3x[] = {
7209 {1, 241, 2, 2 }, 7252 {1, 241, 2, 2 },
@@ -7399,72 +7442,6 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
7399 {196, 83, 0, 12, 1}, 7442 {196, 83, 0, 12, 1},
7400}; 7443};
7401 7444
7402static const struct rf_channel rf_vals_3053[] = {
7403 /* Channel, N, R, K */
7404 {1, 241, 2, 2},
7405 {2, 241, 2, 7},
7406 {3, 242, 2, 2},
7407 {4, 242, 2, 7},
7408 {5, 243, 2, 2},
7409 {6, 243, 2, 7},
7410 {7, 244, 2, 2},
7411 {8, 244, 2, 7},
7412 {9, 245, 2, 2},
7413 {10, 245, 2, 7},
7414 {11, 246, 2, 2},
7415 {12, 246, 2, 7},
7416 {13, 247, 2, 2},
7417 {14, 248, 2, 4},
7418
7419 {36, 0x56, 0, 4},
7420 {38, 0x56, 0, 6},
7421 {40, 0x56, 0, 8},
7422 {44, 0x57, 0, 0},
7423 {46, 0x57, 0, 2},
7424 {48, 0x57, 0, 4},
7425 {52, 0x57, 0, 8},
7426 {54, 0x57, 0, 10},
7427 {56, 0x58, 0, 0},
7428 {60, 0x58, 0, 4},
7429 {62, 0x58, 0, 6},
7430 {64, 0x58, 0, 8},
7431
7432 {100, 0x5B, 0, 8},
7433 {102, 0x5B, 0, 10},
7434 {104, 0x5C, 0, 0},
7435 {108, 0x5C, 0, 4},
7436 {110, 0x5C, 0, 6},
7437 {112, 0x5C, 0, 8},
7438
7439 /* NOTE: Channel 114 has been removed intentionally.
7440 * The EEPROM contains no TX power values for that,
7441 * and it is disabled in the vendor driver as well.
7442 */
7443
7444 {116, 0x5D, 0, 0},
7445 {118, 0x5D, 0, 2},
7446 {120, 0x5D, 0, 4},
7447 {124, 0x5D, 0, 8},
7448 {126, 0x5D, 0, 10},
7449 {128, 0x5E, 0, 0},
7450 {132, 0x5E, 0, 4},
7451 {134, 0x5E, 0, 6},
7452 {136, 0x5E, 0, 8},
7453 {140, 0x5F, 0, 0},
7454
7455 {149, 0x5F, 0, 9},
7456 {151, 0x5F, 0, 11},
7457 {153, 0x60, 0, 1},
7458 {157, 0x60, 0, 5},
7459 {159, 0x60, 0, 7},
7460 {161, 0x60, 0, 9},
7461 {165, 0x61, 0, 1},
7462 {167, 0x61, 0, 3},
7463 {169, 0x61, 0, 5},
7464 {171, 0x61, 0, 7},
7465 {173, 0x61, 0, 9},
7466};
7467
7468static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 7445static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
7469{ 7446{
7470 struct hw_mode_spec *spec = &rt2x00dev->spec; 7447 struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -7543,6 +7520,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
7543 rt2x00_rf(rt2x00dev, RF2020) || 7520 rt2x00_rf(rt2x00dev, RF2020) ||
7544 rt2x00_rf(rt2x00dev, RF3021) || 7521 rt2x00_rf(rt2x00dev, RF3021) ||
7545 rt2x00_rf(rt2x00dev, RF3022) || 7522 rt2x00_rf(rt2x00dev, RF3022) ||
7523 rt2x00_rf(rt2x00dev, RF3070) ||
7546 rt2x00_rf(rt2x00dev, RF3290) || 7524 rt2x00_rf(rt2x00dev, RF3290) ||
7547 rt2x00_rf(rt2x00dev, RF3320) || 7525 rt2x00_rf(rt2x00dev, RF3320) ||
7548 rt2x00_rf(rt2x00dev, RF3322) || 7526 rt2x00_rf(rt2x00dev, RF3322) ||
@@ -7553,14 +7531,11 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
7553 rt2x00_rf(rt2x00dev, RF5392)) { 7531 rt2x00_rf(rt2x00dev, RF5392)) {
7554 spec->num_channels = 14; 7532 spec->num_channels = 14;
7555 spec->channels = rf_vals_3x; 7533 spec->channels = rf_vals_3x;
7556 } else if (rt2x00_rf(rt2x00dev, RF3052)) { 7534 } else if (rt2x00_rf(rt2x00dev, RF3052) ||
7535 rt2x00_rf(rt2x00dev, RF3053)) {
7557 spec->supported_bands |= SUPPORT_BAND_5GHZ; 7536 spec->supported_bands |= SUPPORT_BAND_5GHZ;
7558 spec->num_channels = ARRAY_SIZE(rf_vals_3x); 7537 spec->num_channels = ARRAY_SIZE(rf_vals_3x);
7559 spec->channels = rf_vals_3x; 7538 spec->channels = rf_vals_3x;
7560 } else if (rt2x00_rf(rt2x00dev, RF3053)) {
7561 spec->supported_bands |= SUPPORT_BAND_5GHZ;
7562 spec->num_channels = ARRAY_SIZE(rf_vals_3053);
7563 spec->channels = rf_vals_3053;
7564 } else if (rt2x00_rf(rt2x00dev, RF5592)) { 7539 } else if (rt2x00_rf(rt2x00dev, RF5592)) {
7565 spec->supported_bands |= SUPPORT_BAND_5GHZ; 7540 spec->supported_bands |= SUPPORT_BAND_5GHZ;
7566 7541
@@ -7671,6 +7646,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
7671 case RF3320: 7646 case RF3320:
7672 case RF3052: 7647 case RF3052:
7673 case RF3053: 7648 case RF3053:
7649 case RF3070:
7674 case RF3290: 7650 case RF3290:
7675 case RF5360: 7651 case RF5360:
7676 case RF5370: 7652 case RF5370:
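The reworked rt2800_link_tuner() hunks replace the single "RSSI better than -80" rule with chip-specific VGC bumps. Reduced to plain arithmetic (and a hypothetical helper name), the 2.4 GHz case from the diff works out as below; with a hypothetical lna_gain of 8 and an RSSI of -60 dBm on RT3572/RT3593 the result is 0x1c + 16 + 0x20 = 0x4c.

#include <linux/types.h>

/* Sketch: the 2.4 GHz VGC adjustment mirrored from the reworked link
 * tuner. Default VGC is 0x1c + 2 * lna_gain; RT3572/RT3593 add 0x20
 * for a strong signal, while most other chips keep the old +0x10 above
 * -80 dBm (RT5592 has its own rule, omitted here). */
static u8 sketch_vgc_2ghz(int lna_gain, int rssi, bool rt3572_or_rt3593)
{
	u8 vgc = 0x1c + 2 * lna_gain;

	if (rt3572_or_rt3593) {
		if (rssi > -65)
			vgc += 0x20;
	} else if (rssi > -80) {
		vgc += 0x10;
	}

	return vgc;
}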
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 96961b9a395c..96677ce55da4 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1176,6 +1176,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1176 /* Linksys */ 1176 /* Linksys */
1177 { USB_DEVICE(0x13b1, 0x002f) }, 1177 { USB_DEVICE(0x13b1, 0x002f) },
1178 { USB_DEVICE(0x1737, 0x0079) }, 1178 { USB_DEVICE(0x1737, 0x0079) },
1179 /* Logitec */
1180 { USB_DEVICE(0x0789, 0x0170) },
1179 /* Ralink */ 1181 /* Ralink */
1180 { USB_DEVICE(0x148f, 0x3572) }, 1182 { USB_DEVICE(0x148f, 0x3572) },
1181 /* Sitecom */ 1183 /* Sitecom */
@@ -1199,6 +1201,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1199 { USB_DEVICE(0x050d, 0x1103) }, 1201 { USB_DEVICE(0x050d, 0x1103) },
1200 /* Cameo */ 1202 /* Cameo */
1201 { USB_DEVICE(0x148f, 0xf301) }, 1203 { USB_DEVICE(0x148f, 0xf301) },
1204 /* D-Link */
1205 { USB_DEVICE(0x2001, 0x3c1f) },
1202 /* Edimax */ 1206 /* Edimax */
1203 { USB_DEVICE(0x7392, 0x7733) }, 1207 { USB_DEVICE(0x7392, 0x7733) },
1204 /* Hawking */ 1208 /* Hawking */
@@ -1212,6 +1216,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1212 { USB_DEVICE(0x0789, 0x016b) }, 1216 { USB_DEVICE(0x0789, 0x016b) },
1213 /* NETGEAR */ 1217 /* NETGEAR */
1214 { USB_DEVICE(0x0846, 0x9012) }, 1218 { USB_DEVICE(0x0846, 0x9012) },
1219 { USB_DEVICE(0x0846, 0x9013) },
1215 { USB_DEVICE(0x0846, 0x9019) }, 1220 { USB_DEVICE(0x0846, 0x9019) },
1216 /* Planex */ 1221 /* Planex */
1217 { USB_DEVICE(0x2019, 0xed19) }, 1222 { USB_DEVICE(0x2019, 0xed19) },
@@ -1220,6 +1225,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1220 /* Sitecom */ 1225 /* Sitecom */
1221 { USB_DEVICE(0x0df6, 0x0067) }, 1226 { USB_DEVICE(0x0df6, 0x0067) },
1222 { USB_DEVICE(0x0df6, 0x006a) }, 1227 { USB_DEVICE(0x0df6, 0x006a) },
1228 { USB_DEVICE(0x0df6, 0x006e) },
1223 /* ZyXEL */ 1229 /* ZyXEL */
1224 { USB_DEVICE(0x0586, 0x3421) }, 1230 { USB_DEVICE(0x0586, 0x3421) },
1225#endif 1231#endif
@@ -1236,6 +1242,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
1236 { USB_DEVICE(0x2001, 0x3c1c) }, 1242 { USB_DEVICE(0x2001, 0x3c1c) },
1237 { USB_DEVICE(0x2001, 0x3c1d) }, 1243 { USB_DEVICE(0x2001, 0x3c1d) },
1238 { USB_DEVICE(0x2001, 0x3c1e) }, 1244 { USB_DEVICE(0x2001, 0x3c1e) },
1245 { USB_DEVICE(0x2001, 0x3c20) },
1246 { USB_DEVICE(0x2001, 0x3c22) },
1247 { USB_DEVICE(0x2001, 0x3c23) },
1239 /* LG innotek */ 1248 /* LG innotek */
1240 { USB_DEVICE(0x043e, 0x7a22) }, 1249 { USB_DEVICE(0x043e, 0x7a22) },
1241 { USB_DEVICE(0x043e, 0x7a42) }, 1250 { USB_DEVICE(0x043e, 0x7a42) },
@@ -1258,12 +1267,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
1258 { USB_DEVICE(0x043e, 0x7a32) }, 1267 { USB_DEVICE(0x043e, 0x7a32) },
1259 /* AVM GmbH */ 1268 /* AVM GmbH */
1260 { USB_DEVICE(0x057c, 0x8501) }, 1269 { USB_DEVICE(0x057c, 0x8501) },
1261 /* D-Link DWA-160-B2 */ 1270 /* Buffalo */
1271 { USB_DEVICE(0x0411, 0x0241) },
1272 /* D-Link */
1262 { USB_DEVICE(0x2001, 0x3c1a) }, 1273 { USB_DEVICE(0x2001, 0x3c1a) },
1274 { USB_DEVICE(0x2001, 0x3c21) },
1263 /* Proware */ 1275 /* Proware */
1264 { USB_DEVICE(0x043e, 0x7a13) }, 1276 { USB_DEVICE(0x043e, 0x7a13) },
1265 /* Ralink */ 1277 /* Ralink */
1266 { USB_DEVICE(0x148f, 0x5572) }, 1278 { USB_DEVICE(0x148f, 0x5572) },
1279 /* TRENDnet */
1280 { USB_DEVICE(0x20f4, 0x724a) },
1267#endif 1281#endif
1268#ifdef CONFIG_RT2800USB_UNKNOWN 1282#ifdef CONFIG_RT2800USB_UNKNOWN
1269 /* 1283 /*
@@ -1333,6 +1347,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1333 { USB_DEVICE(0x1d4d, 0x0010) }, 1347 { USB_DEVICE(0x1d4d, 0x0010) },
1334 /* Planex */ 1348 /* Planex */
1335 { USB_DEVICE(0x2019, 0xab24) }, 1349 { USB_DEVICE(0x2019, 0xab24) },
1350 { USB_DEVICE(0x2019, 0xab29) },
1336 /* Qcom */ 1351 /* Qcom */
1337 { USB_DEVICE(0x18e8, 0x6259) }, 1352 { USB_DEVICE(0x18e8, 0x6259) },
1338 /* RadioShack */ 1353 /* RadioShack */
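The rt2800usb additions are pure ID-table entries; each new VID:PID pair only has to appear in rt2800usb_device_table for the USB core to bind the driver and for module autoloading to work. A stripped-down sketch of how such an entry is declared and exported, using the D-Link 0x2001:0x3c20 ID from the hunk above purely as an example:

#include <linux/module.h>
#include <linux/usb.h>

/* Sketch: minimal USB ID table with one of the newly added IDs and the
 * mandatory terminating entry, exported for module autoloading. */
static struct usb_device_id sketch_device_table[] = {
	{ USB_DEVICE(0x2001, 0x3c20) },	/* D-Link */
	{ 0, }
};
MODULE_DEVICE_TABLE(usb, sketch_device_table);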
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index fe4c572db52c..e4ba2ce0f212 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -39,6 +39,7 @@
39#include <linux/input-polldev.h> 39#include <linux/input-polldev.h>
40#include <linux/kfifo.h> 40#include <linux/kfifo.h>
41#include <linux/hrtimer.h> 41#include <linux/hrtimer.h>
42#include <linux/average.h>
42 43
43#include <net/mac80211.h> 44#include <net/mac80211.h>
44 45
@@ -138,17 +139,6 @@
138#define SHORT_EIFS ( SIFS + SHORT_DIFS + \ 139#define SHORT_EIFS ( SIFS + SHORT_DIFS + \
139 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) ) 140 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
140 141
141/*
142 * Structure for average calculation
143 * The avg field contains the actual average value,
144 * but avg_weight is internally used during calculations
145 * to prevent rounding errors.
146 */
147struct avg_val {
148 int avg;
149 int avg_weight;
150};
151
152enum rt2x00_chip_intf { 142enum rt2x00_chip_intf {
153 RT2X00_CHIP_INTF_PCI, 143 RT2X00_CHIP_INTF_PCI,
154 RT2X00_CHIP_INTF_PCIE, 144 RT2X00_CHIP_INTF_PCIE,
@@ -297,7 +287,7 @@ struct link_ant {
297 * Similar to the avg_rssi in the link_qual structure 287 * Similar to the avg_rssi in the link_qual structure
298 * this value is updated by using the walking average. 288 * this value is updated by using the walking average.
299 */ 289 */
300 struct avg_val rssi_ant; 290 struct ewma rssi_ant;
301}; 291};
302 292
303/* 293/*
@@ -326,7 +316,7 @@ struct link {
326 /* 316 /*
327 * Currently active average RSSI value 317 * Currently active average RSSI value
328 */ 318 */
329 struct avg_val avg_rssi; 319 struct ewma avg_rssi;
330 320
331 /* 321 /*
332 * Work structure for scheduling periodic link tuning. 322 * Work structure for scheduling periodic link tuning.
@@ -1179,6 +1169,93 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
1179 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC); 1169 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
1180} 1170}
1181 1171
1172/* Helpers for capability flags */
1173
1174static inline bool
1175rt2x00_has_cap_flag(struct rt2x00_dev *rt2x00dev,
1176 enum rt2x00_capability_flags cap_flag)
1177{
1178 return test_bit(cap_flag, &rt2x00dev->cap_flags);
1179}
1180
1181static inline bool
1182rt2x00_has_cap_hw_crypto(struct rt2x00_dev *rt2x00dev)
1183{
1184 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_HW_CRYPTO);
1185}
1186
1187static inline bool
1188rt2x00_has_cap_power_limit(struct rt2x00_dev *rt2x00dev)
1189{
1190 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_POWER_LIMIT);
1191}
1192
1193static inline bool
1194rt2x00_has_cap_control_filters(struct rt2x00_dev *rt2x00dev)
1195{
1196 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTERS);
1197}
1198
1199static inline bool
1200rt2x00_has_cap_control_filter_pspoll(struct rt2x00_dev *rt2x00dev)
1201{
1202 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTER_PSPOLL);
1203}
1204
1205static inline bool
1206rt2x00_has_cap_pre_tbtt_interrupt(struct rt2x00_dev *rt2x00dev)
1207{
1208 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_PRE_TBTT_INTERRUPT);
1209}
1210
1211static inline bool
1212rt2x00_has_cap_link_tuning(struct rt2x00_dev *rt2x00dev)
1213{
1214 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_LINK_TUNING);
1215}
1216
1217static inline bool
1218rt2x00_has_cap_frame_type(struct rt2x00_dev *rt2x00dev)
1219{
1220 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_FRAME_TYPE);
1221}
1222
1223static inline bool
1224rt2x00_has_cap_rf_sequence(struct rt2x00_dev *rt2x00dev)
1225{
1226 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_RF_SEQUENCE);
1227}
1228
1229static inline bool
1230rt2x00_has_cap_external_lna_a(struct rt2x00_dev *rt2x00dev)
1231{
1232 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_A);
1233}
1234
1235static inline bool
1236rt2x00_has_cap_external_lna_bg(struct rt2x00_dev *rt2x00dev)
1237{
1238 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_BG);
1239}
1240
1241static inline bool
1242rt2x00_has_cap_double_antenna(struct rt2x00_dev *rt2x00dev)
1243{
1244 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_DOUBLE_ANTENNA);
1245}
1246
1247static inline bool
1248rt2x00_has_cap_bt_coexist(struct rt2x00_dev *rt2x00dev)
1249{
1250 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_BT_COEXIST);
1251}
1252
1253static inline bool
1254rt2x00_has_cap_vco_recalibration(struct rt2x00_dev *rt2x00dev)
1255{
1256 return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_VCO_RECALIBRATION);
1257}
1258
1182/** 1259/**
1183 * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes. 1260 * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
1184 * @entry: Pointer to &struct queue_entry 1261 * @entry: Pointer to &struct queue_entry
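The new rt2x00_has_cap_*() helpers in rt2x00.h are thin inline wrappers around test_bit() on rt2x00dev->cap_flags; the rest of the series is mostly a mechanical conversion of callers. A hypothetical caller using one of them might read:

/* Sketch: hypothetical caller using the new capability helper rather
 * than open-coding test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags). */
static void sketch_setup_hw_keys(struct rt2x00_dev *rt2x00dev)
{
	if (!rt2x00_has_cap_hw_crypto(rt2x00dev))
		return;	/* no hardware crypto, fall back to software */

	/* ... program the hardware key registers here ... */
}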
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 1ca4c7ffc189..3db0d99d9da7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -52,7 +52,7 @@ void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
52 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 52 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
53 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 53 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
54 54
55 if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !hw_key) 55 if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !hw_key)
56 return; 56 return;
57 57
58 __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags); 58 __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
@@ -80,7 +80,7 @@ unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
80 struct ieee80211_key_conf *key = tx_info->control.hw_key; 80 struct ieee80211_key_conf *key = tx_info->control.hw_key;
81 unsigned int overhead = 0; 81 unsigned int overhead = 0;
82 82
83 if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !key) 83 if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !key)
84 return overhead; 84 return overhead;
85 85
86 /* 86 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index fe7a7f63a9ed..7f7baae5ae02 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -750,7 +750,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
750 intf, &rt2x00debug_fop_queue_stats); 750 intf, &rt2x00debug_fop_queue_stats);
751 751
752#ifdef CONFIG_RT2X00_LIB_CRYPTO 752#ifdef CONFIG_RT2X00_LIB_CRYPTO
753 if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) 753 if (rt2x00_has_cap_hw_crypto(rt2x00dev))
754 intf->crypto_stats_entry = 754 intf->crypto_stats_entry =
755 debugfs_create_file("crypto", S_IRUGO, intf->queue_folder, 755 debugfs_create_file("crypto", S_IRUGO, intf->queue_folder,
756 intf, &rt2x00debug_fop_crypto_stats); 756 intf, &rt2x00debug_fop_crypto_stats);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 712eea9d398f..080b1fcae5fa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -88,7 +88,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
88 rt2x00queue_start_queues(rt2x00dev); 88 rt2x00queue_start_queues(rt2x00dev);
89 rt2x00link_start_tuner(rt2x00dev); 89 rt2x00link_start_tuner(rt2x00dev);
90 rt2x00link_start_agc(rt2x00dev); 90 rt2x00link_start_agc(rt2x00dev);
91 if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags)) 91 if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
92 rt2x00link_start_vcocal(rt2x00dev); 92 rt2x00link_start_vcocal(rt2x00dev);
93 93
94 /* 94 /*
@@ -113,7 +113,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
113 * Stop all queues 113 * Stop all queues
114 */ 114 */
115 rt2x00link_stop_agc(rt2x00dev); 115 rt2x00link_stop_agc(rt2x00dev);
116 if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags)) 116 if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
117 rt2x00link_stop_vcocal(rt2x00dev); 117 rt2x00link_stop_vcocal(rt2x00dev);
118 rt2x00link_stop_tuner(rt2x00dev); 118 rt2x00link_stop_tuner(rt2x00dev);
119 rt2x00queue_stop_queues(rt2x00dev); 119 rt2x00queue_stop_queues(rt2x00dev);
@@ -234,7 +234,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
234 * here as they will fetch the next beacon directly prior to 234 * here as they will fetch the next beacon directly prior to
235 * transmission. 235 * transmission.
236 */ 236 */
237 if (test_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags)) 237 if (rt2x00_has_cap_pre_tbtt_interrupt(rt2x00dev))
238 return; 238 return;
239 239
240 /* fetch next beacon */ 240 /* fetch next beacon */
@@ -358,7 +358,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
358 * mac80211 will expect the same data to be present it the 358 * mac80211 will expect the same data to be present it the
359 * frame as it was passed to us. 359 * frame as it was passed to us.
360 */ 360 */
361 if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) 361 if (rt2x00_has_cap_hw_crypto(rt2x00dev))
362 rt2x00crypto_tx_insert_iv(entry->skb, header_length); 362 rt2x00crypto_tx_insert_iv(entry->skb, header_length);
363 363
364 /* 364 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 8368aab86f28..c2b3b6629188 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -35,50 +35,28 @@
35 */ 35 */
36#define DEFAULT_RSSI -128 36#define DEFAULT_RSSI -128
37 37
38/* 38/* Constants for EWMA calculations. */
39 * Helper struct and macro to work with moving/walking averages. 39#define RT2X00_EWMA_FACTOR 1024
40 * When adding a value to the average value the following calculation 40#define RT2X00_EWMA_WEIGHT 8
41 * is needed: 41
42 * 42static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
43 * avg_rssi = ((avg_rssi * 7) + rssi) / 8; 43{
44 * 44 unsigned long avg;
45 * The advantage of this approach is that we only need 1 variable 45
46 * to store the average in (No need for a count and a total). 46 avg = ewma_read(ewma);
47 * But more importantly, normal average values will over time 47 if (avg)
48 * move less and less towards newly added values this results 48 return -avg;
49 * that with link tuning, the device can have a very good RSSI 49
50 * for a few minutes but when the device is moved away from the AP 50 return DEFAULT_RSSI;
51 * the average will not decrease fast enough to compensate. 51}
52 * The walking average compensates this and will move towards
53 * the new values correctly allowing a effective link tuning,
54 * the speed of the average moving towards other values depends
55 * on the value for the number of samples. The higher the number
56 * of samples, the slower the average will move.
57 * We use two variables to keep track of the average value to
58 * compensate for the rounding errors. This can be a significant
59 * error (>5dBm) if the factor is too low.
60 */
61#define AVG_SAMPLES 8
62#define AVG_FACTOR 1000
63#define MOVING_AVERAGE(__avg, __val) \
64({ \
65 struct avg_val __new; \
66 __new.avg_weight = \
67 (__avg).avg_weight ? \
68 ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \
69 ((__val) * (AVG_FACTOR))) / \
70 (AVG_SAMPLES)) : \
71 ((__val) * (AVG_FACTOR)); \
72 __new.avg = __new.avg_weight / (AVG_FACTOR); \
73 __new; \
74})
75 52
76static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev) 53static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev)
77{ 54{
78 struct link_ant *ant = &rt2x00dev->link.ant; 55 struct link_ant *ant = &rt2x00dev->link.ant;
79 56
80 if (ant->rssi_ant.avg && rt2x00dev->link.qual.rx_success) 57 if (rt2x00dev->link.qual.rx_success)
81 return ant->rssi_ant.avg; 58 return rt2x00link_get_avg_rssi(&ant->rssi_ant);
59
82 return DEFAULT_RSSI; 60 return DEFAULT_RSSI;
83} 61}
84 62
@@ -100,8 +78,8 @@ static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev,
100 78
101static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev) 79static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
102{ 80{
103 rt2x00dev->link.ant.rssi_ant.avg = 0; 81 ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
104 rt2x00dev->link.ant.rssi_ant.avg_weight = 0; 82 RT2X00_EWMA_WEIGHT);
105} 83}
106 84
107static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev) 85static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -249,12 +227,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
249 /* 227 /*
250 * Update global RSSI 228 * Update global RSSI
251 */ 229 */
252 link->avg_rssi = MOVING_AVERAGE(link->avg_rssi, rxdesc->rssi); 230 ewma_add(&link->avg_rssi, -rxdesc->rssi);
253 231
254 /* 232 /*
255 * Update antenna RSSI 233 * Update antenna RSSI
256 */ 234 */
257 ant->rssi_ant = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi); 235 ewma_add(&ant->rssi_ant, -rxdesc->rssi);
258} 236}
259 237
260void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev) 238void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
@@ -309,6 +287,8 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
309 */ 287 */
310 rt2x00dev->link.count = 0; 288 rt2x00dev->link.count = 0;
311 memset(qual, 0, sizeof(*qual)); 289 memset(qual, 0, sizeof(*qual));
290 ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
291 RT2X00_EWMA_WEIGHT);
312 292
313 /* 293 /*
314 * Restore the VGC level as stored in the registers, 294 * Restore the VGC level as stored in the registers,
@@ -363,17 +343,17 @@ static void rt2x00link_tuner(struct work_struct *work)
363 * collect the RSSI data we could use this. Otherwise we 343 * collect the RSSI data we could use this. Otherwise we
364 * must fallback to the default RSSI value. 344 * must fallback to the default RSSI value.
365 */ 345 */
366 if (!link->avg_rssi.avg || !qual->rx_success) 346 if (!qual->rx_success)
367 qual->rssi = DEFAULT_RSSI; 347 qual->rssi = DEFAULT_RSSI;
368 else 348 else
369 qual->rssi = link->avg_rssi.avg; 349 qual->rssi = rt2x00link_get_avg_rssi(&link->avg_rssi);
370 350
371 /* 351 /*
372 * Check if link tuning is supported by the hardware, some hardware 352 * Check if link tuning is supported by the hardware, some hardware
373 * do not support link tuning at all, while other devices can disable 353 * do not support link tuning at all, while other devices can disable
374 * the feature from the EEPROM. 354 * the feature from the EEPROM.
375 */ 355 */
376 if (test_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags)) 356 if (rt2x00_has_cap_link_tuning(rt2x00dev))
377 rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count); 357 rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
378 358
379 /* 359 /*
@@ -513,7 +493,7 @@ static void rt2x00link_vcocal(struct work_struct *work)
513void rt2x00link_register(struct rt2x00_dev *rt2x00dev) 493void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
514{ 494{
515 INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc); 495 INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc);
516 if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags)) 496 if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
517 INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal); 497 INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal);
518 INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog); 498 INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
519 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner); 499 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
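The rt2x00link.c hunks above replace the driver's hand-rolled MOVING_AVERAGE() macro with the kernel's generic EWMA helpers: ewma_init() seeds the average with RT2X00_EWMA_FACTOR/RT2X00_EWMA_WEIGHT (defined elsewhere in the driver and not shown here), ewma_add() folds in each sample (negated, presumably because the generic helpers track unsigned values), and rt2x00link_get_avg_rssi() reads it back. As a rough illustration of the arithmetic the removed macro performed, here is a minimal userspace sketch using the AVG_SAMPLES and AVG_FACTOR values shown above; it is not driver code, just the same weighted-average idea in isolation.

#include <stdio.h>

#define AVG_SAMPLES 8
#define AVG_FACTOR  1000

struct avg_val {
	int avg;		/* averaged value, unscaled */
	int avg_weight;		/* internal value, scaled by AVG_FACTOR */
};

/* Same arithmetic as the removed MOVING_AVERAGE() macro. */
static struct avg_val moving_average(struct avg_val old, int val)
{
	struct avg_val ret;

	ret.avg_weight = old.avg_weight ?
		(old.avg_weight * (AVG_SAMPLES - 1) + val * AVG_FACTOR) /
		AVG_SAMPLES :
		val * AVG_FACTOR;
	ret.avg = ret.avg_weight / AVG_FACTOR;
	return ret;
}

int main(void)
{
	struct avg_val rssi = { 0, 0 };
	int samples[] = { -40, -42, -45, -70, -72, -75 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rssi = moving_average(rssi, samples[i]);
		printf("sample %4d -> average %4d\n", samples[i], rssi.avg);
	}
	return 0;
}

The higher AVG_SAMPLES is, the slower the average follows new samples; keeping the AVG_FACTOR-scaled value avoids losing precision to integer division, which is the same trade-off the generic EWMA code makes with its factor/weight parameters.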
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f883802f3505..51f17cfb93f9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -382,11 +382,11 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
382 * of different types, but has no separate filter for PS Poll frames, 382 * of different types, but has no separate filter for PS Poll frames,
383 * FIF_CONTROL flag implies FIF_PSPOLL. 383 * FIF_CONTROL flag implies FIF_PSPOLL.
384 */ 384 */
385 if (!test_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags)) { 385 if (!rt2x00_has_cap_control_filters(rt2x00dev)) {
386 if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL) 386 if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL)
387 *total_flags |= FIF_CONTROL | FIF_PSPOLL; 387 *total_flags |= FIF_CONTROL | FIF_PSPOLL;
388 } 388 }
389 if (!test_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags)) { 389 if (!rt2x00_has_cap_control_filter_pspoll(rt2x00dev)) {
390 if (*total_flags & FIF_CONTROL) 390 if (*total_flags & FIF_CONTROL)
391 *total_flags |= FIF_PSPOLL; 391 *total_flags |= FIF_PSPOLL;
392 } 392 }
@@ -469,7 +469,7 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
469 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) 469 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
470 return 0; 470 return 0;
471 471
472 if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) 472 if (!rt2x00_has_cap_hw_crypto(rt2x00dev))
473 return -EOPNOTSUPP; 473 return -EOPNOTSUPP;
474 474
475 /* 475 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index dc49e525ae5e..25da20e7e1f3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -119,7 +119,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
119 rt2x00dev->ops = ops; 119 rt2x00dev->ops = ops;
120 rt2x00dev->hw = hw; 120 rt2x00dev->hw = hw;
121 rt2x00dev->irq = pci_dev->irq; 121 rt2x00dev->irq = pci_dev->irq;
122 rt2x00dev->name = pci_name(pci_dev); 122 rt2x00dev->name = ops->name;
123 123
124 if (pci_is_pcie(pci_dev)) 124 if (pci_is_pcie(pci_dev))
125 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE); 125 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 6c8a33b6ee22..50590b1420a5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -61,7 +61,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
61 * at least 8 bytes available in headroom for IV/EIV 61 * at least 8 bytes available in headroom for IV/EIV
62 * and 8 bytes for ICV data as tailroom. 62 * and 8 bytes for ICV data as tailroom.
63 */ 63 */
64 if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) { 64 if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
65 head_size += 8; 65 head_size += 8;
66 tail_size += 8; 66 tail_size += 8;
67 } 67 }
@@ -1033,38 +1033,21 @@ EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
1033 1033
1034void rt2x00queue_flush_queue(struct data_queue *queue, bool drop) 1034void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
1035{ 1035{
1036 bool started;
1037 bool tx_queue = 1036 bool tx_queue =
1038 (queue->qid == QID_AC_VO) || 1037 (queue->qid == QID_AC_VO) ||
1039 (queue->qid == QID_AC_VI) || 1038 (queue->qid == QID_AC_VI) ||
1040 (queue->qid == QID_AC_BE) || 1039 (queue->qid == QID_AC_BE) ||
1041 (queue->qid == QID_AC_BK); 1040 (queue->qid == QID_AC_BK);
1042 1041
1043 mutex_lock(&queue->status_lock);
1044 1042
1045 /* 1043 /*
1046 * If the queue has been started, we must stop it temporarily 1044 * If we are not supposed to drop any pending
1047 * to prevent any new frames to be queued on the device. If 1045 * frames, this means we must force a start (=kick)
1048 * we are not dropping the pending frames, the queue must 1046 * to the queue to make sure the hardware will
1049 * only be stopped in the software and not the hardware, 1047 * start transmitting.
1050 * otherwise the queue will never become empty on its own.
1051 */ 1048 */
1052 started = test_bit(QUEUE_STARTED, &queue->flags); 1049 if (!drop && tx_queue)
1053 if (started) { 1050 queue->rt2x00dev->ops->lib->kick_queue(queue);
1054 /*
1055 * Pause the queue
1056 */
1057 rt2x00queue_pause_queue(queue);
1058
1059 /*
1060 * If we are not supposed to drop any pending
1061 * frames, this means we must force a start (=kick)
1062 * to the queue to make sure the hardware will
1063 * start transmitting.
1064 */
1065 if (!drop && tx_queue)
1066 queue->rt2x00dev->ops->lib->kick_queue(queue);
1067 }
1068 1051
1069 /* 1052 /*
1070 * Check if driver supports flushing, if that is the case we can 1053 * Check if driver supports flushing, if that is the case we can
@@ -1080,14 +1063,6 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
1080 if (unlikely(!rt2x00queue_empty(queue))) 1063 if (unlikely(!rt2x00queue_empty(queue)))
1081 rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n", 1064 rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
1082 queue->qid); 1065 queue->qid);
1083
1084 /*
1085 * Restore the queue to the previous status
1086 */
1087 if (started)
1088 rt2x00queue_unpause_queue(queue);
1089
1090 mutex_unlock(&queue->status_lock);
1091} 1066}
1092EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue); 1067EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
1093 1068
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 88289873c0cf..4e121627925d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -523,7 +523,9 @@ static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
523 rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n", 523 rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
524 queue->qid); 524 queue->qid);
525 525
526 rt2x00queue_stop_queue(queue);
526 rt2x00queue_flush_queue(queue, true); 527 rt2x00queue_flush_queue(queue, true);
528 rt2x00queue_start_queue(queue);
527} 529}
528 530
529static int rt2x00usb_dma_timeout(struct data_queue *queue) 531static int rt2x00usb_dma_timeout(struct data_queue *queue)
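With the pause/unpause bookkeeping and status_lock removed from rt2x00queue_flush_queue() above, a caller that needs a full recovery now performs stop, flush-with-drop, and start itself, as the DMA-timeout hunk here does. The sketch below only illustrates that ordering; the queue type and helper names are stand-ins, not the rt2x00 API.

#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	bool started;
	int pending;
};

static void queue_stop(struct fake_queue *q)
{
	q->started = false;
	puts("queue stopped");
}

static void queue_flush(struct fake_queue *q, bool drop)
{
	/* drop=true: discard whatever the stalled DMA left behind */
	if (drop)
		q->pending = 0;
	printf("queue flushed (drop=%d), pending=%d\n", drop, q->pending);
}

static void queue_start(struct fake_queue *q)
{
	q->started = true;
	puts("queue restarted");
}

int main(void)
{
	struct fake_queue txq = { .started = true, .pending = 3 };

	/* Forced-reset path taken after a TX DMA timeout. */
	queue_stop(&txq);
	queue_flush(&txq, true);
	queue_start(&txq);
	return 0;
}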
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 54d3ddfc9888..a5b69cb49012 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -685,7 +685,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
685 685
686 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529)); 686 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
687 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 687 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
688 !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags)); 688 !rt2x00_has_cap_frame_type(rt2x00dev));
689 689
690 /* 690 /*
691 * Configure the RX antenna. 691 * Configure the RX antenna.
@@ -813,10 +813,10 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
813 813
814 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 814 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
815 sel = antenna_sel_a; 815 sel = antenna_sel_a;
816 lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags); 816 lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
817 } else { 817 } else {
818 sel = antenna_sel_bg; 818 sel = antenna_sel_bg;
819 lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags); 819 lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
820 } 820 }
821 821
822 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) 822 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -836,7 +836,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
836 else if (rt2x00_rf(rt2x00dev, RF2527)) 836 else if (rt2x00_rf(rt2x00dev, RF2527))
837 rt61pci_config_antenna_2x(rt2x00dev, ant); 837 rt61pci_config_antenna_2x(rt2x00dev, ant);
838 else if (rt2x00_rf(rt2x00dev, RF2529)) { 838 else if (rt2x00_rf(rt2x00dev, RF2529)) {
839 if (test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags)) 839 if (rt2x00_has_cap_double_antenna(rt2x00dev))
840 rt61pci_config_antenna_2x(rt2x00dev, ant); 840 rt61pci_config_antenna_2x(rt2x00dev, ant);
841 else 841 else
842 rt61pci_config_antenna_2529(rt2x00dev, ant); 842 rt61pci_config_antenna_2529(rt2x00dev, ant);
@@ -850,13 +850,13 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
850 short lna_gain = 0; 850 short lna_gain = 0;
851 851
852 if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) { 852 if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
853 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) 853 if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
854 lna_gain += 14; 854 lna_gain += 14;
855 855
856 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom); 856 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
857 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1); 857 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
858 } else { 858 } else {
859 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) 859 if (rt2x00_has_cap_external_lna_a(rt2x00dev))
860 lna_gain += 14; 860 lna_gain += 14;
861 861
862 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom); 862 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
@@ -1054,14 +1054,14 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
1054 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 1054 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
1055 low_bound = 0x28; 1055 low_bound = 0x28;
1056 up_bound = 0x48; 1056 up_bound = 0x48;
1057 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) { 1057 if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
1058 low_bound += 0x10; 1058 low_bound += 0x10;
1059 up_bound += 0x10; 1059 up_bound += 0x10;
1060 } 1060 }
1061 } else { 1061 } else {
1062 low_bound = 0x20; 1062 low_bound = 0x20;
1063 up_bound = 0x40; 1063 up_bound = 0x40;
1064 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) { 1064 if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
1065 low_bound += 0x10; 1065 low_bound += 0x10;
1066 up_bound += 0x10; 1066 up_bound += 0x10;
1067 } 1067 }
@@ -2578,7 +2578,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2578 * eeprom word. 2578 * eeprom word.
2579 */ 2579 */
2580 if (rt2x00_rf(rt2x00dev, RF2529) && 2580 if (rt2x00_rf(rt2x00dev, RF2529) &&
2581 !test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags)) { 2581 !rt2x00_has_cap_double_antenna(rt2x00dev)) {
2582 rt2x00dev->default_ant.rx = 2582 rt2x00dev->default_ant.rx =
2583 ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED); 2583 ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
2584 rt2x00dev->default_ant.tx = 2584 rt2x00dev->default_ant.tx =
@@ -2793,7 +2793,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2793 spec->supported_bands = SUPPORT_BAND_2GHZ; 2793 spec->supported_bands = SUPPORT_BAND_2GHZ;
2794 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2794 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2795 2795
2796 if (!test_bit(CAPABILITY_RF_SEQUENCE, &rt2x00dev->cap_flags)) { 2796 if (!rt2x00_has_cap_rf_sequence(rt2x00dev)) {
2797 spec->num_channels = 14; 2797 spec->num_channels = 14;
2798 spec->channels = rf_vals_noseq; 2798 spec->channels = rf_vals_noseq;
2799 } else { 2799 } else {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1d3880e09a13..1baf9c896dcd 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -595,8 +595,8 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
595 switch (ant->rx) { 595 switch (ant->rx) {
596 case ANTENNA_HW_DIVERSITY: 596 case ANTENNA_HW_DIVERSITY:
597 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); 597 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
598 temp = !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags) 598 temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
599 && (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ); 599 (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
600 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp); 600 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
601 break; 601 break;
602 case ANTENNA_A: 602 case ANTENNA_A:
@@ -636,7 +636,7 @@ static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
636 636
637 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); 637 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
638 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 638 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
639 !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags)); 639 !rt2x00_has_cap_frame_type(rt2x00dev));
640 640
641 /* 641 /*
642 * Configure the RX antenna. 642 * Configure the RX antenna.
@@ -709,10 +709,10 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
709 709
710 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 710 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
711 sel = antenna_sel_a; 711 sel = antenna_sel_a;
712 lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags); 712 lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
713 } else { 713 } else {
714 sel = antenna_sel_bg; 714 sel = antenna_sel_bg;
715 lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags); 715 lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
716 } 716 }
717 717
718 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) 718 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -740,7 +740,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
740 short lna_gain = 0; 740 short lna_gain = 0;
741 741
742 if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) { 742 if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
743 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) 743 if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
744 lna_gain += 14; 744 lna_gain += 14;
745 745
746 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom); 746 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
@@ -930,7 +930,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
930 low_bound = 0x28; 930 low_bound = 0x28;
931 up_bound = 0x48; 931 up_bound = 0x48;
932 932
933 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) { 933 if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
934 low_bound += 0x10; 934 low_bound += 0x10;
935 up_bound += 0x10; 935 up_bound += 0x10;
936 } 936 }
@@ -946,7 +946,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
946 up_bound = 0x1c; 946 up_bound = 0x1c;
947 } 947 }
948 948
949 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) { 949 if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
950 low_bound += 0x14; 950 low_bound += 0x14;
951 up_bound += 0x10; 951 up_bound += 0x10;
952 } 952 }
@@ -1661,7 +1661,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1661 } 1661 }
1662 1662
1663 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 1663 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
1664 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) { 1664 if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
1665 if (lna == 3 || lna == 2) 1665 if (lna == 3 || lna == 2)
1666 offset += 10; 1666 offset += 10;
1667 } else { 1667 } else {
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index fc207b268e4f..a91506b12a62 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1122,7 +1122,6 @@ static int rtl8180_probe(struct pci_dev *pdev,
1122 iounmap(priv->map); 1122 iounmap(priv->map);
1123 1123
1124 err_free_dev: 1124 err_free_dev:
1125 pci_set_drvdata(pdev, NULL);
1126 ieee80211_free_hw(dev); 1125 ieee80211_free_hw(dev);
1127 1126
1128 err_free_reg: 1127 err_free_reg:
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 8bb4a9a01a18..9a78e3daf742 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1613,6 +1613,35 @@ err_free:
1613} 1613}
1614EXPORT_SYMBOL(rtl_send_smps_action); 1614EXPORT_SYMBOL(rtl_send_smps_action);
1615 1615
1616void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
1617{
1618 struct rtl_priv *rtlpriv = rtl_priv(hw);
1619 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1620 enum io_type iotype;
1621
1622 if (!is_hal_stop(rtlhal)) {
1623 switch (operation) {
1624 case SCAN_OPT_BACKUP:
1625 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
1626 rtlpriv->cfg->ops->set_hw_reg(hw,
1627 HW_VAR_IO_CMD,
1628 (u8 *)&iotype);
1629 break;
1630 case SCAN_OPT_RESTORE:
1631 iotype = IO_CMD_RESUME_DM_BY_SCAN;
1632 rtlpriv->cfg->ops->set_hw_reg(hw,
1633 HW_VAR_IO_CMD,
1634 (u8 *)&iotype);
1635 break;
1636 default:
1637 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1638 "Unknown Scan Backup operation.\n");
1639 break;
1640 }
1641 }
1642}
1643EXPORT_SYMBOL(rtl_phy_scan_operation_backup);
1644
1616/* There seem to be issues in mac80211 regarding when del ba frames can be 1645/* There seem to be issues in mac80211 regarding when del ba frames can be
1617 * received. As a workaround, we make a fake del_ba if we receive a ba_req; 1646 * received. As a workaround, we make a fake del_ba if we receive a ba_req;
1618 * however, rx_agg was opened to let mac80211 release some ba related 1647 * however, rx_agg was opened to let mac80211 release some ba related
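rtl_phy_scan_operation_backup() is introduced here as a common helper so the near-identical per-chip copies (removed from rtl8188ee and rtl8192c further down) can go away and each driver can simply reference the shared function from its rtl_hal_ops table. The sketch below shows that ops-table pattern with stand-in types only; it is not the rtlwifi API.

#include <stdio.h>

enum scan_opt { SCAN_OPT_BACKUP, SCAN_OPT_RESTORE };

struct hal_ops {
	void (*scan_operation_backup)(enum scan_opt op);
};

/* One shared helper replaces the duplicated per-chip implementations. */
static void shared_scan_operation_backup(enum scan_opt op)
{
	printf("%s dynamic mechanisms around a scan\n",
	       op == SCAN_OPT_BACKUP ? "pause" : "resume");
}

/* Each chip driver now just points at the shared helper. */
static const struct hal_ops chip_a_ops = {
	.scan_operation_backup = shared_scan_operation_backup,
};

static const struct hal_ops chip_b_ops = {
	.scan_operation_backup = shared_scan_operation_backup,
};

int main(void)
{
	chip_a_ops.scan_operation_backup(SCAN_OPT_BACKUP);
	chip_b_ops.scan_operation_backup(SCAN_OPT_RESTORE);
	return 0;
}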
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 0e5fe0902daf..0cd07420777a 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -114,7 +114,6 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
114void rtl_deinit_rfkill(struct ieee80211_hw *hw); 114void rtl_deinit_rfkill(struct ieee80211_hw *hw);
115 115
116void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb); 116void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
117void rtl_watch_dog_timer_callback(unsigned long data);
118void rtl_deinit_deferred_work(struct ieee80211_hw *hw); 117void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
119 118
120bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); 119bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
@@ -153,5 +152,6 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
153bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb); 152bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
154struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, 153struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
155 u8 *sa, u8 *bssid, u16 tid); 154 u8 *sa, u8 *bssid, u16 tid);
155void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
156 156
157#endif 157#endif
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h
index 35e00086a520..0105e6c1901e 100644
--- a/drivers/net/wireless/rtlwifi/cam.h
+++ b/drivers/net/wireless/rtlwifi/cam.h
@@ -41,12 +41,12 @@
41#define CAM_CONFIG_USEDK 1 41#define CAM_CONFIG_USEDK 1
42#define CAM_CONFIG_NO_USEDK 0 42#define CAM_CONFIG_NO_USEDK 0
43 43
44extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw); 44void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
45extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, 45u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
46 u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg, 46 u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
47 u32 ul_default_key, u8 *key_content); 47 u32 ul_default_key, u8 *key_content);
48int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, 48int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
49 u32 ul_key_id); 49 u32 ul_key_id);
50void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index); 50void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
51void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index); 51void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
52void rtl_cam_reset_sec_info(struct ieee80211_hw *hw); 52void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 733b7ce7f0e2..210ce7cd94d8 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -115,7 +115,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
115 mutex_lock(&rtlpriv->locks.conf_mutex); 115 mutex_lock(&rtlpriv->locks.conf_mutex);
116 116
117 mac->link_state = MAC80211_NOLINK; 117 mac->link_state = MAC80211_NOLINK;
118 memset(mac->bssid, 0, 6); 118 memset(mac->bssid, 0, ETH_ALEN);
119 mac->vendor = PEER_UNKNOWN; 119 mac->vendor = PEER_UNKNOWN;
120 120
121 /*reset sec info */ 121 /*reset sec info */
@@ -280,7 +280,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
280 mac->p2p = 0; 280 mac->p2p = 0;
281 mac->vif = NULL; 281 mac->vif = NULL;
282 mac->link_state = MAC80211_NOLINK; 282 mac->link_state = MAC80211_NOLINK;
283 memset(mac->bssid, 0, 6); 283 memset(mac->bssid, 0, ETH_ALEN);
284 mac->vendor = PEER_UNKNOWN; 284 mac->vendor = PEER_UNKNOWN;
285 mac->opmode = NL80211_IFTYPE_UNSPECIFIED; 285 mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
286 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); 286 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
@@ -721,7 +721,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
721 mac->link_state = MAC80211_LINKED; 721 mac->link_state = MAC80211_LINKED;
722 mac->cnt_after_linked = 0; 722 mac->cnt_after_linked = 0;
723 mac->assoc_id = bss_conf->aid; 723 mac->assoc_id = bss_conf->aid;
724 memcpy(mac->bssid, bss_conf->bssid, 6); 724 memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
725 725
726 if (rtlpriv->cfg->ops->linked_set_reg) 726 if (rtlpriv->cfg->ops->linked_set_reg)
727 rtlpriv->cfg->ops->linked_set_reg(hw); 727 rtlpriv->cfg->ops->linked_set_reg(hw);
@@ -750,7 +750,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
750 if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE) 750 if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
751 rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); 751 rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
752 mac->link_state = MAC80211_NOLINK; 752 mac->link_state = MAC80211_NOLINK;
753 memset(mac->bssid, 0, 6); 753 memset(mac->bssid, 0, ETH_ALEN);
754 mac->vendor = PEER_UNKNOWN; 754 mac->vendor = PEER_UNKNOWN;
755 755
756 if (rtlpriv->dm.supp_phymode_switch) { 756 if (rtlpriv->dm.supp_phymode_switch) {
@@ -826,7 +826,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
826 bss_conf->bssid); 826 bss_conf->bssid);
827 827
828 mac->vendor = PEER_UNKNOWN; 828 mac->vendor = PEER_UNKNOWN;
829 memcpy(mac->bssid, bss_conf->bssid, 6); 829 memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
830 rtlpriv->cfg->ops->set_network_type(hw, vif->type); 830 rtlpriv->cfg->ops->set_network_type(hw, vif->type);
831 831
832 rcu_read_lock(); 832 rcu_read_lock();
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 838a1ed3f194..ae13fb94b2e8 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -1203,20 +1203,18 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
1203 1203
1204static u16 efuse_get_current_size(struct ieee80211_hw *hw) 1204static u16 efuse_get_current_size(struct ieee80211_hw *hw)
1205{ 1205{
1206 int continual = true;
1207 u16 efuse_addr = 0; 1206 u16 efuse_addr = 0;
1208 u8 hworden; 1207 u8 hworden;
1209 u8 efuse_data, word_cnts; 1208 u8 efuse_data, word_cnts;
1210 1209
1211 while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data) 1210 while (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
1212 && (efuse_addr < EFUSE_MAX_SIZE)) { 1211 efuse_addr < EFUSE_MAX_SIZE) {
1213 if (efuse_data != 0xFF) { 1212 if (efuse_data == 0xFF)
1214 hworden = efuse_data & 0x0F; 1213 break;
1215 word_cnts = efuse_calculate_word_cnts(hworden); 1214
1216 efuse_addr = efuse_addr + (word_cnts * 2) + 1; 1215 hworden = efuse_data & 0x0F;
1217 } else { 1216 word_cnts = efuse_calculate_word_cnts(hworden);
1218 continual = false; 1217 efuse_addr = efuse_addr + (word_cnts * 2) + 1;
1219 }
1220 } 1218 }
1221 1219
1222 return efuse_addr; 1220 return efuse_addr;
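The efuse_get_current_size() rewrite above drops the continual flag in favour of an early break: read each section header, stop on the first erased (0xFF) byte or at the end of the map, otherwise skip past the section. A self-contained sketch of that loop shape follows; the map contents and the word-count helper are illustrative stand-ins, not the real efuse layout or efuse_calculate_word_cnts().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFUSE_MAX_SIZE 16

static const uint8_t fake_map[EFUSE_MAX_SIZE] = { 0x07, 0xAA, 0xBB, 0xFF };

static bool one_byte_read(uint16_t addr, uint8_t *data)
{
	if (addr >= EFUSE_MAX_SIZE)
		return false;
	*data = fake_map[addr];
	return true;
}

/* Stand-in word counter: count cleared enable bits in the low nibble. */
static unsigned int calc_word_cnts(uint8_t word_en)
{
	unsigned int i, cnt = 0;

	for (i = 0; i < 4; i++)
		if (!(word_en & (1u << i)))
			cnt++;
	return cnt;
}

int main(void)
{
	uint16_t efuse_addr = 0;
	uint8_t efuse_data, hworden;

	while (one_byte_read(efuse_addr, &efuse_data) &&
	       efuse_addr < EFUSE_MAX_SIZE) {
		if (efuse_data == 0xFF)
			break;

		hworden = efuse_data & 0x0F;
		efuse_addr = efuse_addr + (calc_word_cnts(hworden) * 2) + 1;
	}

	printf("used efuse bytes: %u\n", efuse_addr);
	return 0;
}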
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index 395a326acfb4..1663b3afd41e 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -104,20 +104,19 @@ struct efuse_priv {
104 u8 tx_power_g[14]; 104 u8 tx_power_g[14];
105}; 105};
106 106
107extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf); 107void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
108extern void efuse_initialize(struct ieee80211_hw *hw); 108void efuse_initialize(struct ieee80211_hw *hw);
109extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address); 109u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
110extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value); 110void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
111extern void read_efuse(struct ieee80211_hw *hw, u16 _offset, 111void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf);
112 u16 _size_byte, u8 *pbuf); 112void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, u16 offset,
113extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, 113 u32 *value);
114 u16 offset, u32 *value); 114void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
115extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, 115 u32 value);
116 u16 offset, u32 value); 116bool efuse_shadow_update(struct ieee80211_hw *hw);
117extern bool efuse_shadow_update(struct ieee80211_hw *hw); 117bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
118extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw); 118void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
119extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw); 119void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
120extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw); 120void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
121extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
122 121
123#endif 122#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 703f839af6ca..0f494444bcd1 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -736,7 +736,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
736 736
737 struct rtl_stats stats = { 737 struct rtl_stats stats = {
738 .signal = 0, 738 .signal = 0,
739 .noise = -98,
740 .rate = 0, 739 .rate = 0,
741 }; 740 };
742 int index = rtlpci->rx_ring[rx_queue_idx].idx; 741 int index = rtlpci->rx_ring[rx_queue_idx].idx;
@@ -2009,7 +2008,6 @@ fail2:
2009fail1: 2008fail1:
2010 if (hw) 2009 if (hw)
2011 ieee80211_free_hw(hw); 2010 ieee80211_free_hw(hw);
2012 pci_set_drvdata(pdev, NULL);
2013 pci_disable_device(pdev); 2011 pci_disable_device(pdev);
2014 2012
2015 return err; 2013 return err;
@@ -2064,8 +2062,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
2064 2062
2065 rtl_pci_disable_aspm(hw); 2063 rtl_pci_disable_aspm(hw);
2066 2064
2067 pci_set_drvdata(pdev, NULL);
2068
2069 ieee80211_free_hw(hw); 2065 ieee80211_free_hw(hw);
2070} 2066}
2071EXPORT_SYMBOL(rtl_pci_disconnect); 2067EXPORT_SYMBOL(rtl_pci_disconnect);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index b68cae3024fc..e06971be7df7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -143,6 +143,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
143 } else { 143 } else {
144 rtlhal->fw_clk_change_in_progress = false; 144 rtlhal->fw_clk_change_in_progress = false;
145 spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); 145 spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
146 break;
146 } 147 }
147 } 148 }
148 149
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
index e655c0473225..d67f9c731cc4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
@@ -1136,34 +1136,6 @@ void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
1136 &bw40_pwr[0], channel); 1136 &bw40_pwr[0], channel);
1137} 1137}
1138 1138
1139void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
1140{
1141 struct rtl_priv *rtlpriv = rtl_priv(hw);
1142 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1143 enum io_type iotype;
1144
1145 if (!is_hal_stop(rtlhal)) {
1146 switch (operation) {
1147 case SCAN_OPT_BACKUP:
1148 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
1149 rtlpriv->cfg->ops->set_hw_reg(hw,
1150 HW_VAR_IO_CMD,
1151 (u8 *)&iotype);
1152 break;
1153 case SCAN_OPT_RESTORE:
1154 iotype = IO_CMD_RESUME_DM_BY_SCAN;
1155 rtlpriv->cfg->ops->set_hw_reg(hw,
1156 HW_VAR_IO_CMD,
1157 (u8 *)&iotype);
1158 break;
1159 default:
1160 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1161 "Unknown Scan Backup operation.\n");
1162 break;
1163 }
1164 }
1165}
1166
1167void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw) 1139void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
1168{ 1140{
1169 struct rtl_priv *rtlpriv = rtl_priv(hw); 1141 struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
index f1acd6d27e44..89f0f1ef1465 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
@@ -200,37 +200,35 @@ enum _ANT_DIV_TYPE {
200 CGCS_RX_SW_ANTDIV = 0x05, 200 CGCS_RX_SW_ANTDIV = 0x05,
201}; 201};
202 202
203extern u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, 203u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
204 u32 regaddr, u32 bitmask); 204 u32 regaddr, u32 bitmask);
205extern void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw, 205void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
206 u32 regaddr, u32 bitmask, u32 data); 206 u32 regaddr, u32 bitmask, u32 data);
207extern u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw, 207u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
208 enum radio_path rfpath, u32 regaddr, 208 enum radio_path rfpath, u32 regaddr,
209 u32 bitmask); 209 u32 bitmask);
210extern void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw, 210void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
211 enum radio_path rfpath, u32 regaddr, 211 enum radio_path rfpath, u32 regaddr,
212 u32 bitmask, u32 data); 212 u32 bitmask, u32 data);
213extern bool rtl88e_phy_mac_config(struct ieee80211_hw *hw); 213bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
214extern bool rtl88e_phy_bb_config(struct ieee80211_hw *hw); 214bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
215extern bool rtl88e_phy_rf_config(struct ieee80211_hw *hw); 215bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
216extern void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); 216void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
217extern void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw, 217void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
218 long *powerlevel); 218 long *powerlevel);
219extern void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); 219void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
220extern void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, 220void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
221 u8 operation); 221void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
222extern void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw); 222 enum nl80211_channel_type ch_type);
223extern void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw, 223void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
224 enum nl80211_channel_type ch_type); 224u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
225extern void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw); 225void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
226extern u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
227extern void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
228void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw); 226void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw);
229void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); 227void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
230bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 228bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
231 enum radio_path rfpath); 229 enum radio_path rfpath);
232bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); 230bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
233extern bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw, 231bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
234 enum rf_pwrstate rfpwr_state); 232 enum rf_pwrstate rfpwr_state);
235 233
236#endif 234#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index c254693a1e6a..347af1e4f438 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -30,6 +30,7 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../core.h" 31#include "../core.h"
32#include "../pci.h" 32#include "../pci.h"
33#include "../base.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "phy.h" 36#include "phy.h"
@@ -244,7 +245,7 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
244 .set_bw_mode = rtl88e_phy_set_bw_mode, 245 .set_bw_mode = rtl88e_phy_set_bw_mode,
245 .switch_channel = rtl88e_phy_sw_chnl, 246 .switch_channel = rtl88e_phy_sw_chnl,
246 .dm_watchdog = rtl88e_dm_watchdog, 247 .dm_watchdog = rtl88e_dm_watchdog,
247 .scan_operation_backup = rtl88e_phy_scan_operation_backup, 248 .scan_operation_backup = rtl_phy_scan_operation_backup,
248 .set_rf_power_state = rtl88e_phy_set_rf_power_state, 249 .set_rf_power_state = rtl88e_phy_set_rf_power_state,
249 .led_control = rtl88ee_led_control, 250 .led_control = rtl88ee_led_control,
250 .set_desc = rtl88ee_set_desc, 251 .set_desc = rtl88ee_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index 68685a898257..aece6c9cccf1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -478,7 +478,6 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
478 478
479 /*rx_status->qual = status->signal; */ 479 /*rx_status->qual = status->signal; */
480 rx_status->signal = status->recvsignalpower + 10; 480 rx_status->signal = status->recvsignalpower + 10;
481 /*rx_status->noise = -status->noise; */
482 if (status->packet_report_type == TX_REPORT2) { 481 if (status->packet_report_type == TX_REPORT2) {
483 status->macid_valid_entry[0] = 482 status->macid_valid_entry[0] =
484 GET_RX_RPT2_DESC_MACID_VALID_1(pdesc); 483 GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index d2d57a27a7c1..e9caa5d4cff0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -541,29 +541,6 @@ EXPORT_SYMBOL(rtl92c_dm_write_dig);
541 541
542static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw) 542static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
543{ 543{
544 struct rtl_priv *rtlpriv = rtl_priv(hw);
545 long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
546
547 u8 h2c_parameter[3] = { 0 };
548
549 return;
550
551 if (tmpentry_max_pwdb != 0) {
552 rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
553 } else {
554 rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
555 }
556
557 if (tmpentry_min_pwdb != 0xff) {
558 rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
559 } else {
560 rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
561 }
562
563 h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
564 h2c_parameter[0] = 0;
565
566 rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
567} 544}
568 545
569void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw) 546void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
@@ -673,7 +650,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
673 s8 cck_index = 0; 650 s8 cck_index = 0;
674 int i; 651 int i;
675 bool is2t = IS_92C_SERIAL(rtlhal->version); 652 bool is2t = IS_92C_SERIAL(rtlhal->version);
676 s8 txpwr_level[2] = {0, 0}; 653 s8 txpwr_level[3] = {0, 0, 0};
677 u8 ofdm_min_index = 6, rf; 654 u8 ofdm_min_index = 6, rf;
678 655
679 rtlpriv->dm.txpower_trackinginit = true; 656 rtlpriv->dm.txpower_trackinginit = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 246e5352f2e1..0c0e78263a66 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -592,36 +592,6 @@ long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
592} 592}
593EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm); 593EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
594 594
595void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
596{
597 struct rtl_priv *rtlpriv = rtl_priv(hw);
598 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
599 enum io_type iotype;
600
601 if (!is_hal_stop(rtlhal)) {
602 switch (operation) {
603 case SCAN_OPT_BACKUP:
604 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
605 rtlpriv->cfg->ops->set_hw_reg(hw,
606 HW_VAR_IO_CMD,
607 (u8 *)&iotype);
608
609 break;
610 case SCAN_OPT_RESTORE:
611 iotype = IO_CMD_RESUME_DM_BY_SCAN;
612 rtlpriv->cfg->ops->set_hw_reg(hw,
613 HW_VAR_IO_CMD,
614 (u8 *)&iotype);
615 break;
616 default:
617 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
618 "Unknown Scan Backup operation\n");
619 break;
620 }
621 }
622}
623EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
624
625void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw, 595void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
626 enum nl80211_channel_type ch_type) 596 enum nl80211_channel_type ch_type)
627{ 597{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
index cec10d696492..e79dabe9ba1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -39,9 +39,7 @@
39#define RT_CANNOT_IO(hw) false 39#define RT_CANNOT_IO(hw) false
40#define HIGHPOWER_RADIOA_ARRAYLEN 22 40#define HIGHPOWER_RADIOA_ARRAYLEN 22
41 41
42#define IQK_ADDA_REG_NUM 16
43#define MAX_TOLERANCE 5 42#define MAX_TOLERANCE 5
44#define IQK_DELAY_TIME 1
45 43
46#define APK_BB_REG_NUM 5 44#define APK_BB_REG_NUM 5
47#define APK_AFE_REG_NUM 16 45#define APK_AFE_REG_NUM 16
@@ -205,8 +203,6 @@ void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
205void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); 203void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
206bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, 204bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
207 long power_indbm); 205 long power_indbm);
208void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
209 u8 operation);
210void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw, 206void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
211 enum nl80211_channel_type ch_type); 207 enum nl80211_channel_type ch_type);
212void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw); 208void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 3cfa1bb0f476..fa24de43ce79 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -152,8 +152,6 @@ enum version_8192c {
152#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \ 152#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
153 ((GET_CVID_CUT_VERSION(version)) ? false : true) : false) 153 ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
154#define IS_CHIP_VER_B(version) ((version & CHIP_VER_B) ? true : false) 154#define IS_CHIP_VER_B(version) ((version & CHIP_VER_B) ? true : false)
155#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
156 ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
157#define IS_92C_SERIAL(version) ((version & CHIP_92C_BITMASK) ? true : false) 155#define IS_92C_SERIAL(version) ((version & CHIP_92C_BITMASK) ? true : false)
158#define IS_CHIP_VENDOR_UMC(version) \ 156#define IS_CHIP_VENDOR_UMC(version) \
159 ((version & CHIP_VENDOR_UMC) ? true : false) 157 ((version & CHIP_VENDOR_UMC) ? true : false)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index d5e3b704f930..94486cca4000 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -39,9 +39,7 @@
39#define RT_CANNOT_IO(hw) false 39#define RT_CANNOT_IO(hw) false
40#define HIGHPOWER_RADIOA_ARRAYLEN 22 40#define HIGHPOWER_RADIOA_ARRAYLEN 22
41 41
42#define IQK_ADDA_REG_NUM 16
43#define MAX_TOLERANCE 5 42#define MAX_TOLERANCE 5
44#define IQK_DELAY_TIME 1
45 43
46#define APK_BB_REG_NUM 5 44#define APK_BB_REG_NUM 5
47#define APK_AFE_REG_NUM 16 45#define APK_AFE_REG_NUM 16
@@ -188,36 +186,29 @@ struct tx_power_struct {
188}; 186};
189 187
190bool rtl92c_phy_bb_config(struct ieee80211_hw *hw); 188bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
191u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, 189u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
192 u32 regaddr, u32 bitmask); 190void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
193void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, 191 u32 data);
194 u32 regaddr, u32 bitmask, u32 data); 192u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
195u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, 193 u32 regaddr, u32 bitmask);
196 enum radio_path rfpath, u32 regaddr, 194void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
197 u32 bitmask); 195 u32 regaddr, u32 bitmask, u32 data);
198extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
199 enum radio_path rfpath, u32 regaddr,
200 u32 bitmask, u32 data);
201bool rtl92c_phy_mac_config(struct ieee80211_hw *hw); 196bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
202bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw); 197bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
203bool rtl92c_phy_rf_config(struct ieee80211_hw *hw); 198bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
204bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw, 199bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
205 enum radio_path rfpath); 200 enum radio_path rfpath);
206void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); 201void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
207void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, 202void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel);
208 long *powerlevel);
209void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); 203void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
210bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, 204bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
211 long power_indbm); 205 long power_indbm);
212void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
213 u8 operation);
214void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw, 206void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
215 enum nl80211_channel_type ch_type); 207 enum nl80211_channel_type ch_type);
216void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw); 208void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
217u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw); 209u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
218void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); 210void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
219void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, 211void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval);
220 u16 beaconinterval);
221void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); 212void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
222void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw); 213void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
223void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t); 214void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
@@ -225,28 +216,25 @@ void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
225bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 216bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
226 enum radio_path rfpath); 217 enum radio_path rfpath);
227bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, 218bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
228 u32 rfpath); 219 u32 rfpath);
229bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
230bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, 220bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
231 enum rf_pwrstate rfpwr_state); 221 enum rf_pwrstate rfpwr_state);
232void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw); 222void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
233bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); 223bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
234void rtl92c_phy_set_io(struct ieee80211_hw *hw); 224void rtl92c_phy_set_io(struct ieee80211_hw *hw);
235void rtl92c_bb_block_on(struct ieee80211_hw *hw); 225void rtl92c_bb_block_on(struct ieee80211_hw *hw);
236u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, 226u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
237 enum radio_path rfpath, u32 offset); 227 u32 offset);
238u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, 228u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
239 enum radio_path rfpath, u32 offset); 229 enum radio_path rfpath, u32 offset);
240u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask); 230u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
241void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, 231void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
242 enum radio_path rfpath, u32 offset, 232 enum radio_path rfpath, u32 offset, u32 data);
243 u32 data);
244void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, 233void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
245 enum radio_path rfpath, u32 offset, 234 enum radio_path rfpath, u32 offset,
246 u32 data); 235 u32 data);
247void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, 236void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
248 u32 regaddr, u32 bitmask, 237 u32 regaddr, u32 bitmask, u32 data);
249 u32 data);
250bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); 238bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
251void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); 239void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
252bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw); 240bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index bd4aef74c056..8922ecb47ad2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -560,7 +560,6 @@
560#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22 560#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
561#define EEPROM_DEFAULT_HT40_2SDIFF 0x0 561#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
562#define EEPROM_DEFAULT_HT20_DIFF 2 562#define EEPROM_DEFAULT_HT20_DIFF 2
563#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
564#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0 563#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
565#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0 564#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
566 565
@@ -639,17 +638,8 @@
639 638
640#define EEPROM_TXPWR_GROUP 0x6F 639#define EEPROM_TXPWR_GROUP 0x6F
641 640
642#define EEPROM_TSSI_A 0x76
643#define EEPROM_TSSI_B 0x77
644#define EEPROM_THERMAL_METER 0x78
645
646#define EEPROM_CHANNELPLAN 0x75 641#define EEPROM_CHANNELPLAN 0x75
647 642
648#define RF_OPTION1 0x79
649#define RF_OPTION2 0x7A
650#define RF_OPTION3 0x7B
651#define RF_OPTION4 0x7C
652
653#define STOPBECON BIT(6) 643#define STOPBECON BIT(6)
654#define STOPHIGHT BIT(5) 644#define STOPHIGHT BIT(5)
655#define STOPMGT BIT(4) 645#define STOPMGT BIT(4)
@@ -689,13 +679,6 @@
689#define RSV_CTRL 0x001C 679#define RSV_CTRL 0x001C
690#define RD_CTRL 0x0524 680#define RD_CTRL 0x0524
691 681
692#define REG_USB_INFO 0xFE17
693#define REG_USB_SPECIAL_OPTION 0xFE55
694
695#define REG_USB_DMA_AGG_TO 0xFE5B
696#define REG_USB_AGG_TO 0xFE5C
697#define REG_USB_AGG_TH 0xFE5D
698
699#define REG_USB_VID 0xFE60 682#define REG_USB_VID 0xFE60
700#define REG_USB_PID 0xFE62 683#define REG_USB_PID 0xFE62
701#define REG_USB_OPTIONAL 0xFE64 684#define REG_USB_OPTIONAL 0xFE64
@@ -1196,9 +1179,6 @@
1196#define POLLING_LLT_THRESHOLD 20 1179#define POLLING_LLT_THRESHOLD 20
1197#define POLLING_READY_TIMEOUT_COUNT 1000 1180#define POLLING_READY_TIMEOUT_COUNT 1000
1198 1181
1199#define MAX_MSS_DENSITY_2T 0x13
1200#define MAX_MSS_DENSITY_1T 0x0A
1201
1202#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6)) 1182#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
1203#define EPROM_CMD_CONFIG 0x3 1183#define EPROM_CMD_CONFIG 0x3
1204#define EPROM_CMD_LOAD 1 1184#define EPROM_CMD_LOAD 1
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
index 6c8d56efceae..d8fe68b389d2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -34,11 +34,10 @@
34#define RF6052_MAX_REG 0x3F 34#define RF6052_MAX_REG 0x3F
35#define RF6052_MAX_PATH 2 35#define RF6052_MAX_PATH 2
36 36
37extern void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, 37void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
38 u8 bandwidth); 38void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
39extern void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, 39 u8 *ppowerlevel);
40 u8 *ppowerlevel); 40void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
41extern void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 41 u8 *ppowerlevel, u8 channel);
42 u8 *ppowerlevel, u8 channel); 42bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
43extern bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
44#endif 43#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 14203561b6ee..b790320d2030 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -30,6 +30,7 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../core.h" 31#include "../core.h"
32#include "../pci.h" 32#include "../pci.h"
33#include "../base.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "phy.h" 36#include "phy.h"
@@ -219,7 +220,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
219 .set_bw_mode = rtl92c_phy_set_bw_mode, 220 .set_bw_mode = rtl92c_phy_set_bw_mode,
220 .switch_channel = rtl92c_phy_sw_chnl, 221 .switch_channel = rtl92c_phy_sw_chnl,
221 .dm_watchdog = rtl92c_dm_watchdog, 222 .dm_watchdog = rtl92c_dm_watchdog,
222 .scan_operation_backup = rtl92c_phy_scan_operation_backup, 223 .scan_operation_backup = rtl_phy_scan_operation_backup,
223 .set_rf_power_state = rtl92c_phy_set_rf_power_state, 224 .set_rf_power_state = rtl92c_phy_set_rf_power_state,
224 .led_control = rtl92ce_led_control, 225 .led_control = rtl92ce_led_control,
225 .set_desc = rtl92ce_set_desc, 226 .set_desc = rtl92ce_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 6ad23b413eb3..52abf0a862fa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -420,7 +420,6 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
420 420
421 /*rx_status->qual = stats->signal; */ 421 /*rx_status->qual = stats->signal; */
422 rx_status->signal = stats->recvsignalpower + 10; 422 rx_status->signal = stats->recvsignalpower + 10;
423 /*rx_status->noise = -stats->noise; */
424 423
425 return true; 424 return true;
426} 425}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index da4f587199ee..393685390f3e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -32,6 +32,7 @@
32#include "../usb.h" 32#include "../usb.h"
33#include "../ps.h" 33#include "../ps.h"
34#include "../cam.h" 34#include "../cam.h"
35#include "../stats.h"
35#include "reg.h" 36#include "reg.h"
36#include "def.h" 37#include "def.h"
37#include "phy.h" 38#include "phy.h"
@@ -738,16 +739,6 @@ static u8 _rtl92c_evm_db_to_percentage(char value)
738 return ret_val; 739 return ret_val;
739} 740}
740 741
741static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
742 u8 signal_strength_index)
743{
744 long signal_power;
745
746 signal_power = (long)((signal_strength_index + 1) >> 1);
747 signal_power -= 95;
748 return signal_power;
749}
750
751static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw, 742static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
752 long currsig) 743 long currsig)
753{ 744{
@@ -913,180 +904,6 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
913 (hw, total_rssi /= rf_rx_num)); 904 (hw, total_rssi /= rf_rx_num));
914} 905}
915 906
916static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
917 struct rtl_stats *pstats)
918{
919 struct rtl_priv *rtlpriv = rtl_priv(hw);
920 struct rtl_phy *rtlphy = &(rtlpriv->phy);
921 u8 rfpath;
922 u32 last_rssi, tmpval;
923
924 if (pstats->packet_toself || pstats->packet_beacon) {
925 rtlpriv->stats.rssi_calculate_cnt++;
926 if (rtlpriv->stats.ui_rssi.total_num++ >=
927 PHY_RSSI_SLID_WIN_MAX) {
928 rtlpriv->stats.ui_rssi.total_num =
929 PHY_RSSI_SLID_WIN_MAX;
930 last_rssi =
931 rtlpriv->stats.ui_rssi.elements[rtlpriv->
932 stats.ui_rssi.index];
933 rtlpriv->stats.ui_rssi.total_val -= last_rssi;
934 }
935 rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
936 rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
937 index++] = pstats->signalstrength;
938 if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
939 rtlpriv->stats.ui_rssi.index = 0;
940 tmpval = rtlpriv->stats.ui_rssi.total_val /
941 rtlpriv->stats.ui_rssi.total_num;
942 rtlpriv->stats.signal_strength =
943 _rtl92c_translate_todbm(hw, (u8) tmpval);
944 pstats->rssi = rtlpriv->stats.signal_strength;
945 }
946 if (!pstats->is_cck && pstats->packet_toself) {
947 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
948 rfpath++) {
949 if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
950 continue;
951 if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
952 rtlpriv->stats.rx_rssi_percentage[rfpath] =
953 pstats->rx_mimo_signalstrength[rfpath];
954 }
955 if (pstats->rx_mimo_signalstrength[rfpath] >
956 rtlpriv->stats.rx_rssi_percentage[rfpath]) {
957 rtlpriv->stats.rx_rssi_percentage[rfpath] =
958 ((rtlpriv->stats.
959 rx_rssi_percentage[rfpath] *
960 (RX_SMOOTH_FACTOR - 1)) +
961 (pstats->rx_mimo_signalstrength[rfpath])) /
962 (RX_SMOOTH_FACTOR);
963
964 rtlpriv->stats.rx_rssi_percentage[rfpath] =
965 rtlpriv->stats.rx_rssi_percentage[rfpath] +
966 1;
967 } else {
968 rtlpriv->stats.rx_rssi_percentage[rfpath] =
969 ((rtlpriv->stats.
970 rx_rssi_percentage[rfpath] *
971 (RX_SMOOTH_FACTOR - 1)) +
972 (pstats->rx_mimo_signalstrength[rfpath])) /
973 (RX_SMOOTH_FACTOR);
974 }
975 }
976 }
977}
978
979static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
980 struct rtl_stats *pstats)
981{
982 struct rtl_priv *rtlpriv = rtl_priv(hw);
983 int weighting = 0;
984
985 if (rtlpriv->stats.recv_signal_power == 0)
986 rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
987 if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
988 weighting = 5;
989 else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
990 weighting = (-5);
991 rtlpriv->stats.recv_signal_power =
992 (rtlpriv->stats.recv_signal_power * 5 +
993 pstats->recvsignalpower + weighting) / 6;
994}
995
996static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
997 struct rtl_stats *pstats)
998{
999 struct rtl_priv *rtlpriv = rtl_priv(hw);
1000 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1001 long undec_sm_pwdb = 0;
1002
1003 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
1004 return;
1005 } else {
1006 undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
1007 }
1008 if (pstats->packet_toself || pstats->packet_beacon) {
1009 if (undec_sm_pwdb < 0)
1010 undec_sm_pwdb = pstats->rx_pwdb_all;
1011 if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
1012 undec_sm_pwdb = (((undec_sm_pwdb) *
1013 (RX_SMOOTH_FACTOR - 1)) +
1014 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
1015 undec_sm_pwdb += 1;
1016 } else {
1017 undec_sm_pwdb = (((undec_sm_pwdb) *
1018 (RX_SMOOTH_FACTOR - 1)) +
1019 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
1020 }
1021 rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
1022 _rtl92c_update_rxsignalstatistics(hw, pstats);
1023 }
1024}
1025
1026static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
1027 struct rtl_stats *pstats)
1028{
1029 struct rtl_priv *rtlpriv = rtl_priv(hw);
1030 u32 last_evm = 0, n_stream, tmpval;
1031
1032 if (pstats->signalquality != 0) {
1033 if (pstats->packet_toself || pstats->packet_beacon) {
1034 if (rtlpriv->stats.LINK_Q.total_num++ >=
1035 PHY_LINKQUALITY_SLID_WIN_MAX) {
1036 rtlpriv->stats.LINK_Q.total_num =
1037 PHY_LINKQUALITY_SLID_WIN_MAX;
1038 last_evm =
1039 rtlpriv->stats.LINK_Q.elements
1040 [rtlpriv->stats.LINK_Q.index];
1041 rtlpriv->stats.LINK_Q.total_val -=
1042 last_evm;
1043 }
1044 rtlpriv->stats.LINK_Q.total_val +=
1045 pstats->signalquality;
1046 rtlpriv->stats.LINK_Q.elements
1047 [rtlpriv->stats.LINK_Q.index++] =
1048 pstats->signalquality;
1049 if (rtlpriv->stats.LINK_Q.index >=
1050 PHY_LINKQUALITY_SLID_WIN_MAX)
1051 rtlpriv->stats.LINK_Q.index = 0;
1052 tmpval = rtlpriv->stats.LINK_Q.total_val /
1053 rtlpriv->stats.LINK_Q.total_num;
1054 rtlpriv->stats.signal_quality = tmpval;
1055 rtlpriv->stats.last_sigstrength_inpercent = tmpval;
1056 for (n_stream = 0; n_stream < 2;
1057 n_stream++) {
1058 if (pstats->RX_SIGQ[n_stream] != -1) {
1059 if (!rtlpriv->stats.RX_EVM[n_stream]) {
1060 rtlpriv->stats.RX_EVM[n_stream]
1061 = pstats->RX_SIGQ[n_stream];
1062 }
1063 rtlpriv->stats.RX_EVM[n_stream] =
1064 ((rtlpriv->stats.RX_EVM
1065 [n_stream] *
1066 (RX_SMOOTH_FACTOR - 1)) +
1067 (pstats->RX_SIGQ
1068 [n_stream] * 1)) /
1069 (RX_SMOOTH_FACTOR);
1070 }
1071 }
1072 }
1073 } else {
1074 ;
1075 }
1076}
1077
1078static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
1079 u8 *buffer,
1080 struct rtl_stats *pcurrent_stats)
1081{
1082 if (!pcurrent_stats->packet_matchbssid &&
1083 !pcurrent_stats->packet_beacon)
1084 return;
1085 _rtl92c_process_ui_rssi(hw, pcurrent_stats);
1086 _rtl92c_process_pwdb(hw, pcurrent_stats);
1087 _rtl92c_process_LINK_Q(hw, pcurrent_stats);
1088}
1089
1090void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw, 907void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
1091 struct sk_buff *skb, 908 struct sk_buff *skb,
1092 struct rtl_stats *pstats, 909 struct rtl_stats *pstats,
@@ -1123,5 +940,5 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
1123 _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, 940 _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
1124 packet_matchbssid, packet_toself, 941 packet_matchbssid, packet_toself,
1125 packet_beacon); 942 packet_beacon);
1126 _rtl92c_process_phyinfo(hw, tmp_buf, pstats); 943 rtl_process_phyinfo(hw, tmp_buf, pstats);
1127} 944}
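
The hunk above drops the driver-local signal bookkeeping (_rtl92c_translate_todbm, _rtl92c_process_ui_rssi, _rtl92c_process_pwdb, _rtl92c_process_LINK_Q) and lets rtl92c_translate_rx_signal_stuff() call the shared rtl_process_phyinfo() instead. For reference, a minimal user-space sketch of the two calculations the deleted code performed, the sliding-window RSSI average and the index-to-dBm translation, follows; the window size, struct and function names are illustrative stand-ins, only the arithmetic mirrors the removed lines.

#include <stdio.h>

#define RSSI_WIN_MAX 100	/* stands in for PHY_RSSI_SLID_WIN_MAX */

struct rssi_window {
	unsigned int elements[RSSI_WIN_MAX];
	unsigned int total_num;	/* samples accumulated, capped at the window size */
	unsigned int total_val;	/* running sum of the samples in the window */
	unsigned int index;	/* next slot to overwrite */
};

/* signal_power = ((index + 1) >> 1) - 95, as in the removed translate helper */
static long translate_todbm(unsigned char signal_strength_index)
{
	return (long)((signal_strength_index + 1) >> 1) - 95;
}

/* push one sample and return the current window average */
static unsigned int rssi_window_update(struct rssi_window *w, unsigned int sample)
{
	if (w->total_num >= RSSI_WIN_MAX)
		w->total_val -= w->elements[w->index];	/* drop the oldest sample */
	else
		w->total_num++;

	w->total_val += sample;
	w->elements[w->index++] = sample;
	if (w->index >= RSSI_WIN_MAX)
		w->index = 0;

	return w->total_val / w->total_num;
}

int main(void)
{
	struct rssi_window w = { {0}, 0, 0, 0 };
	unsigned int avg = 0;
	int i;

	for (i = 0; i < 300; i++)
		avg = rssi_window_update(&w, 60 + (i % 5));
	printf("window average %u -> %ld dBm\n", avg, translate_todbm((unsigned char)avg));
	return 0;
}
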
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
index 090fd33a158d..11b439d6b671 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -34,15 +34,14 @@
34#define RF6052_MAX_REG 0x3F 34#define RF6052_MAX_REG 0x3F
35#define RF6052_MAX_PATH 2 35#define RF6052_MAX_PATH 2
36 36
37extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, 37void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
38 u8 bandwidth); 38void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
39extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, 39 u8 *ppowerlevel);
40 u8 *ppowerlevel); 40void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
41extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 41 u8 *ppowerlevel, u8 channel);
42 u8 *ppowerlevel, u8 channel);
43bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw); 42bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
44bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 43bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
45 enum radio_path rfpath); 44 enum radio_path rfpath);
46void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, 45void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
47 u8 *ppowerlevel); 46 u8 *ppowerlevel);
48void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 47void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 2bd598526217..9936de716ad5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -31,6 +31,7 @@
31#include "../core.h" 31#include "../core.h"
32#include "../usb.h" 32#include "../usb.h"
33#include "../efuse.h" 33#include "../efuse.h"
34#include "../base.h"
34#include "reg.h" 35#include "reg.h"
35#include "def.h" 36#include "def.h"
36#include "phy.h" 37#include "phy.h"
@@ -117,7 +118,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
117 .set_bw_mode = rtl92c_phy_set_bw_mode, 118 .set_bw_mode = rtl92c_phy_set_bw_mode,
118 .switch_channel = rtl92c_phy_sw_chnl, 119 .switch_channel = rtl92c_phy_sw_chnl,
119 .dm_watchdog = rtl92c_dm_watchdog, 120 .dm_watchdog = rtl92c_dm_watchdog,
120 .scan_operation_backup = rtl92c_phy_scan_operation_backup, 121 .scan_operation_backup = rtl_phy_scan_operation_backup,
121 .set_rf_power_state = rtl92cu_phy_set_rf_power_state, 122 .set_rf_power_state = rtl92cu_phy_set_rf_power_state,
122 .led_control = rtl92cu_led_control, 123 .led_control = rtl92cu_led_control,
123 .enable_hw_sec = rtl92cu_enable_hw_security_config, 124 .enable_hw_sec = rtl92cu_enable_hw_security_config,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 5a060e537fbe..25e50ffc44ec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -350,7 +350,6 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
350 } 350 }
351 /*rx_status->qual = stats->signal; */ 351 /*rx_status->qual = stats->signal; */
352 rx_status->signal = stats->rssi + 10; 352 rx_status->signal = stats->rssi + 10;
353 /*rx_status->noise = -stats->noise; */
354 return true; 353 return true;
355} 354}
356 355
@@ -365,7 +364,6 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
365 u8 *rxdesc; 364 u8 *rxdesc;
366 struct rtl_stats stats = { 365 struct rtl_stats stats = {
367 .signal = 0, 366 .signal = 0,
368 .noise = -98,
369 .rate = 0, 367 .rate = 0,
370 }; 368 };
371 struct rx_fwinfo_92c *p_drvinfo; 369 struct rx_fwinfo_92c *p_drvinfo;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index f700f7a614b2..7908e1c85819 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -840,9 +840,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
840 bool internal_pa = false; 840 bool internal_pa = false;
841 long ele_a = 0, ele_d, temp_cck, val_x, value32; 841 long ele_a = 0, ele_d, temp_cck, val_x, value32;
842 long val_y, ele_c = 0; 842 long val_y, ele_c = 0;
843 u8 ofdm_index[2]; 843 u8 ofdm_index[3];
844 s8 cck_index = 0; 844 s8 cck_index = 0;
845 u8 ofdm_index_old[2] = {0, 0}; 845 u8 ofdm_index_old[3] = {0, 0, 0};
846 s8 cck_index_old = 0; 846 s8 cck_index_old = 0;
847 u8 index; 847 u8 index;
848 int i; 848 int i;
@@ -1118,6 +1118,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
1118 val_x, val_y, ele_a, ele_c, ele_d, 1118 val_x, val_y, ele_a, ele_c, ele_d,
1119 val_x, val_y); 1119 val_x, val_y);
1120 1120
1121 if (cck_index >= CCK_TABLE_SIZE)
1122 cck_index = CCK_TABLE_SIZE - 1;
1123 if (cck_index < 0)
1124 cck_index = 0;
1121 if (rtlhal->current_bandtype == BAND_ON_2_4G) { 1125 if (rtlhal->current_bandtype == BAND_ON_2_4G) {
1122 /* Adjust CCK according to IQK result */ 1126 /* Adjust CCK according to IQK result */
1123 if (!rtlpriv->dm.cck_inch14) { 1127 if (!rtlpriv->dm.cck_inch14) {
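
The dm.c change above widens the ofdm_index arrays to three entries and clamps cck_index before it is used as a table index, so a drifting thermal reading can no longer walk past the end of the CCK swing table. A minimal sketch of that clamp, with a placeholder table size (the real CCK_TABLE_SIZE lives in the driver's dm.h, outside this diff):

#include <stdio.h>

#define CCK_TABLE_SIZE 33	/* placeholder, not necessarily the driver's value */

static int clamp_cck_index(int cck_index)
{
	/* same bounds check as the lines added above, applied before the
	 * index is used to address the CCK swing table */
	if (cck_index >= CCK_TABLE_SIZE)
		cck_index = CCK_TABLE_SIZE - 1;
	if (cck_index < 0)
		cck_index = 0;
	return cck_index;
}

int main(void)
{
	printf("%d %d %d\n", clamp_cck_index(-3), clamp_cck_index(12),
	       clamp_cck_index(40));
	return 0;
}
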
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 7dd8f6de0550..c4a7db9135d6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1194,25 +1194,7 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
1194 * mac80211 will send pkt when scan */ 1194 * mac80211 will send pkt when scan */
1195void rtl92de_set_qos(struct ieee80211_hw *hw, int aci) 1195void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
1196{ 1196{
1197 struct rtl_priv *rtlpriv = rtl_priv(hw);
1198 rtl92d_dm_init_edca_turbo(hw); 1197 rtl92d_dm_init_edca_turbo(hw);
1199 return;
1200 switch (aci) {
1201 case AC1_BK:
1202 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
1203 break;
1204 case AC0_BE:
1205 break;
1206 case AC2_VI:
1207 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
1208 break;
1209 case AC3_VO:
1210 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
1211 break;
1212 default:
1213 RT_ASSERT(false, "invalid aci: %d !\n", aci);
1214 break;
1215 }
1216} 1198}
1217 1199
1218void rtl92de_enable_interrupt(struct ieee80211_hw *hw) 1200void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
index 7c9f7a2f1e42..1bc7b1a96d4a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
@@ -55,10 +55,9 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
55 u8 *p_macaddr, bool is_group, u8 enc_algo, 55 u8 *p_macaddr, bool is_group, u8 enc_algo,
56 bool is_wepkey, bool clear_all); 56 bool is_wepkey, bool clear_all);
57 57
58extern void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, 58void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, u32 value,
59 u32 value, u8 direct); 59 u8 direct);
60extern u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset, 60u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset, u8 direct);
61 u8 direct);
62void rtl92de_suspend(struct ieee80211_hw *hw); 61void rtl92de_suspend(struct ieee80211_hw *hw);
63void rtl92de_resume(struct ieee80211_hw *hw); 62void rtl92de_resume(struct ieee80211_hw *hw);
64void rtl92d_linked_set_reg(struct ieee80211_hw *hw); 63void rtl92d_linked_set_reg(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 840bac5fa2f8..13196cc4b1d3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -1022,34 +1022,6 @@ void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
1022 rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel); 1022 rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
1023} 1023}
1024 1024
1025void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
1026{
1027 struct rtl_priv *rtlpriv = rtl_priv(hw);
1028 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1029 enum io_type iotype;
1030
1031 if (!is_hal_stop(rtlhal)) {
1032 switch (operation) {
1033 case SCAN_OPT_BACKUP:
1034 rtlhal->current_bandtypebackup =
1035 rtlhal->current_bandtype;
1036 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
1037 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
1038 (u8 *)&iotype);
1039 break;
1040 case SCAN_OPT_RESTORE:
1041 iotype = IO_CMD_RESUME_DM_BY_SCAN;
1042 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
1043 (u8 *)&iotype);
1044 break;
1045 default:
1046 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1047 "Unknown Scan Backup operation\n");
1048 break;
1049 }
1050 }
1051}
1052
1053void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw, 1025void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
1054 enum nl80211_channel_type ch_type) 1026 enum nl80211_channel_type ch_type)
1055{ 1027{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
index f074952bf25c..48d5c6835b6a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
@@ -39,9 +39,7 @@
39#define RT_CANNOT_IO(hw) false 39#define RT_CANNOT_IO(hw) false
40#define HIGHPOWER_RADIOA_ARRAYLEN 22 40#define HIGHPOWER_RADIOA_ARRAYLEN 22
41 41
42#define IQK_ADDA_REG_NUM 16
43#define MAX_TOLERANCE 5 42#define MAX_TOLERANCE 5
44#define IQK_DELAY_TIME 1
45 43
46#define APK_BB_REG_NUM 5 44#define APK_BB_REG_NUM 5
47#define APK_AFE_REG_NUM 16 45#define APK_AFE_REG_NUM 16
@@ -127,34 +125,32 @@ static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
127 *flag); 125 *flag);
128} 126}
129 127
130extern u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, 128u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
131 u32 regaddr, u32 bitmask); 129 u32 regaddr, u32 bitmask);
132extern void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw, 130void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
133 u32 regaddr, u32 bitmask, u32 data); 131 u32 regaddr, u32 bitmask, u32 data);
134extern u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw, 132u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
135 enum radio_path rfpath, u32 regaddr, 133 enum radio_path rfpath, u32 regaddr,
136 u32 bitmask); 134 u32 bitmask);
137extern void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, 135void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
138 enum radio_path rfpath, u32 regaddr, 136 enum radio_path rfpath, u32 regaddr,
139 u32 bitmask, u32 data); 137 u32 bitmask, u32 data);
140extern bool rtl92d_phy_mac_config(struct ieee80211_hw *hw); 138bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
141extern bool rtl92d_phy_bb_config(struct ieee80211_hw *hw); 139bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
142extern bool rtl92d_phy_rf_config(struct ieee80211_hw *hw); 140bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
143extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw, 141bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
144 enum radio_path rfpath); 142 enum radio_path rfpath);
145extern void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); 143void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
146extern void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); 144void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
147extern void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw, 145void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
148 u8 operation); 146 enum nl80211_channel_type ch_type);
149extern void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw, 147u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
150 enum nl80211_channel_type ch_type);
151extern u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
152bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 148bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
153 enum rf_content content, 149 enum rf_content content,
154 enum radio_path rfpath); 150 enum radio_path rfpath);
155bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); 151bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
156extern bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw, 152bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
157 enum rf_pwrstate rfpwr_state); 153 enum rf_pwrstate rfpwr_state);
158 154
159void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw); 155void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
160void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw); 156void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
@@ -173,6 +169,5 @@ void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
173 unsigned long *flag); 169 unsigned long *flag);
174u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl); 170u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
175void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel); 171void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
176void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
177 172
178#endif 173#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
index 0fe1a48593e8..7303d12c266f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
@@ -30,15 +30,13 @@
30#ifndef __RTL92D_RF_H__ 30#ifndef __RTL92D_RF_H__
31#define __RTL92D_RF_H__ 31#define __RTL92D_RF_H__
32 32
33extern void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, 33void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
34 u8 bandwidth); 34void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
35extern void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, 35 u8 *ppowerlevel);
36 u8 *ppowerlevel); 36void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
37extern void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 37 u8 *ppowerlevel, u8 channel);
38 u8 *ppowerlevel, u8 channel); 38bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
39extern bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw); 39bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
40extern bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0); 40void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
41extern void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw,
42 bool bmac0);
43 41
44#endif 42#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index c18c04bf0c13..edab5a5351b5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -30,6 +30,7 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../core.h" 31#include "../core.h"
32#include "../pci.h" 32#include "../pci.h"
33#include "../base.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "phy.h" 36#include "phy.h"
@@ -236,7 +237,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
236 .set_bw_mode = rtl92d_phy_set_bw_mode, 237 .set_bw_mode = rtl92d_phy_set_bw_mode,
237 .switch_channel = rtl92d_phy_sw_chnl, 238 .switch_channel = rtl92d_phy_sw_chnl,
238 .dm_watchdog = rtl92d_dm_watchdog, 239 .dm_watchdog = rtl92d_dm_watchdog,
239 .scan_operation_backup = rtl92d_phy_scan_operation_backup, 240 .scan_operation_backup = rtl_phy_scan_operation_backup,
240 .set_rf_power_state = rtl92d_phy_set_rf_power_state, 241 .set_rf_power_state = rtl92d_phy_set_rf_power_state,
241 .led_control = rtl92de_led_control, 242 .led_control = rtl92de_led_control,
242 .set_desc = rtl92de_set_desc, 243 .set_desc = rtl92de_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index b8ec718a0fab..945ddecf90c9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -526,7 +526,6 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
526 } 526 }
527 /*rx_status->qual = stats->signal; */ 527 /*rx_status->qual = stats->signal; */
528 rx_status->signal = stats->rssi + 10; 528 rx_status->signal = stats->rssi + 10;
529 /*rx_status->noise = -stats->noise; */
530 return true; 529 return true;
531} 530}
532 531
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index 84d1181795b8..c81c83591940 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -425,14 +425,9 @@
425#define EXT_IMEM_CODE_DONE BIT(2) 425#define EXT_IMEM_CODE_DONE BIT(2)
426#define IMEM_CHK_RPT BIT(1) 426#define IMEM_CHK_RPT BIT(1)
427#define IMEM_CODE_DONE BIT(0) 427#define IMEM_CODE_DONE BIT(0)
428#define IMEM_CODE_DONE BIT(0)
429#define IMEM_CHK_RPT BIT(1)
430#define EMEM_CODE_DONE BIT(2) 428#define EMEM_CODE_DONE BIT(2)
431#define EMEM_CHK_RPT BIT(3) 429#define EMEM_CHK_RPT BIT(3)
432#define DMEM_CODE_DONE BIT(4)
433#define IMEM_RDY BIT(5) 430#define IMEM_RDY BIT(5)
434#define BASECHG BIT(6)
435#define FWRDY BIT(7)
436#define LOAD_FW_READY (IMEM_CODE_DONE | \ 431#define LOAD_FW_READY (IMEM_CODE_DONE | \
437 IMEM_CHK_RPT | \ 432 IMEM_CHK_RPT | \
438 EMEM_CODE_DONE | \ 433 EMEM_CODE_DONE | \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index c7095118de6e..222d2e792ca6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -330,7 +330,6 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
330 330
331 /*rx_status->qual = stats->signal; */ 331 /*rx_status->qual = stats->signal; */
332 rx_status->signal = stats->rssi + 10; 332 rx_status->signal = stats->rssi + 10;
333 /*rx_status->noise = -stats->noise; */
334 333
335 return true; 334 return true;
336} 335}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index eafbb18dd48e..5d318a85eda4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -934,35 +934,6 @@ static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
934 return pwrout_dbm; 934 return pwrout_dbm;
935} 935}
936 936
937void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
938{
939 struct rtl_priv *rtlpriv = rtl_priv(hw);
940 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
941 enum io_type iotype;
942
943 if (!is_hal_stop(rtlhal)) {
944 switch (operation) {
945 case SCAN_OPT_BACKUP:
946 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
947 rtlpriv->cfg->ops->set_hw_reg(hw,
948 HW_VAR_IO_CMD,
949 (u8 *)&iotype);
950
951 break;
952 case SCAN_OPT_RESTORE:
953 iotype = IO_CMD_RESUME_DM_BY_SCAN;
954 rtlpriv->cfg->ops->set_hw_reg(hw,
955 HW_VAR_IO_CMD,
956 (u8 *)&iotype);
957 break;
958 default:
959 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
960 "Unknown Scan Backup operation.\n");
961 break;
962 }
963 }
964}
965
966void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw) 937void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
967{ 938{
968 struct rtl_priv *rtlpriv = rtl_priv(hw); 939 struct rtl_priv *rtlpriv = rtl_priv(hw);
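
The removed rtl8723ae_phy_scan_operation_backup() above (like the identical rtl8192d copy earlier in this diff) is replaced in the hal_ops tables by the shared rtl_phy_scan_operation_backup(). Its body is not part of this diff, so the sketch below only restates the pause/resume-DM-around-scan pattern the deleted per-driver copies implemented, with stub types and a printf standing in for the HW_VAR_IO_CMD register write.

#include <stdio.h>

/* names mirror the enums used by the removed per-driver copies */
enum scan_operation { SCAN_OPT_BACKUP, SCAN_OPT_RESTORE };
enum io_type { IO_CMD_PAUSE_DM_BY_SCAN, IO_CMD_RESUME_DM_BY_SCAN };

/* stands in for rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD, &iotype) */
static void issue_io_cmd(enum io_type iotype)
{
	printf("io cmd %d\n", iotype);
}

static void scan_operation_backup(int hal_started, enum scan_operation op)
{
	if (!hal_started)	/* mirrors the !is_hal_stop() guard */
		return;

	switch (op) {
	case SCAN_OPT_BACKUP:
		issue_io_cmd(IO_CMD_PAUSE_DM_BY_SCAN);
		break;
	case SCAN_OPT_RESTORE:
		issue_io_cmd(IO_CMD_RESUME_DM_BY_SCAN);
		break;
	}
}

int main(void)
{
	scan_operation_backup(1, SCAN_OPT_BACKUP);
	scan_operation_backup(1, SCAN_OPT_RESTORE);
	return 0;
}
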
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
index e7a59eba351a..007ebdbbe108 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
@@ -183,42 +183,40 @@ struct tx_power_struct {
183 u32 mcs_original_offset[4][16]; 183 u32 mcs_original_offset[4][16];
184}; 184};
185 185
186extern u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw, 186u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
187 u32 regaddr, u32 bitmask); 187 u32 regaddr, u32 bitmask);
188extern void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw, 188void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
189 u32 regaddr, u32 bitmask, u32 data); 189 u32 regaddr, u32 bitmask, u32 data);
190extern u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw, 190u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
191 enum radio_path rfpath, u32 regaddr, 191 enum radio_path rfpath, u32 regaddr,
192 u32 bitmask); 192 u32 bitmask);
193extern void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw, 193void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
194 enum radio_path rfpath, u32 regaddr, 194 enum radio_path rfpath, u32 regaddr,
195 u32 bitmask, u32 data); 195 u32 bitmask, u32 data);
196extern bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw); 196bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
197extern bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw); 197bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
198extern bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw); 198bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
199extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw, 199bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
200 enum radio_path rfpath); 200 enum radio_path rfpath);
201extern void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); 201void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
202extern void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, 202void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
203 long *powerlevel); 203 long *powerlevel);
204extern void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw, 204void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
205 u8 channel); 205 u8 channel);
206extern bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw, 206bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
207 long power_indbm); 207 long power_indbm);
208extern void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw, 208void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
209 u8 operation); 209void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
210extern void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw); 210 enum nl80211_channel_type ch_type);
211extern void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw, 211void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
212 enum nl80211_channel_type ch_type); 212u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
213extern void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw); 213void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
214extern u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
215extern void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
216void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw); 214void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw);
217void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); 215void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
218bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 216bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
219 enum radio_path rfpath); 217 enum radio_path rfpath);
220bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); 218bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
221extern bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw, 219bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
222 enum rf_pwrstate rfpwr_state); 220 enum rf_pwrstate rfpwr_state);
223 221
224#endif 222#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
index d0f9dd79abea..57f1933ee663 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
@@ -32,12 +32,11 @@
32 32
33#define RF6052_MAX_TX_PWR 0x3F 33#define RF6052_MAX_TX_PWR 0x3F
34 34
35extern void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, 35void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
36 u8 bandwidth); 36void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
37extern void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, 37 u8 *ppowerlevel);
38 u8 *ppowerlevel); 38void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
39extern void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 39 u8 *ppowerlevel, u8 channel);
40 u8 *ppowerlevel, u8 channel); 40bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
41extern bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
42 41
43#endif 42#endif
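
These header hunks all drop the extern keyword from function prototypes. A function declaration has external linkage by default, so the change is purely cosmetic; the tiny example below (with a made-up function name) shows that the two forms declare the same thing.

#include <stdio.h>

/* hypothetical prototypes, just to show the equivalence: a function
 * declaration has external linkage whether or not 'extern' is written */
extern void demo_set_bandwidth(unsigned char bandwidth);
void demo_set_bandwidth(unsigned char bandwidth);	/* same declaration */

void demo_set_bandwidth(unsigned char bandwidth)
{
	printf("bandwidth %u\n", bandwidth);
}

int main(void)
{
	demo_set_bandwidth(40);
	return 0;
}
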
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index d9ee2efffe5f..62b204faf773 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -33,6 +33,7 @@
33 33
34#include "../core.h" 34#include "../core.h"
35#include "../pci.h" 35#include "../pci.h"
36#include "../base.h"
36#include "reg.h" 37#include "reg.h"
37#include "def.h" 38#include "def.h"
38#include "phy.h" 39#include "phy.h"
@@ -220,7 +221,7 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
220 .set_bw_mode = rtl8723ae_phy_set_bw_mode, 221 .set_bw_mode = rtl8723ae_phy_set_bw_mode,
221 .switch_channel = rtl8723ae_phy_sw_chnl, 222 .switch_channel = rtl8723ae_phy_sw_chnl,
222 .dm_watchdog = rtl8723ae_dm_watchdog, 223 .dm_watchdog = rtl8723ae_dm_watchdog,
223 .scan_operation_backup = rtl8723ae_phy_scan_operation_backup, 224 .scan_operation_backup = rtl_phy_scan_operation_backup,
224 .set_rf_power_state = rtl8723ae_phy_set_rf_power_state, 225 .set_rf_power_state = rtl8723ae_phy_set_rf_power_state,
225 .led_control = rtl8723ae_led_control, 226 .led_control = rtl8723ae_led_control,
226 .set_desc = rtl8723ae_set_desc, 227 .set_desc = rtl8723ae_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index bcd82a1020a5..50b7be3f3a60 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -359,7 +359,6 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
359 359
360 /*rx_status->qual = status->signal; */ 360 /*rx_status->qual = status->signal; */
361 rx_status->signal = status->recvsignalpower + 10; 361 rx_status->signal = status->recvsignalpower + 10;
362 /*rx_status->noise = -status->noise; */
363 362
364 return true; 363 return true;
365} 364}
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index e56778cac9bf..6e2b5c5c83c8 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -455,7 +455,6 @@ static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
455 struct ieee80211_rx_status rx_status = {0}; 455 struct ieee80211_rx_status rx_status = {0};
456 struct rtl_stats stats = { 456 struct rtl_stats stats = {
457 .signal = 0, 457 .signal = 0,
458 .noise = -98,
459 .rate = 0, 458 .rate = 0,
460 }; 459 };
461 460
@@ -498,7 +497,6 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
498 struct ieee80211_rx_status rx_status = {0}; 497 struct ieee80211_rx_status rx_status = {0};
499 struct rtl_stats stats = { 498 struct rtl_stats stats = {
500 .signal = 0, 499 .signal = 0,
501 .noise = -98,
502 .rate = 0, 500 .rate = 0,
503 }; 501 };
504 502
@@ -582,12 +580,15 @@ static void _rtl_rx_work(unsigned long param)
582static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr, 580static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
583 unsigned int len) 581 unsigned int len)
584{ 582{
583#if NET_IP_ALIGN != 0
585 unsigned int padding = 0; 584 unsigned int padding = 0;
585#endif
586 586
587 /* make function no-op when possible */ 587 /* make function no-op when possible */
588 if (NET_IP_ALIGN == 0 || len < sizeof(*hdr)) 588 if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
589 return 0; 589 return 0;
590 590
591#if NET_IP_ALIGN != 0
591 /* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */ 592 /* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
592 /* TODO: deduplicate common code, define helper function instead? */ 593 /* TODO: deduplicate common code, define helper function instead? */
593 594
@@ -608,6 +609,7 @@ static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
608 padding ^= NET_IP_ALIGN; 609 padding ^= NET_IP_ALIGN;
609 610
610 return padding; 611 return padding;
612#endif
611} 613}
612 614
613#define __RADIO_TAP_SIZE_RSV 32 615#define __RADIO_TAP_SIZE_RSV 32
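
The usb.c hunk above fences the padding variable and the tail of _rtl_rx_get_padding() with #if NET_IP_ALIGN != 0, so on architectures where NET_IP_ALIGN is 0 the function reduces to the early return without an unused-variable warning. The driver's exact padding formula sits mostly outside the lines shown, so the sketch below only illustrates the general idea, reserving NET_IP_ALIGN bytes of headroom when the 802.11 header length would otherwise leave the following IP header misaligned; the formula here is an assumption, not the driver's code.

#include <stdio.h>

#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN 2	/* typical value; 0 on arches with cheap unaligned loads */
#endif

/* illustrative only: pad when the header length is not a multiple of 4,
 * so the payload that follows ends up 4-byte aligned */
static unsigned int rx_padding_for(unsigned int hdrlen)
{
	if (NET_IP_ALIGN == 0)
		return 0;	/* the real helper is meant to be a no-op here */
	return (hdrlen & 3) ? NET_IP_ALIGN : 0;
}

int main(void)
{
	printf("24-byte hdr -> %u, 26-byte QoS hdr -> %u\n",
	       rx_padding_for(24), rx_padding_for(26));
	return 0;
}
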
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 703258742d28..d224dc3bb092 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -192,8 +192,6 @@ enum hardware_type {
192(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal)) 192(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
193#define IS_HARDWARE_TYPE_8723(rtlhal) \ 193#define IS_HARDWARE_TYPE_8723(rtlhal) \
194(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal)) 194(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
195#define IS_HARDWARE_TYPE_8723U(rtlhal) \
196 (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
197 195
198#define RX_HAL_IS_CCK_RATE(_pdesc)\ 196#define RX_HAL_IS_CCK_RATE(_pdesc)\
199 (_pdesc->rxmcs == DESC92_RATE1M || \ 197 (_pdesc->rxmcs == DESC92_RATE1M || \
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index c7dc6feab2ff..1342f81e683d 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -243,7 +243,7 @@ static int wl1251_spi_probe(struct spi_device *spi)
243 struct wl1251 *wl; 243 struct wl1251 *wl;
244 int ret; 244 int ret;
245 245
246 pdata = spi->dev.platform_data; 246 pdata = dev_get_platdata(&spi->dev);
247 if (!pdata) { 247 if (!pdata) {
248 wl1251_error("no platform data"); 248 wl1251_error("no platform data");
249 return -ENODEV; 249 return -ENODEV;
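
The probe path above now fetches the platform data through dev_get_platdata(&spi->dev) instead of reading spi->dev.platform_data directly. The accessor comes from <linux/device.h> and is essentially a wrapper around the same field; a user-space mock of what it does, with a toy struct device:

#include <stdio.h>

/* minimal stand-in for struct device, just to show what the accessor returns */
struct device {
	void *platform_data;
};

/* equivalent of the <linux/device.h> helper used in the probe path above */
static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}

int main(void)
{
	int pdata = 42;
	struct device dev = { .platform_data = &pdata };

	printf("%d\n", *(int *)dev_get_platdata(&dev));
	return 0;
}
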
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index fd02060038de..2c3bd1bff3f6 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -424,8 +424,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
424#define CHIP_ID_1271_PG10 (0x4030101) 424#define CHIP_ID_1271_PG10 (0x4030101)
425#define CHIP_ID_1271_PG20 (0x4030111) 425#define CHIP_ID_1271_PG20 (0x4030111)
426 426
427#define WL1251_FW_NAME "wl1251-fw.bin" 427#define WL1251_FW_NAME "ti-connectivity/wl1251-fw.bin"
428#define WL1251_NVS_NAME "wl1251-nvs.bin" 428#define WL1251_NVS_NAME "ti-connectivity/wl1251-nvs.bin"
429 429
430#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */ 430#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
431 431
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 1c627da85083..591526b99154 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1704,7 +1704,7 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
1704static int wl12xx_setup(struct wl1271 *wl) 1704static int wl12xx_setup(struct wl1271 *wl)
1705{ 1705{
1706 struct wl12xx_priv *priv = wl->priv; 1706 struct wl12xx_priv *priv = wl->priv;
1707 struct wlcore_platdev_data *pdev_data = wl->pdev->dev.platform_data; 1707 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
1708 struct wl12xx_platform_data *pdata = pdev_data->pdata; 1708 struct wl12xx_platform_data *pdata = pdev_data->pdata;
1709 1709
1710 wl->rtable = wl12xx_rtable; 1710 wl->rtable = wl12xx_rtable;
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 7aa0eb848c5a..d0daca1d23bc 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -623,6 +623,18 @@ static const int wl18xx_rtable[REG_TABLE_LEN] = {
623 [REG_RAW_FW_STATUS_ADDR] = WL18XX_FW_STATUS_ADDR, 623 [REG_RAW_FW_STATUS_ADDR] = WL18XX_FW_STATUS_ADDR,
624}; 624};
625 625
626static const struct wl18xx_clk_cfg wl18xx_clk_table_coex[NUM_CLOCK_CONFIGS] = {
627 [CLOCK_CONFIG_16_2_M] = { 8, 121, 0, 0, false },
628 [CLOCK_CONFIG_16_368_M] = { 8, 120, 0, 0, false },
629 [CLOCK_CONFIG_16_8_M] = { 8, 117, 0, 0, false },
630 [CLOCK_CONFIG_19_2_M] = { 10, 128, 0, 0, false },
631 [CLOCK_CONFIG_26_M] = { 11, 104, 0, 0, false },
632 [CLOCK_CONFIG_32_736_M] = { 8, 120, 0, 0, false },
633 [CLOCK_CONFIG_33_6_M] = { 8, 117, 0, 0, false },
634 [CLOCK_CONFIG_38_468_M] = { 10, 128, 0, 0, false },
635 [CLOCK_CONFIG_52_M] = { 11, 104, 0, 0, false },
636};
637
626static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = { 638static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
627 [CLOCK_CONFIG_16_2_M] = { 7, 104, 801, 4, true }, 639 [CLOCK_CONFIG_16_2_M] = { 7, 104, 801, 4, true },
628 [CLOCK_CONFIG_16_368_M] = { 9, 132, 3751, 4, true }, 640 [CLOCK_CONFIG_16_368_M] = { 9, 132, 3751, 4, true },
@@ -704,6 +716,23 @@ static int wl18xx_set_clk(struct wl1271 *wl)
704 wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q, 716 wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
705 wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit"); 717 wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
706 718
719 /* coex PLL configuration */
720 ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_N,
721 wl18xx_clk_table_coex[clk_freq].n);
722 if (ret < 0)
723 goto out;
724
725 ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_M,
726 wl18xx_clk_table_coex[clk_freq].m);
727 if (ret < 0)
728 goto out;
729
730 /* bypass the swallowing logic */
731 ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
732 PLLSH_COEX_PLL_SWALLOW_EN_VAL1);
733 if (ret < 0)
734 goto out;
735
707 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N, 736 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
708 wl18xx_clk_table[clk_freq].n); 737 wl18xx_clk_table[clk_freq].n);
709 if (ret < 0) 738 if (ret < 0)
@@ -745,6 +774,30 @@ static int wl18xx_set_clk(struct wl1271 *wl)
745 PLLSH_WCS_PLL_SWALLOW_EN_VAL2); 774 PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
746 } 775 }
747 776
777 /* choose WCS PLL */
778 ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_SEL,
779 PLLSH_WL_PLL_SEL_WCS_PLL);
780 if (ret < 0)
781 goto out;
782
783 /* enable both PLLs */
784 ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL1);
785 if (ret < 0)
786 goto out;
787
788 udelay(1000);
789
790 /* disable coex PLL */
791 ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL2);
792 if (ret < 0)
793 goto out;
794
795 /* reset the swallowing logic */
796 ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
797 PLLSH_COEX_PLL_SWALLOW_EN_VAL2);
798 if (ret < 0)
799 goto out;
800
748out: 801out:
749 return ret; 802 return ret;
750} 803}
@@ -1175,16 +1228,48 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
1175 } 1228 }
1176} 1229}
1177 1230
1231static const char *wl18xx_rdl_name(enum wl18xx_rdl_num rdl_num)
1232{
1233 switch (rdl_num) {
1234 case RDL_1_HP:
1235 return "183xH";
1236 case RDL_2_SP:
1237 return "183x or 180x";
1238 case RDL_3_HP:
1239 return "187xH";
1240 case RDL_4_SP:
1241 return "187x";
1242 case RDL_5_SP:
1243 return "RDL11 - Not Supported";
1244 case RDL_6_SP:
1245 return "180xD";
1246 case RDL_7_SP:
1247 return "RDL13 - Not Supported (1893Q)";
1248 case RDL_8_SP:
1249 return "18xxQ";
1250 case RDL_NONE:
1251 return "UNTRIMMED";
1252 default:
1253 return "UNKNOWN";
1254 }
1255}
1256
1178static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver) 1257static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
1179{ 1258{
1180 u32 fuse; 1259 u32 fuse;
1181 s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0; 1260 s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0, package_type = 0;
1182 int ret; 1261 int ret;
1183 1262
1184 ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]); 1263 ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
1185 if (ret < 0) 1264 if (ret < 0)
1186 goto out; 1265 goto out;
1187 1266
1267 ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_2_3, &fuse);
1268 if (ret < 0)
1269 goto out;
1270
1271 package_type = (fuse >> WL18XX_PACKAGE_TYPE_OFFSET) & 1;
1272
1188 ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse); 1273 ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
1189 if (ret < 0) 1274 if (ret < 0)
1190 goto out; 1275 goto out;
@@ -1192,7 +1277,7 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
1192 pg_ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET; 1277 pg_ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
1193 rom = (fuse & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET; 1278 rom = (fuse & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
1194 1279
1195 if (rom <= 0xE) 1280 if ((rom <= 0xE) && (package_type == WL18XX_PACKAGE_TYPE_WSP))
1196 metal = (fuse & WL18XX_METAL_VER_MASK) >> 1281 metal = (fuse & WL18XX_METAL_VER_MASK) >>
1197 WL18XX_METAL_VER_OFFSET; 1282 WL18XX_METAL_VER_OFFSET;
1198 else 1283 else
@@ -1204,11 +1289,9 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
1204 goto out; 1289 goto out;
1205 1290
1206 rdl_ver = (fuse & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET; 1291 rdl_ver = (fuse & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;
1207 if (rdl_ver > RDL_MAX)
1208 rdl_ver = RDL_NONE;
1209 1292
1210 wl1271_info("wl18xx HW: RDL %d, %s, PG %x.%x (ROM %x)", 1293 wl1271_info("wl18xx HW: %s, PG %d.%d (ROM 0x%x)",
1211 rdl_ver, rdl_names[rdl_ver], pg_ver, metal, rom); 1294 wl18xx_rdl_name(rdl_ver), pg_ver, metal, rom);
1212 1295
1213 if (ver) 1296 if (ver)
1214 *ver = pg_ver; 1297 *ver = pg_ver;
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index 05dd8bad2746..a433a75f3cd7 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -114,6 +114,11 @@
114#define PLATFORM_DETECTION 0xA0E3E0 114#define PLATFORM_DETECTION 0xA0E3E0
115#define OCS_EN 0xA02080 115#define OCS_EN 0xA02080
116#define PRIMARY_CLK_DETECT 0xA020A6 116#define PRIMARY_CLK_DETECT 0xA020A6
117#define PLLSH_COEX_PLL_N 0xA02384
118#define PLLSH_COEX_PLL_M 0xA02382
119#define PLLSH_COEX_PLL_SWALLOW_EN 0xA0238E
120#define PLLSH_WL_PLL_SEL 0xA02398
121
117#define PLLSH_WCS_PLL_N 0xA02362 122#define PLLSH_WCS_PLL_N 0xA02362
118#define PLLSH_WCS_PLL_M 0xA02360 123#define PLLSH_WCS_PLL_M 0xA02360
119#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1 0xA02364 124#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1 0xA02364
@@ -128,19 +133,30 @@
128#define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK 0xFFFF 133#define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK 0xFFFF
129#define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK 0x000F 134#define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK 0x000F
130 135
136#define PLLSH_WL_PLL_EN_VAL1 0x7
137#define PLLSH_WL_PLL_EN_VAL2 0x2
138#define PLLSH_COEX_PLL_SWALLOW_EN_VAL1 0x2
139#define PLLSH_COEX_PLL_SWALLOW_EN_VAL2 0x11
140
131#define PLLSH_WCS_PLL_SWALLOW_EN_VAL1 0x1 141#define PLLSH_WCS_PLL_SWALLOW_EN_VAL1 0x1
132#define PLLSH_WCS_PLL_SWALLOW_EN_VAL2 0x12 142#define PLLSH_WCS_PLL_SWALLOW_EN_VAL2 0x12
133 143
144#define PLLSH_WL_PLL_SEL_WCS_PLL 0x0
145#define PLLSH_WL_PLL_SEL_COEX_PLL 0x1
146
134#define WL18XX_REG_FUSE_DATA_1_3 0xA0260C 147#define WL18XX_REG_FUSE_DATA_1_3 0xA0260C
135#define WL18XX_PG_VER_MASK 0x70 148#define WL18XX_PG_VER_MASK 0x70
136#define WL18XX_PG_VER_OFFSET 4 149#define WL18XX_PG_VER_OFFSET 4
137#define WL18XX_ROM_VER_MASK 0x3 150#define WL18XX_ROM_VER_MASK 0x3e00
138#define WL18XX_ROM_VER_OFFSET 0 151#define WL18XX_ROM_VER_OFFSET 9
139#define WL18XX_METAL_VER_MASK 0xC 152#define WL18XX_METAL_VER_MASK 0xC
140#define WL18XX_METAL_VER_OFFSET 2 153#define WL18XX_METAL_VER_OFFSET 2
141#define WL18XX_NEW_METAL_VER_MASK 0x180 154#define WL18XX_NEW_METAL_VER_MASK 0x180
142#define WL18XX_NEW_METAL_VER_OFFSET 7 155#define WL18XX_NEW_METAL_VER_OFFSET 7
143 156
157#define WL18XX_PACKAGE_TYPE_OFFSET 13
158#define WL18XX_PACKAGE_TYPE_WSP 0
159
144#define WL18XX_REG_FUSE_DATA_2_3 0xA02614 160#define WL18XX_REG_FUSE_DATA_2_3 0xA02614
145#define WL18XX_RDL_VER_MASK 0x1f00 161#define WL18XX_RDL_VER_MASK 0x1f00
146#define WL18XX_RDL_VER_OFFSET 8 162#define WL18XX_RDL_VER_OFFSET 8
@@ -201,24 +217,21 @@ enum {
201 NUM_BOARD_TYPES, 217 NUM_BOARD_TYPES,
202}; 218};
203 219
204enum { 220enum wl18xx_rdl_num {
205 RDL_NONE = 0, 221 RDL_NONE = 0,
206 RDL_1_HP = 1, 222 RDL_1_HP = 1,
207 RDL_2_SP = 2, 223 RDL_2_SP = 2,
208 RDL_3_HP = 3, 224 RDL_3_HP = 3,
209 RDL_4_SP = 4, 225 RDL_4_SP = 4,
226 RDL_5_SP = 0x11,
227 RDL_6_SP = 0x12,
228 RDL_7_SP = 0x13,
229 RDL_8_SP = 0x14,
210 230
211 _RDL_LAST, 231 _RDL_LAST,
212 RDL_MAX = _RDL_LAST - 1, 232 RDL_MAX = _RDL_LAST - 1,
213}; 233};
214 234
215static const char * const rdl_names[] = {
216 [RDL_NONE] = "",
217 [RDL_1_HP] = "1853 SISO",
218 [RDL_2_SP] = "1857 MIMO",
219 [RDL_3_HP] = "1893 SISO",
220 [RDL_4_SP] = "1897 MIMO",
221};
222 235
223/* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */ 236/* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */
224#define WL18XX_PHY_FPGA_SPARE_1 0x8093CA40 237#define WL18XX_PHY_FPGA_SPARE_1 0x8093CA40
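
With the widened ROM mask/offset and the new package-type bit above, wl18xx_get_pg_ver() decodes the chip revision fields from the two fuse words read earlier in this diff. The sketch below replays that mask-and-shift decoding with made-up fuse values; only the masks and offsets are taken from the hunk above.

#include <stdio.h>

#define WL18XX_PG_VER_MASK		0x70
#define WL18XX_PG_VER_OFFSET		4
#define WL18XX_ROM_VER_MASK		0x3e00
#define WL18XX_ROM_VER_OFFSET		9
#define WL18XX_METAL_VER_MASK		0xC
#define WL18XX_METAL_VER_OFFSET		2
#define WL18XX_RDL_VER_MASK		0x1f00
#define WL18XX_RDL_VER_OFFSET		8
#define WL18XX_PACKAGE_TYPE_OFFSET	13

int main(void)
{
	/* fuse words would come from WL18XX_REG_FUSE_DATA_1_3 / _2_3; made up here */
	unsigned int fuse_1_3 = 0x1e34;
	unsigned int fuse_2_3 = 0x3200;

	unsigned int pg_ver = (fuse_1_3 & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
	unsigned int rom    = (fuse_1_3 & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
	unsigned int metal  = (fuse_1_3 & WL18XX_METAL_VER_MASK) >> WL18XX_METAL_VER_OFFSET;
	unsigned int rdl    = (fuse_2_3 & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;
	unsigned int pkg    = (fuse_2_3 >> WL18XX_PACKAGE_TYPE_OFFSET) & 1;

	printf("PG %u, ROM 0x%x, metal %u, RDL 0x%x, package %u\n",
	       pg_ver, rom, metal, rdl, pkg);
	return 0;
}
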
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index c9e060795d13..9e5416f8764d 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1126,6 +1126,8 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1126 u16 template_id_2_4 = wl->scan_templ_id_2_4; 1126 u16 template_id_2_4 = wl->scan_templ_id_2_4;
1127 u16 template_id_5 = wl->scan_templ_id_5; 1127 u16 template_id_5 = wl->scan_templ_id_5;
1128 1128
1129 wl1271_debug(DEBUG_SCAN, "build probe request band %d", band);
1130
1129 skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len, 1131 skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
1130 ie_len); 1132 ie_len);
1131 if (!skb) { 1133 if (!skb) {
@@ -1135,8 +1137,6 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1135 if (ie_len) 1137 if (ie_len)
1136 memcpy(skb_put(skb, ie_len), ie, ie_len); 1138 memcpy(skb_put(skb, ie_len), ie, ie_len);
1137 1139
1138 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
1139
1140 if (sched_scan && 1140 if (sched_scan &&
1141 (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) { 1141 (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
1142 template_id_2_4 = wl->sched_scan_templ_id_2_4; 1142 template_id_2_4 = wl->sched_scan_templ_id_2_4;
@@ -1172,7 +1172,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
1172 if (!skb) 1172 if (!skb)
1173 goto out; 1173 goto out;
1174 1174
1175 wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len); 1175 wl1271_debug(DEBUG_SCAN, "set ap probe request template");
1176 1176
1177 rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]); 1177 rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
1178 if (wlvif->band == IEEE80211_BAND_2GHZ) 1178 if (wlvif->band == IEEE80211_BAND_2GHZ)
@@ -1607,33 +1607,43 @@ out:
1607 1607
1608static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch) 1608static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
1609{ 1609{
1610 int idx = -1; 1610 /*
1611 1611 * map the given band/channel to the respective predefined
1612 * bit expected by the fw
1613 */
1612 switch (band) { 1614 switch (band) {
1613 case IEEE80211_BAND_5GHZ:
1614 if (ch >= 8 && ch <= 16)
1615 idx = ((ch-8)/4 + 18);
1616 else if (ch >= 34 && ch <= 64)
1617 idx = ((ch-34)/2 + 3 + 18);
1618 else if (ch >= 100 && ch <= 140)
1619 idx = ((ch-100)/4 + 15 + 18);
1620 else if (ch >= 149 && ch <= 165)
1621 idx = ((ch-149)/4 + 26 + 18);
1622 else
1623 idx = -1;
1624 break;
1625 case IEEE80211_BAND_2GHZ: 1615 case IEEE80211_BAND_2GHZ:
1616 /* channels 1..14 are mapped to 0..13 */
1626 if (ch >= 1 && ch <= 14) 1617 if (ch >= 1 && ch <= 14)
1627 idx = ch - 1; 1618 return ch - 1;
1628 else 1619 break;
1629 idx = -1; 1620 case IEEE80211_BAND_5GHZ:
1621 switch (ch) {
1622 case 8 ... 16:
1623 /* channels 8,12,16 are mapped to 18,19,20 */
1624 return 18 + (ch-8)/4;
1625 case 34 ... 48:
1626 /* channels 34,36..48 are mapped to 21..28 */
1627 return 21 + (ch-34)/2;
1628 case 52 ... 64:
1629 /* channels 52,56..64 are mapped to 29..32 */
1630 return 29 + (ch-52)/4;
1631 case 100 ... 140:
1632 /* channels 100,104..140 are mapped to 33..43 */
1633 return 33 + (ch-100)/4;
1634 case 149 ... 165:
1635 /* channels 149,153..165 are mapped to 44..48 */
1636 return 44 + (ch-149)/4;
1637 default:
1638 break;
1639 }
1630 break; 1640 break;
1631 default: 1641 default:
1632 wl1271_error("get reg conf ch idx - unknown band: %d", 1642 break;
1633 (int)band);
1634 } 1643 }
1635 1644
1636 return idx; 1645 wl1271_error("%s: unknown band/channel: %d/%d", __func__, band, ch);
1646 return -1;
1637} 1647}
1638 1648
1639void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel, 1649void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
@@ -1646,7 +1656,7 @@ void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
1646 1656
1647 ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel); 1657 ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
1648 1658
1649 if (ch_bit_idx > 0 && ch_bit_idx <= WL1271_MAX_CHANNELS) 1659 if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
1650 set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending); 1660 set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
1651} 1661}
1652 1662
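
The rewritten wlcore_get_reg_conf_ch_idx() above maps each supported band/channel pair to the bit index the firmware expects, and the caller's test becomes ch_bit_idx >= 0 so index 0 (2.4 GHz channel 1) is no longer discarded. A small self-test that reproduces the same arithmetic with plain if-ranges (the kernel code uses GCC case ranges):

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* same arithmetic as the rewritten wlcore_get_reg_conf_ch_idx() */
static int reg_conf_ch_idx(enum band band, int ch)
{
	if (band == BAND_2GHZ)
		return (ch >= 1 && ch <= 14) ? ch - 1 : -1;

	if (ch >= 8 && ch <= 16)	return 18 + (ch - 8) / 4;
	if (ch >= 34 && ch <= 48)	return 21 + (ch - 34) / 2;
	if (ch >= 52 && ch <= 64)	return 29 + (ch - 52) / 4;
	if (ch >= 100 && ch <= 140)	return 33 + (ch - 100) / 4;
	if (ch >= 149 && ch <= 165)	return 44 + (ch - 149) / 4;
	return -1;
}

int main(void)
{
	/* spot checks against the comments in the new code */
	printf("2.4GHz ch1  -> %d (expect 0)\n",  reg_conf_ch_idx(BAND_2GHZ, 1));
	printf("5GHz   ch16 -> %d (expect 20)\n", reg_conf_ch_idx(BAND_5GHZ, 16));
	printf("5GHz   ch36 -> %d (expect 22)\n", reg_conf_ch_idx(BAND_5GHZ, 36));
	printf("5GHz   ch64 -> %d (expect 32)\n", reg_conf_ch_idx(BAND_5GHZ, 64));
	printf("5GHz  ch140 -> %d (expect 43)\n", reg_conf_ch_idx(BAND_5GHZ, 140));
	printf("5GHz  ch165 -> %d (expect 48)\n", reg_conf_ch_idx(BAND_5GHZ, 165));
	return 0;
}
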
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 38995f90040d..bbdd10632373 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1062,7 +1062,8 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1062 static const char* const PLT_MODE[] = { 1062 static const char* const PLT_MODE[] = {
1063 "PLT_OFF", 1063 "PLT_OFF",
1064 "PLT_ON", 1064 "PLT_ON",
1065 "PLT_FEM_DETECT" 1065 "PLT_FEM_DETECT",
1066 "PLT_CHIP_AWAKE"
1066 }; 1067 };
1067 1068
1068 int ret; 1069 int ret;
@@ -1088,9 +1089,11 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1088 if (ret < 0) 1089 if (ret < 0)
1089 goto power_off; 1090 goto power_off;
1090 1091
1091 ret = wl->ops->plt_init(wl); 1092 if (plt_mode != PLT_CHIP_AWAKE) {
1092 if (ret < 0) 1093 ret = wl->ops->plt_init(wl);
1093 goto power_off; 1094 if (ret < 0)
1095 goto power_off;
1096 }
1094 1097
1095 wl->state = WLCORE_STATE_ON; 1098 wl->state = WLCORE_STATE_ON;
1096 wl1271_notice("firmware booted in PLT mode %s (%s)", 1099 wl1271_notice("firmware booted in PLT mode %s (%s)",
@@ -2008,6 +2011,47 @@ out:
2008 mutex_unlock(&wl->mutex); 2011 mutex_unlock(&wl->mutex);
2009} 2012}
2010 2013
2014static void wlcore_pending_auth_complete_work(struct work_struct *work)
2015{
2016 struct delayed_work *dwork;
2017 struct wl1271 *wl;
2018 struct wl12xx_vif *wlvif;
2019 unsigned long time_spare;
2020 int ret;
2021
2022 dwork = container_of(work, struct delayed_work, work);
2023 wlvif = container_of(dwork, struct wl12xx_vif,
2024 pending_auth_complete_work);
2025 wl = wlvif->wl;
2026
2027 mutex_lock(&wl->mutex);
2028
2029 if (unlikely(wl->state != WLCORE_STATE_ON))
2030 goto out;
2031
2032 /*
2033 * Make sure a second really passed since the last auth reply. Maybe
2034 * a second auth reply arrived while we were stuck on the mutex.
2035 * Check for a little less than the timeout to protect from scheduler
2036 * irregularities.
2037 */
2038 time_spare = jiffies +
2039 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2040 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2041 goto out;
2042
2043 ret = wl1271_ps_elp_wakeup(wl);
2044 if (ret < 0)
2045 goto out;
2046
2047 /* cancel the ROC if active */
2048 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2049
2050 wl1271_ps_elp_sleep(wl);
2051out:
2052 mutex_unlock(&wl->mutex);
2053}
2054
2011static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx) 2055static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2012{ 2056{
2013 u8 policy = find_first_zero_bit(wl->rate_policies_map, 2057 u8 policy = find_first_zero_bit(wl->rate_policies_map,
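
The new wlcore_pending_auth_complete_work() above recovers its wl12xx_vif from the work pointer with two container_of() steps (work_struct to delayed_work to vif). A minimal user-space rendering of that idiom, using a simplified container_of without the kernel's type checking and toy structures in place of the driver's:

#include <stdio.h>
#include <stddef.h>

/* simplified version of the kernel macro, enough for a user-space demo */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; long timer; };

/* toy stand-in for struct wl12xx_vif embedding the delayed work */
struct toy_vif {
	int id;
	struct delayed_work pending_auth_complete_work;
};

static void work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
	struct toy_vif *vif = container_of(dwork, struct toy_vif,
					   pending_auth_complete_work);

	printf("resolved vif id %d from its embedded work\n", vif->id);
}

int main(void)
{
	struct toy_vif vif = { .id = 7 };

	work_fn(&vif.pending_auth_complete_work.work);
	return 0;
}
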
@@ -2159,6 +2203,8 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2159 wlcore_channel_switch_work); 2203 wlcore_channel_switch_work);
2160 INIT_DELAYED_WORK(&wlvif->connection_loss_work, 2204 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2161 wlcore_connection_loss_work); 2205 wlcore_connection_loss_work);
2206 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2207 wlcore_pending_auth_complete_work);
2162 INIT_LIST_HEAD(&wlvif->list); 2208 INIT_LIST_HEAD(&wlvif->list);
2163 2209
2164 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 2210 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
@@ -2376,6 +2422,11 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2376 int ret = 0; 2422 int ret = 0;
2377 u8 role_type; 2423 u8 role_type;
2378 2424
2425 if (wl->plt) {
2426 wl1271_error("Adding Interface not allowed while in PLT mode");
2427 return -EBUSY;
2428 }
2429
2379 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 2430 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2380 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 2431 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2381 2432
@@ -2590,6 +2641,7 @@ unlock:
2590 cancel_work_sync(&wlvif->rx_streaming_disable_work); 2641 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2591 cancel_delayed_work_sync(&wlvif->connection_loss_work); 2642 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2592 cancel_delayed_work_sync(&wlvif->channel_switch_work); 2643 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2644 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2593 2645
2594 mutex_lock(&wl->mutex); 2646 mutex_lock(&wl->mutex);
2595} 2647}
@@ -2875,6 +2927,25 @@ static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2875 wlvif->rate_set = wlvif->basic_rate_set; 2927 wlvif->rate_set = wlvif->basic_rate_set;
2876} 2928}
2877 2929
2930static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2931 bool idle)
2932{
2933 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2934
2935 if (idle == cur_idle)
2936 return;
2937
2938 if (idle) {
2939 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2940 } else {
2941 /* The current firmware only supports sched_scan in idle */
2942 if (wl->sched_vif == wlvif)
2943 wl->ops->sched_scan_stop(wl, wlvif);
2944
2945 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2946 }
2947}
2948
2878static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif, 2949static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2879 struct ieee80211_conf *conf, u32 changed) 2950 struct ieee80211_conf *conf, u32 changed)
2880{ 2951{
@@ -3969,6 +4040,13 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3969 } 4040 }
3970 } else { 4041 } else {
3971 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { 4042 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4043 /*
4044 * AP might be in ROC in case we have just
4045 * sent auth reply. handle it.
4046 */
4047 if (test_bit(wlvif->role_id, wl->roc_map))
4048 wl12xx_croc(wl, wlvif->role_id);
4049
3972 ret = wl12xx_cmd_role_stop_ap(wl, wlvif); 4050 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3973 if (ret < 0) 4051 if (ret < 0)
3974 goto out; 4052 goto out;
@@ -4120,6 +4198,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4120 do_join = true; 4198 do_join = true;
4121 } 4199 }
4122 4200
4201 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4202 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4203
4123 if (changed & BSS_CHANGED_CQM) { 4204 if (changed & BSS_CHANGED_CQM) {
4124 bool enable = false; 4205 bool enable = false;
4125 if (bss_conf->cqm_rssi_thold) 4206 if (bss_conf->cqm_rssi_thold)
@@ -4656,29 +4737,49 @@ static void wlcore_roc_if_possible(struct wl1271 *wl,
 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
 }
 
-static void wlcore_update_inconn_sta(struct wl1271 *wl,
-				     struct wl12xx_vif *wlvif,
-				     struct wl1271_station *wl_sta,
-				     bool in_connection)
+/*
+ * when wl_sta is NULL, we treat this call as if coming from a
+ * pending auth reply.
+ * wl->mutex must be taken and the FW must be awake when the call
+ * takes place.
+ */
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			      struct wl1271_station *wl_sta, bool in_conn)
 {
-	if (in_connection) {
-		if (WARN_ON(wl_sta->in_connection))
+	if (in_conn) {
+		if (WARN_ON(wl_sta && wl_sta->in_connection))
 			return;
-		wl_sta->in_connection = true;
-		if (!wlvif->inconn_count++)
+
+		if (!wlvif->ap_pending_auth_reply &&
+		    !wlvif->inconn_count)
 			wlcore_roc_if_possible(wl, wlvif);
+
+		if (wl_sta) {
+			wl_sta->in_connection = true;
+			wlvif->inconn_count++;
+		} else {
+			wlvif->ap_pending_auth_reply = true;
+		}
 	} else {
-		if (!wl_sta->in_connection)
+		if (wl_sta && !wl_sta->in_connection)
+			return;
+
+		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
 			return;
 
-		wl_sta->in_connection = false;
-		wlvif->inconn_count--;
-		if (WARN_ON(wlvif->inconn_count < 0))
+		if (WARN_ON(wl_sta && !wlvif->inconn_count))
 			return;
 
-		if (!wlvif->inconn_count)
-			if (test_bit(wlvif->role_id, wl->roc_map))
-				wl12xx_croc(wl, wlvif->role_id);
+		if (wl_sta) {
+			wl_sta->in_connection = false;
+			wlvif->inconn_count--;
+		} else {
+			wlvif->ap_pending_auth_reply = false;
+		}
+
+		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
+		    test_bit(wlvif->role_id, wl->roc_map))
+			wl12xx_croc(wl, wlvif->role_id);
 	}
 }
 
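
The rewritten helper keeps the ROC alive as long as either at least one station is mid-connection (inconn_count) or an auth reply is pending with no station entry yet (ap_pending_auth_reply). A small self-contained model of that decision logic, with the ROC start/stop reduced to a boolean; illustrative only, not driver code:

    #include <stdbool.h>
    #include <stdio.h>

    struct vif_state {
            int inconn_count;               /* stations between auth and assoc */
            bool ap_pending_auth_reply;     /* auth reply sent, no station entry yet */
            bool roc_active;                /* "remain on channel" currently held */
    };

    /* sta_present mirrors "wl_sta != NULL"; in_conn mirrors the last argument */
    static void update_inconn(struct vif_state *v, bool sta_present, bool in_conn)
    {
            if (in_conn) {
                    /* take the ROC only on the first reference of either kind */
                    if (!v->ap_pending_auth_reply && !v->inconn_count)
                            v->roc_active = true;

                    if (sta_present)
                            v->inconn_count++;
                    else
                            v->ap_pending_auth_reply = true;
            } else {
                    if (sta_present)
                            v->inconn_count--;
                    else
                            v->ap_pending_auth_reply = false;

                    /* release the ROC once both reference kinds are gone */
                    if (!v->inconn_count && !v->ap_pending_auth_reply)
                            v->roc_active = false;
            }
    }

    int main(void)
    {
            struct vif_state v = { 0 };

            update_inconn(&v, false, true);  /* auth reply queued -> ROC on */
            update_inconn(&v, true, true);   /* station added while still in ROC */
            update_inconn(&v, false, false); /* pending auth reply cleared */
            update_inconn(&v, true, false);  /* station reached assoc -> ROC off */
            printf("roc_active=%d inconn=%d\n", v.roc_active, v.inconn_count);
            return 0;
    }
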
@@ -5313,10 +5414,7 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
5313 5414
5314/* 5 GHz band channels for WL1273 */ 5415/* 5 GHz band channels for WL1273 */
5315static struct ieee80211_channel wl1271_channels_5ghz[] = { 5416static struct ieee80211_channel wl1271_channels_5ghz[] = {
5316 { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
5317 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR }, 5417 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5318 { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
5319 { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
5320 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR }, 5418 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5321 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR }, 5419 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5322 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR }, 5420 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
@@ -5896,14 +5994,20 @@ static const struct wiphy_wowlan_support wlcore_wowlan_support = {
 };
 #endif
 
+static irqreturn_t wlcore_hardirq(int irq, void *cookie)
+{
+	return IRQ_WAKE_THREAD;
+}
+
 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
 {
 	struct wl1271 *wl = context;
 	struct platform_device *pdev = wl->pdev;
-	struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
+	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
 	struct wl12xx_platform_data *pdata = pdev_data->pdata;
 	unsigned long irqflags;
 	int ret;
+	irq_handler_t hardirq_fn = NULL;
 
 	if (fw) {
 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
@@ -5932,12 +6036,14 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
 	wl->platform_quirks = pdata->platform_quirks;
 	wl->if_ops = pdev_data->if_ops;
 
-	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
 		irqflags = IRQF_TRIGGER_RISING;
-	else
+		hardirq_fn = wlcore_hardirq;
+	} else {
 		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+	}
 
-	ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
+	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
 				   irqflags, pdev->name, wl);
 	if (ret < 0) {
 		wl1271_error("request_irq() failed: %d", ret);
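
request_threaded_irq() rejects a NULL primary handler unless IRQF_ONESHOT is set, and keeping an edge-triggered line masked for the whole threaded handler (what IRQF_ONESHOT does) risks missing edges; hence the edge-IRQ quirk now supplies a trivial hard handler that only wakes the thread. A generic sketch of that pairing; the device names and probe glue are illustrative, not wlcore code:

    #include <linux/interrupt.h>
    #include <linux/types.h>

    static irqreturn_t my_hardirq(int irq, void *cookie)
    {
            /* nothing to do at hard-IRQ time; just run the threaded handler */
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t my_thread_fn(int irq, void *cookie)
    {
            /* heavy lifting (bus I/O, locking) happens here */
            return IRQ_HANDLED;
    }

    static int my_request_irq(unsigned int irq, bool edge, void *dev)
    {
            unsigned long flags;
            irq_handler_t hardirq_fn = NULL;

            if (edge) {
                    flags = IRQF_TRIGGER_RISING;
                    hardirq_fn = my_hardirq;  /* NULL handler not allowed without ONESHOT */
            } else {
                    flags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
            }

            return request_threaded_irq(irq, hardirq_fn, my_thread_fn,
                                        flags, "my-device", dev);
    }
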
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 98066d40c2ad..26bfc365ba70 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -83,6 +83,10 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
83 struct wl12xx_vif *wlvif; 83 struct wl12xx_vif *wlvif;
84 u32 timeout; 84 u32 timeout;
85 85
86 /* We do not enter elp sleep in PLT mode */
87 if (wl->plt)
88 return;
89
86 if (wl->sleep_auth != WL1271_PSM_ELP) 90 if (wl->sleep_auth != WL1271_PSM_ELP)
87 return; 91 return;
88 92
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index f407101e525b..13e743df2e31 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -174,17 +174,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
174 /* if radar is set, we ignore the passive flag */ 174 /* if radar is set, we ignore the passive flag */
175 (radar || 175 (radar ||
176 !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) { 176 !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
177 wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ", 177
178 req_channels[i]->band,
179 req_channels[i]->center_freq);
180 wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
181 req_channels[i]->hw_value,
182 req_channels[i]->flags);
183 wl1271_debug(DEBUG_SCAN, "max_power %d",
184 req_channels[i]->max_power);
185 wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
186 min_dwell_time_active,
187 max_dwell_time_active);
188 178
189 if (flags & IEEE80211_CHAN_RADAR) { 179 if (flags & IEEE80211_CHAN_RADAR) {
190 channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS; 180 channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
@@ -222,6 +212,17 @@ wlcore_scan_get_channels(struct wl1271 *wl,
222 *n_pactive_ch); 212 *n_pactive_ch);
223 } 213 }
224 214
215 wl1271_debug(DEBUG_SCAN, "freq %d, ch. %d, flags 0x%x, power %d, min/max_dwell %d/%d%s%s",
216 req_channels[i]->center_freq,
217 req_channels[i]->hw_value,
218 req_channels[i]->flags,
219 req_channels[i]->max_power,
220 min_dwell_time_active,
221 max_dwell_time_active,
222 flags & IEEE80211_CHAN_RADAR ?
223 ", DFS" : "",
224 flags & IEEE80211_CHAN_PASSIVE_SCAN ?
225 ", PASSIVE" : "");
225 j++; 226 j++;
226 } 227 }
227 } 228 }
@@ -364,7 +365,7 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
364 struct cfg80211_ssid *ssids = req->ssids; 365 struct cfg80211_ssid *ssids = req->ssids;
365 int ret = 0, type, i, j, n_match_ssids = 0; 366 int ret = 0, type, i, j, n_match_ssids = 0;
366 367
367 wl1271_debug(DEBUG_CMD, "cmd sched scan ssid list"); 368 wl1271_debug((DEBUG_CMD | DEBUG_SCAN), "cmd sched scan ssid list");
368 369
369 /* count the match sets that contain SSIDs */ 370 /* count the match sets that contain SSIDs */
370 for (i = 0; i < req->n_match_sets; i++) 371 for (i = 0; i < req->n_match_sets; i++)
@@ -442,8 +443,6 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
442 } 443 }
443 } 444 }
444 445
445 wl1271_dump(DEBUG_SCAN, "SSID_LIST: ", cmd, sizeof(*cmd));
446
447 ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_SSID_CFG, cmd, 446 ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_SSID_CFG, cmd,
448 sizeof(*cmd), 0); 447 sizeof(*cmd), 0);
449 if (ret < 0) { 448 if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 1b0cd98e35f1..b2c018dccf18 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -335,7 +335,7 @@ static int wl1271_probe(struct spi_device *spi)
335 if (!pdev_data) 335 if (!pdev_data)
336 goto out; 336 goto out;
337 337
338 pdev_data->pdata = spi->dev.platform_data; 338 pdev_data->pdata = dev_get_platdata(&spi->dev);
339 if (!pdev_data->pdata) { 339 if (!pdev_data->pdata) {
340 dev_err(&spi->dev, "no platform data\n"); 340 dev_err(&spi->dev, "no platform data\n");
341 ret = -ENODEV; 341 ret = -ENODEV;
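
dev_get_platdata() is the generic accessor for the same field that was previously dereferenced by hand; per its definition in include/linux/device.h it is simply:

    static inline void *dev_get_platdata(const struct device *dev)
    {
            return dev->platform_data;
    }

so the change is purely cosmetic, replacing the open-coded spi->dev.platform_data and pdev->dev.platform_data accesses with the helper.
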
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 527590f2adfb..a3b7d950d8e9 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -297,7 +297,8 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
297 ret = wl1271_plt_stop(wl); 297 ret = wl1271_plt_stop(wl);
298 break; 298 break;
299 case PLT_ON: 299 case PLT_ON:
300 ret = wl1271_plt_start(wl, PLT_ON); 300 case PLT_CHIP_AWAKE:
301 ret = wl1271_plt_start(wl, val);
301 break; 302 break;
302 case PLT_FEM_DETECT: 303 case PLT_FEM_DETECT:
303 ret = wl1271_tm_detect_fem(wl, tb); 304 ret = wl1271_tm_detect_fem(wl, tb);
@@ -361,6 +362,7 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
361{ 362{
362 struct wl1271 *wl = hw->priv; 363 struct wl1271 *wl = hw->priv;
363 struct nlattr *tb[WL1271_TM_ATTR_MAX + 1]; 364 struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
365 u32 nla_cmd;
364 int err; 366 int err;
365 367
366 err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy); 368 err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
@@ -370,7 +372,14 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
370 if (!tb[WL1271_TM_ATTR_CMD_ID]) 372 if (!tb[WL1271_TM_ATTR_CMD_ID])
371 return -EINVAL; 373 return -EINVAL;
372 374
373 switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) { 375 nla_cmd = nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID]);
376
377 /* Only SET_PLT_MODE is allowed in case of mode PLT_CHIP_AWAKE */
378 if (wl->plt_mode == PLT_CHIP_AWAKE &&
379 nla_cmd != WL1271_TM_CMD_SET_PLT_MODE)
380 return -EOPNOTSUPP;
381
382 switch (nla_cmd) {
374 case WL1271_TM_CMD_TEST: 383 case WL1271_TM_CMD_TEST:
375 return wl1271_tm_cmd_test(wl, tb); 384 return wl1271_tm_cmd_test(wl, tb);
376 case WL1271_TM_CMD_INTERROGATE: 385 case WL1271_TM_CMD_INTERROGATE:
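
With the new PLT_CHIP_AWAKE mode, the testmode entry point first decodes the command id and then refuses everything except another SET_PLT_MODE while the chip is merely kept awake. A stand-alone model of that gate; the command ids are illustrative stand-ins for the WL1271_TM_CMD_* values, while the mode values mirror the enum shown in this series:

    #include <errno.h>
    #include <stdio.h>

    enum plt_mode { PLT_OFF, PLT_ON, PLT_FEM_DETECT, PLT_CHIP_AWAKE };
    enum tm_cmd { TM_CMD_TEST = 1, TM_CMD_INTERROGATE, TM_CMD_SET_PLT_MODE };

    static int tm_cmd_allowed(enum plt_mode mode, enum tm_cmd cmd)
    {
            /* only SET_PLT_MODE is allowed while the chip is only kept awake */
            if (mode == PLT_CHIP_AWAKE && cmd != TM_CMD_SET_PLT_MODE)
                    return -EOPNOTSUPP;
            return 0;
    }

    int main(void)
    {
            printf("TEST in CHIP_AWAKE:    %d\n",
                   tm_cmd_allowed(PLT_CHIP_AWAKE, TM_CMD_TEST));
            printf("SET_PLT_MODE anywhere: %d\n",
                   tm_cmd_allowed(PLT_CHIP_AWAKE, TM_CMD_SET_PLT_MODE));
            return 0;
    }
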
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 7e93fe63a2c7..87cd707affa2 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -86,19 +86,34 @@ void wl1271_free_tx_id(struct wl1271 *wl, int id)
 EXPORT_SYMBOL(wl1271_free_tx_id);
 
 static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+						 struct wl12xx_vif *wlvif,
 						 struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
 
+	hdr = (struct ieee80211_hdr *)(skb->data +
+				       sizeof(struct wl1271_tx_hw_descr));
+	if (!ieee80211_is_auth(hdr->frame_control))
+		return;
+
 	/*
 	 * add the station to the known list before transmitting the
 	 * authentication response. this way it won't get de-authed by FW
 	 * when transmitting too soon.
 	 */
-	hdr = (struct ieee80211_hdr *)(skb->data +
-				       sizeof(struct wl1271_tx_hw_descr));
-	if (ieee80211_is_auth(hdr->frame_control))
-		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+	wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+
+	/*
+	 * ROC for 1 second on the AP channel for completing the connection.
+	 * Note the ROC will be continued by the update_sta_state callbacks
+	 * once the station reaches the associated state.
+	 */
+	wlcore_update_inconn_sta(wl, wlvif, NULL, true);
+	wlvif->pending_auth_reply_time = jiffies;
+	cancel_delayed_work(&wlvif->pending_auth_complete_work);
+	ieee80211_queue_delayed_work(wl->hw,
+				     &wlvif->pending_auth_complete_work,
+				     msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
 }
 
 static void wl1271_tx_regulate_link(struct wl1271 *wl,
@@ -386,7 +401,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 	is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
 		 (cipher == WLAN_CIPHER_SUITE_WEP104);
 
-	if (WARN_ON(is_wep && wlvif->default_key != idx)) {
+	if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
 		ret = wl1271_set_default_wep_key(wl, wlvif, idx);
 		if (ret < 0)
 			return ret;
@@ -404,7 +419,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
 
 	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
-		wl1271_tx_ap_update_inconnection_sta(wl, skb);
+		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
 		wl1271_tx_regulate_link(wl, wlvif, hlid);
 	}
 
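
ieee80211_is_auth() just tests the type/subtype bits of the 16-bit 802.11 frame control field (management type, authentication subtype). A self-contained illustration of the same test on a raw frame control value already converted to host order; not driver code, but the constants are the standard 802.11 field masks:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FCTL_FTYPE  0x000c  /* frame type bits */
    #define FCTL_STYPE  0x00f0  /* frame subtype bits */
    #define FTYPE_MGMT  0x0000  /* management frame */
    #define STYPE_AUTH  0x00b0  /* authentication subtype */

    static bool is_auth_frame(uint16_t fc)
    {
            return (fc & (FCTL_FTYPE | FCTL_STYPE)) == (FTYPE_MGMT | STYPE_AUTH);
    }

    int main(void)
    {
            printf("0x00b0 -> %d (auth)\n", is_auth_frame(0x00b0));
            printf("0x0080 -> %d (beacon)\n", is_auth_frame(0x0080));
            return 0;
    }
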
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 55aa4acf9105..35489c300da1 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -56,6 +56,9 @@
56/* Used for management frames and dummy packets */ 56/* Used for management frames and dummy packets */
57#define WL1271_TID_MGMT 7 57#define WL1271_TID_MGMT 7
58 58
59/* stop a ROC for pending authentication reply after this time (ms) */
60#define WLCORE_PEND_AUTH_ROC_TIMEOUT 1000
61
59struct wl127x_tx_mem { 62struct wl127x_tx_mem {
60 /* 63 /*
61 * Number of extra memory blocks to allocate for this packet 64 * Number of extra memory blocks to allocate for this packet
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0034979e97cb..54ce5d5e84db 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -481,6 +481,8 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
481 struct ieee80211_sta *sta, 481 struct ieee80211_sta *sta,
482 struct ieee80211_key_conf *key_conf); 482 struct ieee80211_key_conf *key_conf);
483void wlcore_regdomain_config(struct wl1271 *wl); 483void wlcore_regdomain_config(struct wl1271 *wl);
484void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
485 struct wl1271_station *wl_sta, bool in_conn);
484 486
485static inline void 487static inline void
486wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band, 488wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index e5e146435fe7..2a50e089b0e7 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -255,6 +255,7 @@ enum wl12xx_vif_flags {
255 WLVIF_FLAG_CS_PROGRESS, 255 WLVIF_FLAG_CS_PROGRESS,
256 WLVIF_FLAG_AP_PROBE_RESP_SET, 256 WLVIF_FLAG_AP_PROBE_RESP_SET,
257 WLVIF_FLAG_IN_USE, 257 WLVIF_FLAG_IN_USE,
258 WLVIF_FLAG_ACTIVE,
258}; 259};
259 260
260struct wl12xx_vif; 261struct wl12xx_vif;
@@ -307,6 +308,7 @@ enum plt_mode {
307 PLT_OFF = 0, 308 PLT_OFF = 0,
308 PLT_ON = 1, 309 PLT_ON = 1,
309 PLT_FEM_DETECT = 2, 310 PLT_FEM_DETECT = 2,
311 PLT_CHIP_AWAKE = 3
310}; 312};
311 313
312struct wl12xx_rx_filter_field { 314struct wl12xx_rx_filter_field {
@@ -456,6 +458,15 @@ struct wl12xx_vif {
456 */ 458 */
457 int hw_queue_base; 459 int hw_queue_base;
458 460
461 /* do we have a pending auth reply? (and ROC) */
462 bool ap_pending_auth_reply;
463
464 /* time when we sent the pending auth reply */
465 unsigned long pending_auth_reply_time;
466
467 /* work for canceling ROC after pending auth reply */
468 struct delayed_work pending_auth_complete_work;
469
459 /* 470 /*
460 * This struct must be last! 471 * This struct must be last!
461 * data that has to be saved acrossed reconfigs (e.g. recovery) 472 * data that has to be saved acrossed reconfigs (e.g. recovery)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5715318d6bab..55b8dec86233 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -87,9 +87,13 @@ struct pending_tx_info {
87struct xenvif_rx_meta { 87struct xenvif_rx_meta {
88 int id; 88 int id;
89 int size; 89 int size;
90 int gso_type;
90 int gso_size; 91 int gso_size;
91}; 92};
92 93
94#define GSO_BIT(type) \
95 (1 << XEN_NETIF_GSO_TYPE_ ## type)
96
93/* Discriminate from any valid pending_idx value. */ 97/* Discriminate from any valid pending_idx value. */
94#define INVALID_PENDING_IDX 0xFFFF 98#define INVALID_PENDING_IDX 0xFFFF
95 99
@@ -150,10 +154,12 @@ struct xenvif {
150 u8 fe_dev_addr[6]; 154 u8 fe_dev_addr[6];
151 155
152 /* Frontend feature information. */ 156 /* Frontend feature information. */
157 int gso_mask;
158 int gso_prefix_mask;
159
153 u8 can_sg:1; 160 u8 can_sg:1;
154 u8 gso:1; 161 u8 ip_csum:1;
155 u8 gso_prefix:1; 162 u8 ipv6_csum:1;
156 u8 csum:1;
157 163
158 /* Internal feature information. */ 164 /* Internal feature information. */
159 u8 can_queue:1; /* can queue packets for receiver? */ 165 u8 can_queue:1; /* can queue packets for receiver? */
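
GSO_BIT(type) turns a XEN_NETIF_GSO_TYPE_* value into a bit in gso_mask/gso_prefix_mask, so each negotiated GSO protocol occupies its own bit and per-skb decisions become a simple mask test. A small stand-alone model of the macro and the test used later in netback; the XEN_NETIF_GSO_TYPE_* values here are assumed to mirror xen/interface/io/netif.h:

    #include <stdio.h>

    /* assumed to mirror xen/interface/io/netif.h */
    #define XEN_NETIF_GSO_TYPE_NONE   0
    #define XEN_NETIF_GSO_TYPE_TCPV4  1
    #define XEN_NETIF_GSO_TYPE_TCPV6  2

    #define GSO_BIT(type) (1 << XEN_NETIF_GSO_TYPE_ ## type)

    int main(void)
    {
            int gso_mask = GSO_BIT(TCPV4);  /* frontend advertised TCPv4 GSO only */
            int gso_type = XEN_NETIF_GSO_TYPE_TCPV6;

            /* same shape as the netback test "(1 << gso_type) & vif->gso_mask" */
            if ((1 << gso_type) & gso_mask)
                    printf("leave room for a GSO extra-info slot\n");
            else
                    printf("no GSO descriptor needed for this skb\n");

            return 0;
    }
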
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 01bb854c7f62..e4aa26748f80 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -214,10 +214,14 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev,
 
 	if (!vif->can_sg)
 		features &= ~NETIF_F_SG;
-	if (!vif->gso && !vif->gso_prefix)
+	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
 		features &= ~NETIF_F_TSO;
-	if (!vif->csum)
+	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+		features &= ~NETIF_F_TSO6;
+	if (!vif->ip_csum)
 		features &= ~NETIF_F_IP_CSUM;
+	if (!vif->ipv6_csum)
+		features &= ~NETIF_F_IPV6_CSUM;
 
 	return features;
 }
@@ -306,7 +310,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
306 vif->domid = domid; 310 vif->domid = domid;
307 vif->handle = handle; 311 vif->handle = handle;
308 vif->can_sg = 1; 312 vif->can_sg = 1;
309 vif->csum = 1; 313 vif->ip_csum = 1;
310 vif->dev = dev; 314 vif->dev = dev;
311 315
312 vif->credit_bytes = vif->remaining_credit = ~0UL; 316 vif->credit_bytes = vif->remaining_credit = ~0UL;
@@ -316,8 +320,10 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
316 vif->credit_timeout.expires = jiffies; 320 vif->credit_timeout.expires = jiffies;
317 321
318 dev->netdev_ops = &xenvif_netdev_ops; 322 dev->netdev_ops = &xenvif_netdev_ops;
319 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 323 dev->hw_features = NETIF_F_SG |
320 dev->features = dev->hw_features; 324 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
325 NETIF_F_TSO | NETIF_F_TSO6;
326 dev->features = dev->hw_features | NETIF_F_RXCSUM;
321 SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops); 327 SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
322 328
323 dev->tx_queue_len = XENVIF_QUEUE_LENGTH; 329 dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
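
The feature fixup keeps a TSO flavour only when the frontend negotiated the matching GSO bit in either mask. A compile-and-run model of that stripping; F_TSO/F_TSO6 are placeholder bits, not the kernel's NETIF_F_* values, and the GSO type values are assumed to mirror the Xen netif header:

    #include <stdio.h>

    #define XEN_NETIF_GSO_TYPE_TCPV4  1   /* assumed values */
    #define XEN_NETIF_GSO_TYPE_TCPV6  2
    #define GSO_BIT(type) (1 << XEN_NETIF_GSO_TYPE_ ## type)

    /* stand-ins for the NETIF_F_* bits actually used by the driver */
    #define F_TSO   (1 << 0)
    #define F_TSO6  (1 << 1)

    static int fix_features(int features, int gso_mask, int gso_prefix_mask)
    {
            /* a feature survives only if some negotiated mask covers it */
            if (~(gso_mask | gso_prefix_mask) & GSO_BIT(TCPV4))
                    features &= ~F_TSO;
            if (~(gso_mask | gso_prefix_mask) & GSO_BIT(TCPV6))
                    features &= ~F_TSO6;
            return features;
    }

    int main(void)
    {
            int f = fix_features(F_TSO | F_TSO6, GSO_BIT(TCPV4), 0);

            printf("TSO %s, TSO6 %s\n",
                   (f & F_TSO) ? "kept" : "dropped",
                   (f & F_TSO6) ? "kept" : "dropped");
            return 0;
    }
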
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f3e591c611de..828fdab4f1a4 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -109,15 +109,12 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
-/*
- * This is the amount of packet we copy rather than map, so that the
- * guest can't fiddle with the contents of the headers while we do
- * packet processing on them (netfilter, routing, etc).
+/* This is a minimum size for the linear area to avoid lots of
+ * calls to __pskb_pull_tail() as we set up checksum offsets. The
+ * value 128 was chosen as it covers all IPv4 and most likely
+ * IPv6 headers.
  */
-#define PKT_PROT_LEN (ETH_HLEN + \
-		      VLAN_HLEN + \
-		      sizeof(struct iphdr) + MAX_IPOPTLEN + \
-		      sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+#define PKT_PROT_LEN 128
 
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
@@ -145,7 +142,7 @@ static int max_required_rx_slots(struct xenvif *vif)
145 int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE); 142 int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
146 143
147 /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ 144 /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
148 if (vif->can_sg || vif->gso || vif->gso_prefix) 145 if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
149 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */ 146 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
150 147
151 return max; 148 return max;
@@ -317,6 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
317 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 314 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
318 315
319 meta = npo->meta + npo->meta_prod++; 316 meta = npo->meta + npo->meta_prod++;
317 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
320 meta->gso_size = 0; 318 meta->gso_size = 0;
321 meta->size = 0; 319 meta->size = 0;
322 meta->id = req->id; 320 meta->id = req->id;
@@ -339,6 +337,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
339 struct gnttab_copy *copy_gop; 337 struct gnttab_copy *copy_gop;
340 struct xenvif_rx_meta *meta; 338 struct xenvif_rx_meta *meta;
341 unsigned long bytes; 339 unsigned long bytes;
340 int gso_type;
342 341
343 /* Data must not cross a page boundary. */ 342 /* Data must not cross a page boundary. */
344 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); 343 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -397,7 +396,14 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
397 } 396 }
398 397
399 /* Leave a gap for the GSO descriptor. */ 398 /* Leave a gap for the GSO descriptor. */
400 if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) 399 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
400 gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
401 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
402 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
403 else
404 gso_type = XEN_NETIF_GSO_TYPE_NONE;
405
406 if (*head && ((1 << gso_type) & vif->gso_mask))
401 vif->rx.req_cons++; 407 vif->rx.req_cons++;
402 408
403 *head = 0; /* There must be something in this buffer now. */ 409 *head = 0; /* There must be something in this buffer now. */
@@ -428,14 +434,28 @@ static int xenvif_gop_skb(struct sk_buff *skb,
428 unsigned char *data; 434 unsigned char *data;
429 int head = 1; 435 int head = 1;
430 int old_meta_prod; 436 int old_meta_prod;
437 int gso_type;
438 int gso_size;
431 439
432 old_meta_prod = npo->meta_prod; 440 old_meta_prod = npo->meta_prod;
433 441
442 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
443 gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
444 gso_size = skb_shinfo(skb)->gso_size;
445 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
446 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
447 gso_size = skb_shinfo(skb)->gso_size;
448 } else {
449 gso_type = XEN_NETIF_GSO_TYPE_NONE;
450 gso_size = 0;
451 }
452
434 /* Set up a GSO prefix descriptor, if necessary */ 453 /* Set up a GSO prefix descriptor, if necessary */
435 if (skb_shinfo(skb)->gso_size && vif->gso_prefix) { 454 if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
436 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 455 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
437 meta = npo->meta + npo->meta_prod++; 456 meta = npo->meta + npo->meta_prod++;
438 meta->gso_size = skb_shinfo(skb)->gso_size; 457 meta->gso_type = gso_type;
458 meta->gso_size = gso_size;
439 meta->size = 0; 459 meta->size = 0;
440 meta->id = req->id; 460 meta->id = req->id;
441 } 461 }
@@ -443,10 +463,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
443 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 463 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
444 meta = npo->meta + npo->meta_prod++; 464 meta = npo->meta + npo->meta_prod++;
445 465
446 if (!vif->gso_prefix) 466 if ((1 << gso_type) & vif->gso_mask) {
447 meta->gso_size = skb_shinfo(skb)->gso_size; 467 meta->gso_type = gso_type;
448 else 468 meta->gso_size = gso_size;
469 } else {
470 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
449 meta->gso_size = 0; 471 meta->gso_size = 0;
472 }
450 473
451 meta->size = 0; 474 meta->size = 0;
452 meta->id = req->id; 475 meta->id = req->id;
@@ -592,7 +615,8 @@ void xenvif_rx_action(struct xenvif *vif)
592 615
593 vif = netdev_priv(skb->dev); 616 vif = netdev_priv(skb->dev);
594 617
595 if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) { 618 if ((1 << vif->meta[npo.meta_cons].gso_type) &
619 vif->gso_prefix_mask) {
596 resp = RING_GET_RESPONSE(&vif->rx, 620 resp = RING_GET_RESPONSE(&vif->rx,
597 vif->rx.rsp_prod_pvt++); 621 vif->rx.rsp_prod_pvt++);
598 622
@@ -629,7 +653,8 @@ void xenvif_rx_action(struct xenvif *vif)
629 vif->meta[npo.meta_cons].size, 653 vif->meta[npo.meta_cons].size,
630 flags); 654 flags);
631 655
632 if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) { 656 if ((1 << vif->meta[npo.meta_cons].gso_type) &
657 vif->gso_mask) {
633 struct xen_netif_extra_info *gso = 658 struct xen_netif_extra_info *gso =
634 (struct xen_netif_extra_info *) 659 (struct xen_netif_extra_info *)
635 RING_GET_RESPONSE(&vif->rx, 660 RING_GET_RESPONSE(&vif->rx,
@@ -637,8 +662,8 @@ void xenvif_rx_action(struct xenvif *vif)
637 662
638 resp->flags |= XEN_NETRXF_extra_info; 663 resp->flags |= XEN_NETRXF_extra_info;
639 664
665 gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
640 gso->u.gso.size = vif->meta[npo.meta_cons].gso_size; 666 gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
641 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
642 gso->u.gso.pad = 0; 667 gso->u.gso.pad = 0;
643 gso->u.gso.features = 0; 668 gso->u.gso.features = 0;
644 669
@@ -1101,15 +1126,20 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
 		return -EINVAL;
 	}
 
-	/* Currently only TCPv4 S.O. is supported. */
-	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+	switch (gso->u.gso.type) {
+	case XEN_NETIF_GSO_TYPE_TCPV4:
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+		break;
+	case XEN_NETIF_GSO_TYPE_TCPV6:
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+		break;
+	default:
 		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
 		xenvif_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
-	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 
 	/* Header must be checked, and gso_segs computed. */
 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -1118,61 +1148,74 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
 	return 0;
 }
 
-static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+{
+	if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
+		/* If we need to pullup then pullup to the max, so we
+		 * won't need to do it again.
+		 */
+		int target = min_t(int, skb->len, MAX_TCP_HEADER);
+		__pskb_pull_tail(skb, target - skb_headlen(skb));
+	}
+}
+
+static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
+			     int recalculate_partial_csum)
 {
-	struct iphdr *iph;
+	struct iphdr *iph = (void *)skb->data;
+	unsigned int header_size;
+	unsigned int off;
 	int err = -EPROTO;
-	int recalculate_partial_csum = 0;
 
-	/*
-	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
-	 * peers can fail to set NETRXF_csum_blank when sending a GSO
-	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
-	 * recalculate the partial checksum.
-	 */
-	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
-		vif->rx_gso_checksum_fixup++;
-		skb->ip_summed = CHECKSUM_PARTIAL;
-		recalculate_partial_csum = 1;
-	}
+	off = sizeof(struct iphdr);
 
-	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
-	if (skb->ip_summed != CHECKSUM_PARTIAL)
-		return 0;
+	header_size = skb->network_header + off + MAX_IPOPTLEN;
+	maybe_pull_tail(skb, header_size);
 
-	if (skb->protocol != htons(ETH_P_IP))
-		goto out;
+	off = iph->ihl * 4;
 
-	iph = (void *)skb->data;
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct tcphdr, check)))
 			goto out;
 
 		if (recalculate_partial_csum) {
 			struct tcphdr *tcph = tcp_hdr(skb);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct tcphdr);
+			maybe_pull_tail(skb, header_size);
+
 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
+							 skb->len - off,
 							 IPPROTO_TCP, 0);
 		}
 		break;
 	case IPPROTO_UDP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct udphdr, check)))
 			goto out;
 
 		if (recalculate_partial_csum) {
 			struct udphdr *udph = udp_hdr(skb);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct udphdr);
+			maybe_pull_tail(skb, header_size);
+
 			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
+							 skb->len - off,
 							 IPPROTO_UDP, 0);
 		}
 		break;
 	default:
 		if (net_ratelimit())
 			netdev_err(vif->dev,
-				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
+				   "Attempting to checksum a non-TCP/UDP packet, "
+				   "dropping a protocol %d packet\n",
 				   iph->protocol);
 		goto out;
 	}
@@ -1183,6 +1226,158 @@ out:
1183 return err; 1226 return err;
1184} 1227}
1185 1228
1229static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
1230 int recalculate_partial_csum)
1231{
1232 int err = -EPROTO;
1233 struct ipv6hdr *ipv6h = (void *)skb->data;
1234 u8 nexthdr;
1235 unsigned int header_size;
1236 unsigned int off;
1237 bool fragment;
1238 bool done;
1239
1240 done = false;
1241
1242 off = sizeof(struct ipv6hdr);
1243
1244 header_size = skb->network_header + off;
1245 maybe_pull_tail(skb, header_size);
1246
1247 nexthdr = ipv6h->nexthdr;
1248
1249 while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
1250 !done) {
1251 switch (nexthdr) {
1252 case IPPROTO_DSTOPTS:
1253 case IPPROTO_HOPOPTS:
1254 case IPPROTO_ROUTING: {
1255 struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
1256
1257 header_size = skb->network_header +
1258 off +
1259 sizeof(struct ipv6_opt_hdr);
1260 maybe_pull_tail(skb, header_size);
1261
1262 nexthdr = hp->nexthdr;
1263 off += ipv6_optlen(hp);
1264 break;
1265 }
1266 case IPPROTO_AH: {
1267 struct ip_auth_hdr *hp = (void *)(skb->data + off);
1268
1269 header_size = skb->network_header +
1270 off +
1271 sizeof(struct ip_auth_hdr);
1272 maybe_pull_tail(skb, header_size);
1273
1274 nexthdr = hp->nexthdr;
1275 off += (hp->hdrlen+2)<<2;
1276 break;
1277 }
1278 case IPPROTO_FRAGMENT:
1279 fragment = true;
1280 /* fall through */
1281 default:
1282 done = true;
1283 break;
1284 }
1285 }
1286
1287 if (!done) {
1288 if (net_ratelimit())
1289 netdev_err(vif->dev, "Failed to parse packet header\n");
1290 goto out;
1291 }
1292
1293 if (fragment) {
1294 if (net_ratelimit())
1295 netdev_err(vif->dev, "Packet is a fragment!\n");
1296 goto out;
1297 }
1298
1299 switch (nexthdr) {
1300 case IPPROTO_TCP:
1301 if (!skb_partial_csum_set(skb, off,
1302 offsetof(struct tcphdr, check)))
1303 goto out;
1304
1305 if (recalculate_partial_csum) {
1306 struct tcphdr *tcph = tcp_hdr(skb);
1307
1308 header_size = skb->network_header +
1309 off +
1310 sizeof(struct tcphdr);
1311 maybe_pull_tail(skb, header_size);
1312
1313 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
1314 &ipv6h->daddr,
1315 skb->len - off,
1316 IPPROTO_TCP, 0);
1317 }
1318 break;
1319 case IPPROTO_UDP:
1320 if (!skb_partial_csum_set(skb, off,
1321 offsetof(struct udphdr, check)))
1322 goto out;
1323
1324 if (recalculate_partial_csum) {
1325 struct udphdr *udph = udp_hdr(skb);
1326
1327 header_size = skb->network_header +
1328 off +
1329 sizeof(struct udphdr);
1330 maybe_pull_tail(skb, header_size);
1331
1332 udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
1333 &ipv6h->daddr,
1334 skb->len - off,
1335 IPPROTO_UDP, 0);
1336 }
1337 break;
1338 default:
1339 if (net_ratelimit())
1340 netdev_err(vif->dev,
1341 "Attempting to checksum a non-TCP/UDP packet, "
1342 "dropping a protocol %d packet\n",
1343 nexthdr);
1344 goto out;
1345 }
1346
1347 err = 0;
1348
1349out:
1350 return err;
1351}
1352
1353static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1354{
1355 int err = -EPROTO;
1356 int recalculate_partial_csum = 0;
1357
1358 /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1359 * peers can fail to set NETRXF_csum_blank when sending a GSO
1360 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1361 * recalculate the partial checksum.
1362 */
1363 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1364 vif->rx_gso_checksum_fixup++;
1365 skb->ip_summed = CHECKSUM_PARTIAL;
1366 recalculate_partial_csum = 1;
1367 }
1368
1369 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1370 if (skb->ip_summed != CHECKSUM_PARTIAL)
1371 return 0;
1372
1373 if (skb->protocol == htons(ETH_P_IP))
1374 err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
1375 else if (skb->protocol == htons(ETH_P_IPV6))
1376 err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
1377
1378 return err;
1379}
1380
1186static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) 1381static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1187{ 1382{
1188 unsigned long now = jiffies; 1383 unsigned long now = jiffies;
@@ -1428,12 +1623,7 @@ static int xenvif_tx_submit(struct xenvif *vif, int budget)
1428 1623
1429 xenvif_fill_frags(vif, skb); 1624 xenvif_fill_frags(vif, skb);
1430 1625
1431 /* 1626 if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
1432 * If the initial fragment was < PKT_PROT_LEN then
1433 * pull through some bytes from the other fragments to
1434 * increase the linear region to PKT_PROT_LEN bytes.
1435 */
1436 if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
1437 int target = min_t(int, skb->len, PKT_PROT_LEN); 1627 int target = min_t(int, skb->len, PKT_PROT_LEN);
1438 __pskb_pull_tail(skb, target - skb_headlen(skb)); 1628 __pskb_pull_tail(skb, target - skb_headlen(skb));
1439 } 1629 }
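
checksum_setup_ipv6() has to skip over any hop-by-hop, routing, destination-options and AH headers before it knows whether the payload is TCP or UDP. A self-contained parser showing the same next-header walk over a raw buffer; protocol numbers are the standard IANA values, and the code is deliberately simplified (no full length validation), so treat it as an illustration rather than the driver logic:

    #include <stdint.h>
    #include <stdio.h>

    #define NEXTHDR_HOP       0
    #define NEXTHDR_TCP       6
    #define NEXTHDR_UDP      17
    #define NEXTHDR_ROUTING  43
    #define NEXTHDR_FRAGMENT 44
    #define NEXTHDR_AUTH     51
    #define NEXTHDR_DEST     60

    /*
     * Walk IPv6 extension headers starting right after the fixed 40-byte
     * IPv6 header. Returns the upper-layer protocol and sets *off to its
     * offset, or -1 for a fragment or an unparsable chain.
     */
    static int ipv6_find_transport(const uint8_t *pkt, size_t len, size_t *off)
    {
            uint8_t nexthdr = pkt[6];   /* "next header" field of the IPv6 header */
            size_t pos = 40;

            while (pos + 2 <= len) {
                    switch (nexthdr) {
                    case NEXTHDR_HOP:
                    case NEXTHDR_ROUTING:
                    case NEXTHDR_DEST:
                            nexthdr = pkt[pos];
                            pos += (pkt[pos + 1] + 1) * 8;  /* hdrlen in 8-byte units, excl. first 8 */
                            break;
                    case NEXTHDR_AUTH:
                            nexthdr = pkt[pos];
                            pos += (pkt[pos + 1] + 2) * 4;  /* AH length unit is 4 bytes */
                            break;
                    case NEXTHDR_FRAGMENT:
                            return -1;                      /* can't checksum a fragment */
                    default:
                            *off = pos;
                            return nexthdr;                 /* TCP, UDP, ... */
                    }
            }
            return -1;
    }

    int main(void)
    {
            uint8_t pkt[128] = { 0 };
            size_t off = 0;
            int proto;

            pkt[6] = NEXTHDR_HOP;   /* IPv6 header says: hop-by-hop options next */
            pkt[40] = NEXTHDR_TCP;  /* hop-by-hop header says: TCP next */
            pkt[41] = 0;            /* 8 bytes of hop-by-hop options */

            proto = ipv6_find_transport(pkt, sizeof(pkt), &off);
            printf("transport proto %d at offset %zu\n", proto, off);
            return 0;
    }
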
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1b08d8798372..f0358992b04f 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -105,6 +105,22 @@ static int netback_probe(struct xenbus_device *dev,
105 goto abort_transaction; 105 goto abort_transaction;
106 } 106 }
107 107
108 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
109 "%d", sg);
110 if (err) {
111 message = "writing feature-gso-tcpv6";
112 goto abort_transaction;
113 }
114
115 /* We support partial checksum setup for IPv6 packets */
116 err = xenbus_printf(xbt, dev->nodename,
117 "feature-ipv6-csum-offload",
118 "%d", 1);
119 if (err) {
120 message = "writing feature-ipv6-csum-offload";
121 goto abort_transaction;
122 }
123
108 /* We support rx-copy path. */ 124 /* We support rx-copy path. */
109 err = xenbus_printf(xbt, dev->nodename, 125 err = xenbus_printf(xbt, dev->nodename,
110 "feature-rx-copy", "%d", 1); 126 "feature-rx-copy", "%d", 1);
@@ -561,20 +577,50 @@ static int connect_rings(struct backend_info *be)
561 val = 0; 577 val = 0;
562 vif->can_sg = !!val; 578 vif->can_sg = !!val;
563 579
580 vif->gso_mask = 0;
581 vif->gso_prefix_mask = 0;
582
564 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", 583 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
565 "%d", &val) < 0) 584 "%d", &val) < 0)
566 val = 0; 585 val = 0;
567 vif->gso = !!val; 586 if (val)
587 vif->gso_mask |= GSO_BIT(TCPV4);
568 588
569 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix", 589 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
570 "%d", &val) < 0) 590 "%d", &val) < 0)
571 val = 0; 591 val = 0;
572 vif->gso_prefix = !!val; 592 if (val)
593 vif->gso_prefix_mask |= GSO_BIT(TCPV4);
594
595 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
596 "%d", &val) < 0)
597 val = 0;
598 if (val)
599 vif->gso_mask |= GSO_BIT(TCPV6);
600
601 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
602 "%d", &val) < 0)
603 val = 0;
604 if (val)
605 vif->gso_prefix_mask |= GSO_BIT(TCPV6);
606
607 if (vif->gso_mask & vif->gso_prefix_mask) {
608 xenbus_dev_fatal(dev, err,
609 "%s: gso and gso prefix flags are not "
610 "mutually exclusive",
611 dev->otherend);
612 return -EOPNOTSUPP;
613 }
573 614
574 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", 615 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
575 "%d", &val) < 0) 616 "%d", &val) < 0)
576 val = 0; 617 val = 0;
577 vif->csum = !val; 618 vif->ip_csum = !val;
619
620 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
621 "%d", &val) < 0)
622 val = 0;
623 vif->ipv6_csum = !!val;
578 624
579 /* Map the shared frame, irq etc. */ 625 /* Map the shared frame, irq etc. */
580 err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref, 626 err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 36808bf25677..dd1011e55cb5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -952,7 +952,7 @@ static int handle_incoming_queue(struct net_device *dev,
952 u64_stats_update_end(&stats->syncp); 952 u64_stats_update_end(&stats->syncp);
953 953
954 /* Pass it up. */ 954 /* Pass it up. */
955 netif_receive_skb(skb); 955 napi_gro_receive(&np->napi, skb);
956 } 956 }
957 957
958 return packets_dropped; 958 return packets_dropped;
@@ -1051,6 +1051,8 @@ err:
1051 if (work_done < budget) { 1051 if (work_done < budget) {
1052 int more_to_do = 0; 1052 int more_to_do = 0;
1053 1053
1054 napi_gro_flush(napi, false);
1055
1054 local_irq_save(flags); 1056 local_irq_save(flags);
1055 1057
1056 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); 1058 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
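
Switching from netif_receive_skb() to napi_gro_receive() lets the frontend merge consecutive TCP segments before they reach the stack, and the napi_gro_flush() before re-arming pushes out anything GRO is still holding when the poll loop goes idle. A generic sketch of that pattern in a NAPI poll handler; my_poll and the ring helpers are illustrative, not netfront code:

    #include <linux/netdevice.h>

    /* illustrative ring helpers, not a real API */
    struct sk_buff *my_ring_next_skb(void *ring);
    bool my_ring_more_work(void *ring);

    static int my_poll(struct napi_struct *napi, int budget)
    {
            void *ring = napi->dev->ml_priv;  /* wherever the driver keeps its ring */
            int work_done = 0;
            struct sk_buff *skb;

            while (work_done < budget && (skb = my_ring_next_skb(ring))) {
                    napi_gro_receive(napi, skb);  /* was: netif_receive_skb(skb) */
                    work_done++;
            }

            if (work_done < budget) {
                    /* flush pending GRO state before possibly sleeping */
                    napi_gro_flush(napi, false);

                    if (!my_ring_more_work(ring))
                            napi_complete(napi);
            }

            return work_done;
    }
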